Merge pull request #9880 from projectdiscovery/k8s-templates

k8s templates 🚀
patch-4
Prince Chaddha 2024-06-25 13:51:12 +04:00 committed by GitHub
commit f428588b08
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
35 changed files with 1586 additions and 0 deletions

View File

@ -0,0 +1,49 @@
id: k8s-cpu-limits-not-set

info:
  name: CPU limits not set in Deployments
  author: princechaddha
  severity: medium
  description: Checks for missing CPU limits in Kubernetes Deployments, which can lead to excessive CPU usage and affect other applications
  impact: |
    Missing CPU limits in Kubernetes Deployments can cause excessive CPU usage that can starve other applications, leading to performance degradation across the cluster.
  remediation: |
    Set CPU limits for all containers in Kubernetes Deployments to ensure fair CPU resource distribution and prevent performance issues.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment when ANY container lacks a CPU limit.
      // (Was `!containers.some(...)`, which fired only when EVERY container
      // lacked a limit — inconsistent with the remediation, which requires
      // limits on all containers.)
      if (!deployment.spec.template.spec.containers.every(container => container.resources && container.resources.limits && container.resources.limits.cpu)) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks CPU limits.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 490a0046304402207794d2cf587203ba357f4376862e445b2f8612f8fb9eee2683d45f445f4a450d02206387d8f375f6157ef9ac572a2595210e717fef4fcd935b5a7f243f806bea47f4:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,49 @@
id: k8s-cpu-requests-not-set

info:
  name: CPU Requests not set in Deployments
  author: princechaddha
  severity: medium
  description: Checks for missing CPU requests in Kubernetes Deployments, which can lead to inadequate scheduling and resource allocation.
  impact: |
    Missing CPU requests in Kubernetes Deployments can cause poor scheduling decisions and suboptimal resource allocation, potentially leading to degraded application performance.
  remediation: |
    Set CPU requests for all containers in Kubernetes Deployments to ensure efficient scheduling and resource allocation.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment when ANY container lacks a CPU request.
      // (Was `!containers.some(...)`, which fired only when EVERY container
      // lacked a request — inconsistent with the remediation, which requires
      // requests on all containers.)
      if (!deployment.spec.template.spec.containers.every(container => container.resources && container.resources.requests && container.resources.requests.cpu)) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks CPU requests.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed (logic + "Deplayments" typo) — the signature digest below must be regenerated.
# digest: 4a0a0047304502205bb61869d917787455c6863fe6115682cf34d1267258db863a2e6380ccc9049d022100b783912d411495b9b93ed83dbd6318049f59fc40574db2c881e9b82e95566e4d:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,48 @@
id: k8s-default-namespace-used

info:
  name: Default Namespace Usage in Deployments
  author: princechaddha
  severity: high
  description: Checks if Kubernetes Deployments are using the default namespace, which can lead to security risks and mismanagement issues.
  impact: |
    Using the default namespace for Kubernetes Deployments can increase security risks as it might allow broader access than necessary. It also complicates resource management across multiple teams and applications.
  remediation: |
    Avoid using the default namespace for Kubernetes Deployments. Create and specify dedicated namespaces tailored to specific applications or teams to enhance security and manage resources effectively.
  reference:
    - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
  tags: cloud,devops,kubernetes,k8s,devsecops,namespaces,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment", deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag any deployment whose metadata.namespace is "default"
      // (case-insensitive, although the API server lower-cases namespaces).
      if (deployment.metadata.namespace.toLowerCase() === "default") {
        let result = (`Deployment '${deployment.metadata.name}' is using the default namespace, which is not recommended.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4a0a00473045022100f48af70d2fd53e58592c7c951684543c97128981f343667899279760d37658fb022017c6dc9cbae1b0be18b1e11214bc5e1f51d909339c63b354a8d56a62f496ab79:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,50 @@
id: k8s-host-ports-check

info:
  name: Host ports should not be used
  author: princechaddha
  severity: medium
  description: Checks Kubernetes Deployments to ensure they are not configured to use host ports, which can expose the host to potential security risks.
  impact: |
    Using host ports can compromise the isolation between the host and the containers, increasing the risk of unauthorized access to host resources. This can lead to security breaches.
  remediation: |
    Avoid using host ports in Kubernetes Deployments. Use services or other networking mechanisms to expose container applications.
  reference:
    - https://kubernetes.io/docs/concepts/services-networking/service/
  tags: cloud,devops,kubernetes,devsecops,deployments,k8s,k8s-cluster-security

# List all deployments once (projected to name/namespace/containers by jq),
# then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment", deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {name: .metadata.name, namespace: .metadata.namespace, containers: .spec.template.spec.containers}'

javascript:
  - code: |
      let deploymentData = JSON.parse(template.deployment);
      // Flag the deployment if any container declares a port with hostPort set.
      deploymentData.containers.forEach(container => {
        if (container.ports && container.ports.some(port => port.hostPort)) {
          let result = (`Deployment '${deploymentData.name}' in namespace '${deploymentData.namespace}' uses host ports.`);
          Export(result);
        }
      });
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4b0a00483046022100f3a3753e5b3711de8e19eb7d96fe6cb9bbb71ea0d38975957c40c504f3469967022100c210bc6cb2adbcf0da1de9cd21c812ec890563c3cd8bb2edc76196cd311aecbd:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,48 @@
id: k8s-image-pull-policy-always

info:
  name: Image Pull Policy set to Always
  author: princechaddha
  severity: low
  description: Ensures that Kubernetes deployments have the image pull policy set to 'Always', which guarantees the most up-to-date version of the image is used.
  impact: |
    Not setting the image pull policy to 'Always' may cause pods to use outdated versions of images, which can lead to security vulnerabilities if the images contain fixes or updates.
  remediation: Update the image pull policy in Kubernetes Deployments to 'Always' to ensure that the latest container images are always used.
  reference:
    - https://kubernetes.io/docs/concepts/containers/images/#updating-images
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,images,docker,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment unless every container's imagePullPolicy is 'Always'.
      // NOTE(review): initContainers are not inspected here — confirm whether
      // they should be covered as well.
      if (!deployment.spec.template.spec.containers.every(container => container.imagePullPolicy === 'Always')) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' does not have image pull policy set to Always.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4a0a0047304502200409430ae2d1311531b8e20737a1b5d8b93a3a57d0ddffa0003d34a99a4a06dd022100964eda202924507b711a38850dcae155eb2d3966b48557787e2c0e5a60c58e64:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,52 @@
id: k8s-image-tag-not-fixed

info:
  name: Image Tag should be fixed - not latest or blank
  author: princechaddha
  severity: low
  description: Checks if Kubernetes Deployment container images are using tags other than 'latest' or blank, which can lead to unstable and unpredictable deployments.
  impact: |
    Using 'latest' or blank image tags can result in deploying non-reproducible container images, potentially leading to unexpected application behavior and difficulties in troubleshooting.
  remediation: |
    Use specific image tags for all containers in Kubernetes Deployments to ensure reproducibility and stability of application deployments.
  reference:
    - https://kubernetes.io/docs/concepts/containers/images/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      deployment.spec.template.spec.containers.forEach(container => {
        // Parse only the final path segment so a registry port
        // ('registry:5000/app') is not mistaken for a tag. An image with no
        // ':' in that segment has a blank tag (implicitly 'latest').
        // (The previous `image.split(':').pop()` returned the image name
        // itself for untagged images, so the blank-tag case never fired.)
        const lastSegment = container.image.substring(container.image.lastIndexOf('/') + 1);
        const tag = lastSegment.includes(':') ? lastSegment.split(':').pop() : '';
        // Digest-pinned images ('name@sha256:...') are immutable — skip them.
        if (!lastSegment.includes('@') && (tag === 'latest' || tag === '')) {
          let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' uses 'latest' or blank image tag for container '${container.name}'.`);
          Export(result);
        }
      });
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 4b0a00483046022100a73d7f300303791070156f087c9a729d56361a04a4c2f11adb16ce3c66addd30022100e1ecf1ead4985f23fdbe80dafb0954bc5c17be9d036573d2cd40494c1fcddd7e:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-liveness-probe-not-configured

info:
  name: Liveness Probe Not Configured in Deployments
  author: princechaddha
  severity: medium
  description: Checks for missing liveness probes in Kubernetes Deployments, which are essential for managing container health and automatic recovery
  impact: |
    Absence of liveness probes can lead to unresponsive containers remaining in service, potentially degrading application performance and availability.
  remediation: Configure liveness probes for all containers in Kubernetes Deployments to ensure proper health checks and automatic restarts of failing containers
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment when ANY container lacks a liveness probe.
      // (Was `!containers.some(...)`, which fired only when EVERY container
      // lacked one — inconsistent with the remediation, which requires probes
      // on all containers.)
      if (!deployment.spec.template.spec.containers.every(container => container.livenessProbe)) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks a configured liveness probe.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 490a0046304402200d2e45e711a22fa4ab57ff3065de5eb87f78aa2904b41e829e3566b04de8109e02206750a88213ad16e78ffaf03980b9b7a6994acb71bdebaf83292d6608fa07a130:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-memory-limits-not-set

info:
  name: Memory limits not set in Deployments
  author: princechaddha
  severity: medium
  description: Checks for missing memory limits in Kubernetes Deployments, which can lead to resource contention and instability
  impact: |
    Missing memory limits in Kubernetes Deployments can cause resource contention and potential application instability.
  remediation: Set memory limits for all containers in Kubernetes Deployments to ensure resource management and application stability
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment when ANY container lacks a memory limit.
      // (Was `!containers.some(...)`, which fired only when EVERY container
      // lacked one — inconsistent with the remediation, which requires limits
      // on all containers.)
      if (!deployment.spec.template.spec.containers.every(container => container.resources && container.resources.limits && container.resources.limits.memory)) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks memory limits.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 4a0a0047304502203a66813cc15b12b1260c6862926c6748694a84bc66c9ca24dff7052f998b5aaf022100ef69cce9aba0cb47d58dd0bc916ee54f02a32632a4749e98e7a22e3b92da0e8d:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-memory-requests-not-set

info:
  name: Memory requests not set in Deployments
  author: princechaddha
  severity: medium
  description: Checks for missing memory requests in Kubernetes Deployments, which can lead to inefficient scheduling and potential node resource exhaustion.
  impact: |
    Missing memory requests in Kubernetes Deployments can lead to inefficient pod scheduling, causing potential resource exhaustion on nodes.
  remediation: Set memory requests for all containers in Kubernetes Deployments to ensure efficient pod scheduling and node resource utilization.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment when ANY container lacks a memory request.
      // (Was `!containers.some(...)`, which fired only when EVERY container
      // lacked one — inconsistent with the remediation, which requires
      // requests on all containers.)
      if (!deployment.spec.template.spec.containers.every(container => container.resources && container.resources.requests && container.resources.requests.memory)) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks memory requests.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 4b0a00483046022100fe240dbf4c3fb7cc25858eae6a77ff3f3af8cce2dd2386358374f3bb179d1c2502210088357bf87098f8125c3a6038dde87ba3ac750d253e525b786f11013219119a7b:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,51 @@
id: minimize-added-capabilities

info:
  name: Minimize container added capabilities
  author: princechaddha
  severity: high
  description: Checks for containers in Kubernetes Deployments with added capabilities beyond the default set, increasing security risks.
  impact: |
    Containers with additional capabilities are granted more privileges than necessary, potentially allowing them to bypass intended security restrictions. This increases the risk of exploitation and unauthorized access.
  remediation: |
    Ensure that no unnecessary capabilities are added to containers within Kubernetes Deployments. Use security contexts to define the minimum necessary privileges.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment", deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Report every container whose securityContext.capabilities.add list is
      // non-empty, listing the added capabilities in the finding.
      for (const container of deployment.spec.template.spec.containers) {
        if (container.securityContext && container.securityContext.capabilities && container.securityContext.capabilities.add && container.securityContext.capabilities.add.length > 0) {
          let addedCaps = container.securityContext.capabilities.add.join(', ');
          let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' has added capabilities: ${addedCaps}.`);
          Export(result);
        }
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4b0a00483046022100a4752be32718d5e3bf67d19c2c12b73dd2ddd70a44dc46ce8684cc5a70231e26022100f5f8e9f911051eeb8a41db141b0b2511c798e6d9be50aeef740011123d239ce6:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,52 @@
id: k8s-privileged-containers

info:
  name: Privileged Containers Found in Deployments
  author: princechaddha
  severity: critical
  description: Checks for containers running in privileged mode within Kubernetes Deployments, and now also checks for user privileges and privilege escalation settings.
  impact: |
    Running containers in privileged mode, as the root user, or with privilege escalation enabled can grant them access to host resources and could lead to security breaches if the container is compromised.
  remediation: |
    Ensure that no container in Kubernetes Deployments runs in privileged mode, as the root user, or with privilege escalation enabled. Modify the security context for each container to set `privileged: false`, `runAsUser` appropriately, and `allowPrivilegeEscalation: false`.
  reference:
    - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment", deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      for (let container of deployment.spec.template.spec.containers) {
        let sc = container.securityContext || {};
        // NOTE(review): when runAsUser is unset, `sc.runAsUser < 1000` is
        // false (undefined comparisons are false), so containers with no
        // explicit user are NOT flagged — confirm this is intended.
        if (sc.privileged || sc.runAsUser < 1000 || sc.allowPrivilegeEscalation) {
          let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' is running container '${container.name}' with insecure settings: Privileged=${sc.privileged}, runAsUser=${sc.runAsUser}, allowPrivilegeEscalation=${sc.allowPrivilegeEscalation}.`);
          Export(result);
          // One finding per deployment: stop at the first insecure container.
          break;
        }
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 490a00463044022038295a725d25b77f920cfd32a5a407220f8f9c5faa37544670696f4cea356434022027fd56a2feae1101fe03c0d2e6e2452a2a89947f5c14e9711b8a744d11368b1e:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,48 @@
id: k8s-readiness-probe-not-set

info:
  name: Readiness Probes not set in Deployments
  author: princechaddha
  severity: medium
  description: Checks for missing readiness probes in Kubernetes Deployments, which can lead to traffic being sent to unready containers
  impact: |
    Not configuring readiness probes in Kubernetes Deployments can result in the routing of traffic to containers that are not ready to handle requests, leading to potential downtime or degraded performance.
  remediation: |
    Define readiness probes in all containers within your Kubernetes Deployments to ensure that traffic is only routed to containers that are fully prepared to handle it.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
  tags: cloud,devops,kubernetes,k8s,devsecops,deployments,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment",deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // Flag the deployment when ANY container lacks a readiness probe.
      // (Was `!containers.some(...)`, which fired only when EVERY container
      // lacked one — inconsistent with the remediation, which requires probes
      // on all containers.)
      if (!deployment.spec.template.spec.containers.every(container => container.readinessProbe)) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' lacks readiness probes.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 4a0a004730450220592b68587cbae5ddf8d0c03554b81c57008a3878d45b2e878666aae7661e886d022100a8bffd45d8dcf071df20071daf0542bf6bbc9424e86c2aa38c522d7b10c6be3b:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,48 @@
id: k8s-root-container-admission

info:
  name: Minimize the admission of root containers
  author: princechaddha
  severity: critical
  description: Checks if any Kubernetes Deployments admit containers that run as root, which can pose a significant security risk.
  impact: |
    Allowing containers to run as root can lead to privilege escalation and unauthorized access to host resources, significantly compromising the security of the cluster.
  remediation: |
    Configure security contexts for all pods to run containers with a non-root user. Use Pod Security Policies or OPA/Gatekeeper to enforce these configurations.
  reference:
    - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups
  tags: cloud,devops,kubernetes,devsecops,deployments,k8s,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment", deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // A container may run as root when neither runAsNonRoot nor a non-zero
      // runAsUser is set at the container or pod level, or when runAsUser is 0.
      // (The previous check, `securityContext.runAsUser === null`, could never
      // match: absent JSON keys parse to undefined, not null, so the template
      // produced no findings.)
      const podSC = deployment.spec.template.spec.securityContext || {};
      const admitsRoot = deployment.spec.template.spec.containers.some(container => {
        const sc = container.securityContext || {};
        const runAsNonRoot = sc.runAsNonRoot !== undefined ? sc.runAsNonRoot : podSC.runAsNonRoot;
        const runAsUser = sc.runAsUser !== undefined ? sc.runAsUser : podSC.runAsUser;
        if (runAsNonRoot === true) return false;
        return runAsUser === undefined || runAsUser === null || runAsUser === 0;
      });
      if (admitsRoot) {
        let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' permits containers to run as root.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 4a0a0047304502203296d04807538e04f058f6bbf7241e04ed2bf22ece4282aca6a268d9463a86cf022100abe93bb221464d568cbeb7cf07d382ea36e6987e9dca958912f6231b6c4e2cdf:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,53 @@
id: k8s-seccomp-profile-set

info:
  name: Set appropriate seccomp profile
  author: princechaddha
  severity: medium
  description: Checks if the seccomp profile is set to docker/default or runtime/default in Kubernetes Deployments.
  impact: |
    Using a default seccomp profile helps in reducing the attack surface of the container by limiting the syscalls containers can make, which can prevent certain types of exploits.
  remediation: |
    Ensure that all containers in Kubernetes Deployments have a seccomp profile of docker/default or runtime/default set in their security contexts.
  reference:
    - https://kubernetes.io/docs/tutorials/clusters/seccomp/
  tags: cloud,devops,kubernetes,devsecops,containers,k8s,k8s-cluster-security

# List all deployments once, then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let deployment of template.items) {
    set("deployment", deployment)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get deployments --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

javascript:
  - code: |
      deployment = JSON.parse(template.deployment);
      // A container-level seccompProfile overrides the pod-level one; the
      // pod-level securityContext is the common place to set it, and the
      // previous check ignored it entirely, causing false positives.
      // NOTE(review): 'Localhost' profiles also restrict syscalls but are not
      // accepted here, matching the original policy — confirm intended.
      const podProfile = (deployment.spec.template.spec.securityContext || {}).seccompProfile;
      const allowed = ['RuntimeDefault', 'DockerDefault'];
      deployment.spec.template.spec.containers.forEach(container => {
        const profile = (container.securityContext && container.securityContext.seccompProfile) || podProfile;
        if (!(profile && allowed.includes(profile.type))) {
          let result = (`Deployment '${deployment.metadata.name}' in namespace '${deployment.metadata.namespace}' does not have an appropriate seccomp profile set.`);
          Export(result);
        }
      });
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): template body changed — the signature digest below must be regenerated.
# digest: 4a0a004730450221008b59741a5c3cbb00fea807cbfad091c4bbd5b4cfb68d0eaba6cbad0f5b41b031022021a3ff36185afd480db929ad18207f2b03a2a823ec1d1858de0facf7fd7b2bbf:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,31 @@
id: kubernetes-code-env

info:
  name: Kubernetes Cluster Validation
  author: princechaddha
  severity: info
  description: |
    Checks if kubernetes CLI is set up and all necessary tools are installed on the environment.
  reference:
    - https://kubernetes.io/
  tags: cloud,devops,kubernetes,k8s,k8s-cluster-security

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: |
      kubectl version

    extractors:
      # Pull the API server version out of `kubectl version` output; the
      # template only matches when the server responds.
      - type: regex
        internal: true
        name: server_version
        group: 1
        regex:
          - 'Server Version:\s*(v[0-9]+\.[0-9]+\.[0-9]+)'

      - type: dsl
        dsl:
          - '"kubectl is successfully connected to the Kubernetes API Server Version: " + server_version'

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4a0a004730450220619359f90501cb0301a0a76aea235c33bf0d89d0295b92f754fedbadae743cad02210081662171da38b9b6cbeee60e25edb542f288457118bde3833e2ea4e784333f18:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-netpol-egress-rules

info:
  name: Network policies define egress rules
  author: princechaddha
  severity: medium
  description: Checks for network policies in Kubernetes that do not define egress rules, which can leave the network exposed to external threats.
  impact: |
    Lack of egress rules in network policies can result in unrestricted outbound network traffic, which may allow data exfiltration or unauthorized access to external services.
  remediation: Define egress rules in all network policies to control outbound traffic from your Kubernetes pods, thereby reducing security risks.
  reference:
    - https://kubernetes.io/docs/concepts/services-networking/network-policies/
  tags: cloud,devops,kubernetes,devsecops,k8s,k8s-cluster-security

# List all network policies once (projected to name/egress by jq),
# then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let policy of template.items) {
    set("policy", policy)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get networkpolicies --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {policy: .metadata.name, egress: .spec.egress}'

javascript:
  - code: |
      let policyData = JSON.parse(template.policy);
      // Flag policies whose .spec.egress is absent or an empty list.
      if (!policyData.egress || policyData.egress.length === 0) {
        let result = (`Network policy '${policyData.policy}' does not define egress rules.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4b0a00483046022100adb84e8a912b21d2e2bfd1f9253aec9cc33b6feb9fe7ee538ee0057e61ef8bb9022100d26ff6d9a2ac5f662df09dbb322793b0f9402594c96c361189367f58586344bc:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,48 @@
id: k8s-netpol-namespace

info:
  name: Network Policies specify namespace
  author: princechaddha
  severity: medium
  description: Checks for Kubernetes Network Policies that do not specify a namespace, which can lead to potential misconfigurations and security issues.
  impact: |
    Omitting the namespace in Network Policies can cause the policies to apply incorrectly, potentially exposing Kubernetes resources to unauthorized access. This poses a security risk by not isolating network traffic properly within the cluster.
  remediation: |
    Ensure that all Network Policies explicitly define a namespace to maintain proper network isolation and security boundaries.
  reference:
    - https://kubernetes.io/docs/concepts/services-networking/network-policies/
  tags: cloud,devops,kubernetes,devsecops,k8s,k8s-cluster-security

# List all network policies once (projected to name/namespace by jq),
# then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let policy of template.items) {
    set("policy", policy)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get netpol --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {policy: .metadata.name, namespace: .metadata.namespace}'

javascript:
  - code: |
      let policyData = JSON.parse(template.policy);
      // NOTE(review): NetworkPolicy is a namespaced resource, so the API
      // server always populates metadata.namespace on stored objects — this
      // check may never fire against live cluster output. Confirm whether the
      // intent was to inspect manifests rather than the cluster.
      if (!policyData.namespace) {
        let result = (`Network Policy '${policyData.policy}' does not specify a namespace.`);
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 490a00463044022005edb8b78c4db40572f8297946636ce446d578c62f1ec7bf7f1621ed021f27c9022078555811953b55f080c0dc21ec6138fbd712b5069ca571e2492c5e7cc3172759:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,48 @@
id: k8s-network-ingress-rules

info:
  name: Define network ingress rules
  author: princechaddha
  severity: medium
  description: Checks if Kubernetes network policies define specific ingress rules, which can help secure network communication within the cluster.
  impact: |
    Without specific ingress rules defined in network policies, unintended traffic may access pods within the Kubernetes cluster, increasing the risk of malicious activity.
  remediation: |
    Define specific ingress rules in all network policies to control the flow of inbound traffic to pods, ensuring only authorized traffic can access cluster resources.
  reference:
    - https://kubernetes.io/docs/concepts/services-networking/network-policies/
  tags: cloud,devops,kubernetes,security,k8s,k8s-cluster-security

# List all network policies once (projected to name/ingress by jq),
# then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let policy of template.items) {
    set("policy", policy)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get networkpolicies --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {policy: .metadata.name, ingress: .spec.ingress}'

javascript:
  - code: |
      let policyData = JSON.parse(template.policy);
      // Flag policies whose .spec.ingress is absent or an empty list.
      if (!policyData.ingress || policyData.ingress.length === 0) {
        let result = `Network policy '${policyData.policy}' does not define any ingress rules.`;
        Export(result);
      }
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 4a0a004730450220506a30ff32ae7bcddc875449aabe33208c4437745e3f0ba016b3087a8d780fe2022100c1be39affffaa403e6022ded544a51e09ff5b41c4812601ee90848f89039ae3f:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,50 @@
id: k8s-allow-privilege-escalation-set

info:
  name: Containers run with allowPrivilegeEscalation enabled
  author: princechaddha
  severity: critical
  description: Checks for containers running with the allowPrivilegeEscalation flag enabled, which can increase security risks by allowing privileges to be escalated
  impact: |
    Enabling allowPrivilegeEscalation in container deployments can result in elevated privileges, potentially allowing attackers to gain further access to host resources. This poses significant security risks.
  remediation: Ensure that the allowPrivilegeEscalation flag is set to false in all container configurations to minimize security risks
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  tags: cloud,devops,kubernetes,security,devsecops,containers,k8s,k8s-cluster-security

# List all pods once (projected to pod name/containers by jq),
# then evaluate each one with the JS check below.
flow: |
  code(1);
  for (let container of template.items) {
    set("container", container)
    javascript(1);
  }

self-contained: true

code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {pod: .metadata.name, containers: .spec.containers}'

javascript:
  - code: |
      let podData = JSON.parse(template.container);
      // Flag containers that EXPLICITLY set allowPrivilegeEscalation: true.
      // NOTE(review): containers that leave the flag unset are not flagged —
      // confirm whether the unset default should also be reported.
      podData.containers.forEach(container => {
        if (container.securityContext && container.securityContext.allowPrivilegeEscalation === true) {
          let result = (`Container '${container.name}' in pod '${podData.pod}' running with allowPrivilegeEscalation enabled.`);
          Export(result);
        }
      });
    extractors:
      - type: dsl
        dsl:
          - response

# NOTE(review): indentation restored from flattened diff extraction — the signature digest below must be regenerated.
# digest: 490a0046304402202e23ef1e6b258a44e394494f51808bb7b81a856101efe5a929429a6fcde414d4022058eacd480cb3e4b61fe6a86674f1f218b53144a3e8fed02c20554c0a34ae00d1:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-containers-share-host-ipc
info:
  name: Containers sharing host IPC namespace
  author: princechaddha
  severity: critical
  description: Checks if any containers in Kubernetes Pods are configured to share the host's IPC namespace, which can lead to security risks.
  impact: |
    Sharing the host's IPC namespace allows containers to access data across all containers on the same host, posing potential security risks.
  remediation: Ensure that no container in Kubernetes Pods is set to share the host IPC namespace. Configure 'spec.hostIPC' to 'false' for all pods to isolate IPC namespaces.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  tags: cloud,devops,kubernetes,k8s,devsecops,pods,k8s-cluster-security

# Run the kubectl step once, then evaluate each extracted pod in the JS step.
flow: |
  code(1);
  for (let pod of template.items) {
    set("pod",pod)
    javascript(1);
  }

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

# Flags pods whose spec.hostIPC is truthy (i.e. explicitly set to true).
javascript:
  - code: |
      pod = JSON.parse(template.pod);
      if (pod.spec.hostIPC) {
        let result = (`Pod '${pod.metadata.name}' in namespace '${pod.metadata.namespace}' is configured to share the host IPC namespace.`);
        Export(result);
      }

    extractors:
      - type: dsl
        dsl:
          - response
# digest: 4b0a00483046022100cc2146147ae70fb4bfc9d107d68d692ac4a287dfdaebfb356b425af0761eda00022100a8262c6003997d011ca625ceb8c1f8cc5e245c64c7306870001756811b39889b:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-host-network-namespace-shared
info:
  name: Host Network Namespace Sharing
  author: princechaddha
  severity: high
  description: Checks if containers in Kubernetes Pods are configured to share the host's network namespace, which can lead to security risks.
  impact: |
    Sharing the host's network namespace allows containers to access the host network directly. This can lead to potential security breaches as containers might bypass network policies and gain unrestricted network access on the host.
  remediation: |
    Ensure that the 'hostNetwork' field is set to false in all Kubernetes Pods to prevent containers from sharing the host's network namespace.
  reference:
    - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces
  tags: cloud,devops,kubernetes,k8s,devsecops,namespace,k8s-cluster-security

# Run the kubectl step once, then evaluate each extracted pod in the JS step.
flow: |
  code(1);
  for (let pod of template.items) {
    set("pod", pod)
    javascript(1);
  }

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

# Flags pods whose spec.hostNetwork is truthy (i.e. explicitly set to true).
javascript:
  - code: |
      pod = JSON.parse(template.pod);
      if (pod.spec.hostNetwork) {
        let result = (`Pod '${pod.metadata.name}' in namespace '${pod.metadata.namespace}' is configured to share the host's network namespace.`);
        Export(result);
      }

    extractors:
      - type: dsl
        dsl:
          - response
# digest: 4b0a00483046022100e21998047fdf04b608359872be3fd0d5767bb0a3f6a7f8c66547c2ab9943fdfc02210089231ef6e0d74220e0fa8e2cfc7eec254e48299d53bfb4abe2891748a2e3187f:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-host-pid-namespace-sharing
info:
  name: Host PID Namespace Sharing
  author: princechaddha
  severity: critical
  description: Checks if containers in Kubernetes pods share the host's process ID namespace, which can pose a security risk.
  impact: |
    Sharing the host's PID namespace allows processes within the pod to view all of the processes on the host, potentially leading to privilege escalation and other security vulnerabilities.
  remediation: |
    Ensure that the 'hostPID' field is set to 'false' in Kubernetes Pod specifications to prevent containers from sharing the host's PID namespace.
  reference:
    - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces
  tags: cloud,devops,kubernetes,k8s,devsecops,pods,k8s-cluster-security

# Run the kubectl step once, then evaluate each extracted pod in the JS step.
flow: |
  code(1);
  for (let pod of template.items) {
    set("pod", pod)
    javascript(1);
  }

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[]'

# Flags pods whose spec.hostPID is truthy (i.e. explicitly set to true).
javascript:
  - code: |
      pod = JSON.parse(template.pod);
      if (pod.spec.hostPID) {
        let result = (`Pod '${pod.metadata.name}' in namespace '${pod.metadata.namespace}' is sharing the host's PID namespace.`);
        Export(result);
      }

    extractors:
      - type: dsl
        dsl:
          - response
# digest: 4a0a0047304502202982af00e2f77f8a8d34d3a60faa749adcc4621fcaa816c2f19f4f6fa109ef8a022100d768ba6500983f601db45742b46b488b4efee0e2e15389034b0000c2667b67d2:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,47 @@
id: k8s-readonly-fs
info:
  name: Enforce Read-Only Filesystem for Containers
  author: princechaddha
  severity: critical
  description: Checks for containers that do not use a read-only filesystem, which can prevent malicious write operations at runtime
  impact: |
    Not using a read-only filesystem can expose containers to risks of malicious modifications at runtime, compromising the container's integrity and security.
  remediation: Configure containers to use read-only filesystems where possible to enhance security and minimize risk of unauthorized data modification
  reference:
    - https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
  tags: cloud,devops,kubernetes,k8s,devsecops,pods,k8s-cluster-security

# Run the kubectl step once, then evaluate each extracted pod's containers in the JS step.
flow: |
  code(1);
  for (let pod of template.items) {
    set("pod", pod)
    javascript(1);
  }

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    # Keep the pod's name and namespace alongside its containers so the JS
    # step can report where the offending container lives. The previous
    # '.items[].spec.containers[]' projection dropped pod metadata, so the
    # message rendered "pod 'undefined' in namespace 'undefined'".
    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {pod: .metadata.name, namespace: .metadata.namespace, containers: .spec.containers}'

# Flags every container that lacks securityContext.readOnlyRootFilesystem: true
# (a missing securityContext also means no read-only root filesystem).
javascript:
  - code: |
      let podData = JSON.parse(template.pod);
      podData.containers.forEach(container => {
        if (!container.securityContext || container.securityContext.readOnlyRootFilesystem !== true) {
          let result = (`Container '${container.name}' in pod '${podData.pod}' in namespace '${podData.namespace}' does not use a read-only filesystem.`);
          Export(result);
        }
      });

    extractors:
      - type: dsl
        dsl:
          - response
# NOTE(review): template body changed — signature below is stale and must be regenerated.
# digest: 4a0a00473045022100822332f29c05236643aceb706c563112c463a6fdda5a60f391aaec1308fa9e3902207c8e95c0302c1f6f85c9a8e43e719b3e52c67684e4b5806e4e9ec0c44e1bfb20:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,50 @@
id: k8s-readonly-rootfs
info:
  name: Pods with read-only root filesystem
  author: princechaddha
  severity: medium
  description: Checks for pods and containers running with a read-only root filesystem to prevent modifications to the filesystem, enhancing security.
  impact: |
    Running containers with a read-only root filesystem ensures that applications are not able to write to the filesystem or modify existing content. This is a common security practice to prevent malicious changes.
  remediation: |
    Configure all pods and containers to have their root filesystem set to read-only mode. This can be achieved by setting the securityContext.readOnlyRootFilesystem parameter to true in the pod or container configuration.
  reference:
    - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems
  tags: cloud,devops,kubernetes,devsecops,pods,k8s,k8s-cluster-security

# Run the kubectl step once, then evaluate each extracted pod's containers in the JS step.
flow: |
  code(1);
  for (let pod of template.items) {
    set("pod", pod)
    javascript(1);
  }

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {pod: .metadata.name, containers: .spec.containers}'

# Flags every container not explicitly running with a read-only root filesystem.
# A container with NO securityContext at all also lacks a read-only rootfs, so
# the check uses `!container.securityContext || ...` (the previous
# `container.securityContext && ...` guard silently skipped such containers).
javascript:
  - code: |
      let podData = JSON.parse(template.pod);
      podData.containers.forEach(container => {
        if (!container.securityContext || container.securityContext.readOnlyRootFilesystem !== true) {
          let result = (`Container '${container.name}' in pod '${podData.pod}' is not running with a read-only root filesystem.`);
          Export(result);
        }
      });

    extractors:
      - type: dsl
        dsl:
          - response
# NOTE(review): template body changed — signature below is stale and must be regenerated.
# digest: 490a0046304402205ca1449c5ae245df848df2d8b4117966ed8fc276841e0132e2844c643179f4c9022056438a03c5cb5402b1d07b3d1d88f123559df889348d5605c01ce0aedbedaf47:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,49 @@
id: k8s-root-user-id
info:
  name: Pods run with root user ID
  author: princechaddha
  severity: low
  description: Checks for pods running with the user ID of the root user, increasing security risks.
  impact: |
    Running pods with the root user ID can allow malicious entities to gain unnecessary privileges, leading to potential compromises in the Kubernetes environment.
  remediation: Configure pods to run with a non-root user ID by setting the 'securityContext' for each container and the pod itself.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  tags: cloud,devops,kubernetes,devsecops,pods,k8s,k8s-cluster-security

# Run the kubectl step once, then evaluate each extracted pod's containers in the JS step.
flow: |
  code(1);
  for (let pod of template.items) {
    set("pod", pod)
    javascript(1);
  }

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: kubectl get pods --all-namespaces --output=json

    extractors:
      - type: json
        name: items
        internal: true
        json:
          - '.items[] | {pod: .metadata.name, containers: .spec.containers}'

# NOTE(review): only containers explicitly setting runAsUser: 0 are flagged;
# containers with no runAsUser may still run as root depending on the image's
# USER — confirm whether unset should also be reported.
javascript:
  - code: |
      let podData = JSON.parse(template.pod);
      podData.containers.forEach(container => {
        if (container.securityContext && container.securityContext.runAsUser === 0) {
          let result = (`Container '${container.name}' in pod '${podData.pod}' is running with root user ID.`);
          Export(result);
        }
      });

    extractors:
      - type: dsl
        dsl:
          - response
# digest: 4a0a00473045022100c1eee5714a5bba3549ccef97ba37cc178ad76f7786c90732ff49d211d1d4153802204dae0cd7ef2c634e85d3d27a7feb5c9508f4066a91f7ef56f861ed86af5bb420:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: audit-log-path-set
info:
  name: Ensure audit-log-path set
  author: princechaddha
  severity: medium
  description: Checks if the audit-log-path argument is properly set in the Kubernetes API server configuration, which is essential for maintaining a reliable audit trail.
  impact: |
    Without the audit-log-path argument, Kubernetes does not record API server audit logs, reducing the visibility into operations and making it harder to detect and respond to malicious activities.
  remediation: |
    Configure the Kubernetes API server to include the audit-log-path argument pointing to a secure, writeable directory where audit logs will be stored. Ensure that this directory is properly secured and regularly monitored.
  reference:
    - https://kubernetes.io/docs/tasks/debug-application-cluster/audit/
  tags: cloud,devops,kubernetes,devsecops,api-server,k8s,k8s-cluster-security
variables:
  argument: "audit-log-path"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the kube-apiserver container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath="{.items[*].spec.containers[*].command}"

    # Fires when the apiserver command is present but the flag is absent
    # (the second matcher is negated).
    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'kube-apiserver'
      - type: word
        words:
          - "audit-log-path"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"API server configuration is missing the " + argument + " argument."'
# digest: 4a0a0047304502204104b24f090bfea60d25246f47dd468a5696ce9e436fe282748f60d6c4929718022100c4902c1fc9855589dda168845c10e65647624c849700bcb556ada0418a10136a:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: k8s-enc-prov-conf
info:
  name: Ensure that encryption providers are configured
  author: princechaddha
  severity: medium
  description: Checks if encryption providers are appropriately configured in Kubernetes, ensuring that data at rest is secured.
  impact: |
    Misconfigured encryption providers can lead to unsecured data at rest, potentially exposing sensitive information to unauthorized access.
  remediation: |
    Ensure that the encryption provider configuration file is set up correctly and referenced properly in the API server configuration. Encryption should be enabled and configured according to the security best practices.
  reference:
    - https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/
  tags: cloud,devops,kubernetes,devsecops,encryption,k8s,k8s-cluster-security
variables:
  argument: "encryption-provider-config"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the kube-apiserver container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath="{.items[*].spec.containers[*].command}"

    # Fires when the apiserver command is present but the flag is absent
    # (the second matcher is negated).
    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'kube-apiserver'
      - type: word
        words:
          - "encryption-provider-config"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"API server configuration is missing the " + argument + " argument"'
# digest: 4b0a00483046022100d14427c3db24f5ff548847a899b85731aa480204820c6d9916a85e01dfd67939022100aeefbe775d7dade76e9c6df085d0fca70f39ead3c3374b189912d92b646253ad:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: k8s-etcd-cafile-set
info:
  name: Ensure etcd-cafile argument set
  author: princechaddha
  severity: medium
  description: Checks if the etcd-cafile argument is properly set in the etcd configuration, crucial for secure client connections to etcd.
  impact: |
    Without specifying the etcd-cafile argument, etcd might not establish secure and authenticated connections, leading to potential security breaches.
  remediation: |
    Configure etcd to use an etcd-cafile argument that points to a valid CA certificate bundle. This setting should be part of the etcd startup arguments or in its configuration file.
  reference:
    - https://etcd.io/docs/v3.5/op-guide/security/
  tags: cloud,devops,kubernetes,devsecops,etcd,k8s,k8s-cluster-security
variables:
  argument: "etcd-cafile"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the etcd container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=etcd -o jsonpath="{.items[*].spec.containers[*].command}"

    # Fires when the etcd command is present but the flag is absent
    # (the second matcher is negated).
    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'etcd'
      - type: word
        words:
          - "etcd-cafile"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"Etcd configuration is missing the " + argument + " argument"'
# digest: 4a0a004730450220594bb2a708ae66a4c884326cc844ef1f6886bf8a0b305630686bd04feeb76136022100fd52f890fc86dd1b66edf3798c51cda58cff07559fe9ea37f851eeec416fd052:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,43 @@
id: k8s-etcd-files-set
info:
  name: Ensure etcd cert and key set
  author: princechaddha
  severity: medium
  description: Checks if the etcd-certfile and etcd-keyfile arguments are properly set in the etcd server configuration, crucial for secure communication.
  impact: |
    If the etcd-certfile and etcd-keyfile arguments are not set, the etcd server might not encrypt its communications, potentially allowing unauthorized access to sensitive data.
  remediation: |
    Configure the etcd server to use etcd-certfile and etcd-keyfile arguments that point to valid certificate and key files respectively. This ensures that communications to and from the etcd server are properly encrypted.
  reference:
    - https://etcd.io/docs/v3.4.0/op-guide/security/
  tags: cloud,devops,kubernetes,devsecops,etcd,k8s,k8s-cluster-security
variables:
  argument: "etcd-certfile or etcd-keyfile"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the etcd container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=etcd -o jsonpath="{.items[*].spec.containers[*].command}"

    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'etcd'
      # Require BOTH flags to be present; negated, this matcher fires when
      # EITHER one is missing. (Without `condition: and` the word default is
      # OR, whose negation only fired when both flags were absent — a config
      # missing just one of the two was silently passed.)
      - type: word
        condition: and
        words:
          - "etcd-certfile"
          - "etcd-keyfile"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"etcd server configuration is missing the " + argument + " arguments."'
# NOTE(review): template body changed — signature below is stale and must be regenerated.
# digest: 4a0a00473045022100cfc23ca747bd1aecd67bf39514f649aaaff7816196d78c5dc95666a03cb4c5090220365eb627df9b1bca710f3a45ef2371ab91d59dd2e43f3d180cd95b119ca758b3:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: k8s-ns-usage-check
info:
  name: Ensure namespaces are utilized
  author: princechaddha
  severity: info
  description: Checks if Kubernetes namespaces are actively used to separate resources, which is critical for resource organization and access control.
  impact: |
    Lack of namespaces usage can lead to disorganized resources and potentially flawed access controls, impacting security and management.
  remediation: |
    Implement and use namespaces to organize resources within the Kubernetes cluster effectively. Define access controls and resource quotas on a per-namespace basis.
  reference:
    - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
  tags: cloud,devops,kubernetes,devsecops,namespaces,k8s,k8s-cluster-security
variables:
  argument: "namespaces"

self-contained: true
code:
  - engine:
      - sh
      - bash
    source: |
      kubectl get namespaces --output=json

    # NOTE(review): a reachable cluster always returns built-in namespaces
    # (default, kube-system, ...), each with kind "Namespace", so the negated
    # matcher below seems unlikely to ever fire — verify the intended
    # condition (e.g. "only default namespaces exist") against real output.
    matchers-condition: and
    matchers:
      - type: word
        words:
          - '"items":'
      - type: word
        words:
          - '"Namespace"'
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"Kubernetes cluster is not utilizing " + argument'
# digest: 490a0046304402202672a3c25ca835a804437f2745bf15f10a66112c320e7a5b51dfe586de57195d0220710bfe5832faacbd8efd9f24794ba191e008f325933688c53fc3a982a784da90:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: k8s-svc-acct-issuer-set
info:
  name: Checks if service-account-issuer is correctly configured
  author: princechaddha
  severity: medium
  description: Checks if the service-account-issuer argument is correctly configured in the API server, critical for issuing valid service tokens.
  impact: |
    If the service-account-issuer argument is not set, the API server may issue tokens that are not accepted by other services, leading to authentication failures.
  remediation: |
    Set the service-account-issuer argument to a valid issuer URL in the API server's startup arguments or configuration file. This ensures the tokens issued are trusted across services.
  reference:
    - https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  tags: cloud,devops,kubernetes,devsecops,api-server,k8s,k8s-cluster-security
variables:
  argument: "service-account-issuer"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the kube-apiserver container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath="{.items[*].spec.containers[*].command}"

    # Fires when the apiserver command is present but the flag is absent
    # (the second matcher is negated).
    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'kube-apiserver'
      - type: word
        words:
          - "service-account-issuer"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"API server configuration lacks the " + argument + " argument."'
# digest: 4a0a00473045022100c383c51f45c32761519dc9ae727df05e29281c3c290d9d57d16db6930fa148b20220228d5e842cdfd0f2c0b6cdf7361d18f45d9ed24f62d49ce47aa81ec2a470e548:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: k8s-svc-acct-key
info:
  name: Ensure service-account-key-file set
  author: princechaddha
  severity: medium
  description: Checks if the service-account-key-file argument is properly set in the API server configuration, which is critical for validating service account tokens.
  impact: |
    The absence of the service-account-key-file argument means that the API server might not perform robust authentication checks for service accounts, potentially allowing unauthorized access.
  remediation: |
    Configure the API server to use a service-account-key-file that points to a valid private key used to sign service account tokens. This setting should be part of the API server startup arguments or in its configuration file.
  reference:
    - https://kubernetes.io/docs/admin/kube-apiserver/
  tags: cloud,devops,kubernetes,security,devsecops,api-server,k8s,k8s-cluster-security
variables:
  argument: "service-account-key-file"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the kube-apiserver container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath="{.items[*].spec.containers[*].command}"

    # Fires when the apiserver command is present but the flag is absent
    # (the second matcher is negated).
    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'kube-apiserver'
      - type: word
        words:
          - "service-account-key-file"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"API server configuration is missing the " + argument + " argument."'
# digest: 4b0a00483046022100d90abd4d95997cdae687e28ede25e595c3567439758ad5e5b8adf28ac88684ff022100a25db689afdc09e1640bf03bd0b212e5e20b0b4b1532723c985c617f99fb5ad8:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,42 @@
id: k8s-svc-acct-lookup-set
info:
  name: Ensure service-account-lookup set
  author: princechaddha
  severity: medium
  description: Checks if the service-account-lookup argument is set to true in the API server configuration, which is essential for verifying service accounts against the stored secrets.
  impact: |
    Without the service-account-lookup argument set to true, the API server may not verify service accounts against stored secrets, potentially allowing unauthorized access.
  remediation: |
    Set the service-account-lookup argument to true in the API server's startup arguments or configuration file to ensure proper verification of service accounts.
  reference:
    - https://kubernetes.io/docs/admin/kube-apiserver/
  tags: cloud,devops,kubernetes,security,devsecops,api-server,k8s,k8s-cluster-security
variables:
  argument: "service-account-lookup=true"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the kube-apiserver container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath="{.items[*].spec.containers[*].command}"

    # Fires when the apiserver command is present but the exact
    # "service-account-lookup=true" setting is absent (the second matcher is
    # negated). Note this matches the literal flag=value string only.
    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'kube-apiserver'
      - type: word
        words:
          - "service-account-lookup=true"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"API server configuration is missing the \"" + argument + "\" argument."'
# digest: 4b0a0048304602210085946e152e8d65fcadb1a22c5eac0e8376ed742a2f8932f74e3dbf2d30411a24022100c5c0c4d7313d6e28cb338c82c20170e4ae7f5b45ae784715be32160b4314e357:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,43 @@
id: k8s-tls-config-set
info:
  name: Ensure TLS config appropriately set
  author: princechaddha
  severity: medium
  description: Checks if the tls-cert-file and tls-private-key-file arguments are properly set in the API server configuration, which is essential for secure communication.
  impact: |
    The absence of tls-cert-file and tls-private-key-file arguments means that the API server may not use TLS for secure communications, leading to potential security risks.
  remediation: |
    Configure the API server to use tls-cert-file and tls-private-key-file that point to a valid certificate and key file respectively. This setting should be part of the API server startup arguments or in its configuration file.
  reference:
    - https://kubernetes.io/docs/admin/kube-apiserver/
  tags: cloud,devops,kubernetes,security,devsecops,api-server,k8s,k8s-cluster-security
variables:
  argument: "tls-cert-file or tls-private-key-file"

self-contained: true
code:
  - engine:
      - sh
      - bash
    # Dumps the kube-apiserver container command so its CLI flags can be inspected.
    source: |
      kubectl get pods -n kube-system -l component=kube-apiserver -o jsonpath="{.items[*].spec.containers[*].command}"

    matchers-condition: and
    matchers:
      - type: word
        words:
          - 'kube-apiserver'
      # Require BOTH flags to be present; negated, this matcher fires when
      # EITHER one is missing. (Without `condition: and` the word default is
      # OR, whose negation only fired when both flags were absent — a config
      # missing just one of the two was silently passed.)
      - type: word
        condition: and
        words:
          - "tls-cert-file"
          - "tls-private-key-file"
        negative: true

    extractors:
      - type: dsl
        dsl:
          - '"API server configuration is missing the " + argument + " argument."'
# NOTE(review): template body changed — signature below is stale and must be regenerated.
# digest: 490a00463044022061ee1577d8528e7614d9ad92ed72f68f00426fc1d950015d2bef6fce8fbf285402207dd62ff126ce6129997240e3edc31f8adaf047cf5c341ba65b0ff1851b35ee99:366f2a24c8eb519f6968bd8801c08ebe

View File

@ -0,0 +1,6 @@
# Nuclei scan profile for scanning aws ACLs
code: true # enable code templates
tags:
- k8s-cluster-security # filter templates with "k8s-cluster-security" tags