Merge pull request #22 from digitalocean/varsha/hostpath-pv

HostPath check: checks whether any pods use hostPath volumes
Varsha Varadarajan 2019-06-26 09:05:00 -04:00 committed by GitHub
commit 8f324cf6d9
4 changed files with 213 additions and 0 deletions


@@ -187,3 +187,68 @@ Group: `workload-health`
Description: This check reports unhealthy pods so that users can find and address them before an upgrade. If there are failed or otherwise suspicious pods, the check flags them.
This check is not run by default. Specify the group name or the check name in order to run it.
###### HostPath Volume
Name: `hostpath-volume`
Group: `basic`
Description: Using hostPath volumes is best avoided because:
- Pods with identical configuration (such as those created from a podTemplate) may behave differently on different nodes because the files present on each node differ.
- When Kubernetes adds resource-aware scheduling, as is planned, it will not be able to account for resources used by a hostPath volume.
- The files or directories created on the underlying hosts are only writable by root, so you either need to run your process as root in a privileged container or modify the file permissions on the host in order to write to a hostPath volume.

For more details about hostPath, please refer to the Kubernetes [documentation](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath).
Example:
```yaml
# Don't do this
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: docker.io/nginx:1.17.0
    name: test-container
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /data
      type: Directory
```
How to fix:
```yaml
# Use other volume sources. See https://kubernetes.io/docs/concepts/storage/volumes/
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  containers:
  - image: docker.io/nginx:1.17.0
    name: test-container
    volumeMounts:
    - mountPath: /test-pd
      name: test-volume
  volumes:
  - name: test-volume
    cephfs:
      monitors:
      - 10.16.154.78:6789
      user: admin
      secretFile: "/etc/ceph/admin.secret"
      readOnly: true
```

checks/basic/hostpath.go Normal file

@@ -0,0 +1,54 @@
package basic

import (
    "fmt"

    "github.com/digitalocean/clusterlint/checks"
    "github.com/digitalocean/clusterlint/kube"
)

func init() {
    checks.Register(&hostPathCheck{})
}

type hostPathCheck struct{}

// Name returns a unique name for this check.
func (h *hostPathCheck) Name() string {
    return "hostpath-volume"
}

// Groups returns a list of group names this check should be part of.
func (h *hostPathCheck) Groups() []string {
    return []string{"basic"}
}

// Description returns a detailed human-readable description of what this check
// does.
func (h *hostPathCheck) Description() string {
    return "Check if there are pods using hostpath volumes"
}

// Run runs this check on a set of Kubernetes objects. It can return warnings
// (low-priority problems) and errors (high-priority problems) as well as an
// error value indicating that the check failed to run.
func (h *hostPathCheck) Run(objects *kube.Objects) ([]checks.Diagnostic, error) {
    var diagnostics []checks.Diagnostic
    for _, pod := range objects.Pods.Items {
        // Copy the range variable so taking the address of its fields below is safe.
        pod := pod
        for _, volume := range pod.Spec.Volumes {
            if volume.VolumeSource.HostPath != nil {
                d := checks.Diagnostic{
                    Severity: checks.Error,
                    Message:  fmt.Sprintf("Avoid using hostpath for volume '%s'.", volume.Name),
                    Kind:     checks.Pod,
                    Object:   &pod.ObjectMeta,
                    Owners:   pod.ObjectMeta.GetOwnerReferences(),
                }
                diagnostics = append(diagnostics, d)
            }
        }
    }
    return diagnostics, nil
}
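
For context (not part of this diff): a minimal sketch of how the check could be exercised directly against a hand-built object set. The `demoHostPathCheck` function name, the `metav1` import, and the assumption that `kube.Objects.Pods` is a `*corev1.PodList` are inferred from the tests below rather than stated in this file.

```go
// Illustrative only: not part of this PR. Builds an object set containing one
// pod that mounts a hostPath volume and runs the check against it.
package basic

import (
    "fmt"

    "github.com/digitalocean/clusterlint/kube"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func demoHostPathCheck() {
    // Assumes kube.Objects.Pods is a *corev1.PodList, as the tests suggest.
    objects := &kube.Objects{
        Pods: &corev1.PodList{
            Items: []corev1.Pod{
                {
                    ObjectMeta: metav1.ObjectMeta{Name: "test-pd", Namespace: "default"},
                    Spec: corev1.PodSpec{
                        Volumes: []corev1.Volume{
                            {
                                Name: "test-volume",
                                VolumeSource: corev1.VolumeSource{
                                    HostPath: &corev1.HostPathVolumeSource{Path: "/data"},
                                },
                            },
                        },
                    },
                },
            },
        },
    }

    check := &hostPathCheck{}
    diagnostics, err := check.Run(objects)
    if err != nil {
        panic(err)
    }
    for _, d := range diagnostics {
        // Prints: Avoid using hostpath for volume 'test-volume'.
        fmt.Println(d.Message)
    }
}
```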


@@ -0,0 +1,87 @@
package basic

import (
    "testing"

    "github.com/digitalocean/clusterlint/checks"
    "github.com/digitalocean/clusterlint/kube"
    "github.com/stretchr/testify/assert"
    corev1 "k8s.io/api/core/v1"
)

func TestHostpathCheckMeta(t *testing.T) {
    hostPathCheck := hostPathCheck{}
    assert.Equal(t, "hostpath-volume", hostPathCheck.Name())
    assert.Equal(t, "Check if there are pods using hostpath volumes", hostPathCheck.Description())
    assert.Equal(t, []string{"basic"}, hostPathCheck.Groups())
}

func TestHostpathCheckRegistration(t *testing.T) {
    hostPathCheck := &hostPathCheck{}
    check, err := checks.Get("hostpath-volume")
    assert.Equal(t, check, hostPathCheck)
    assert.Nil(t, err)
}

func TestHostpathVolumeError(t *testing.T) {
    scenarios := []struct {
        name     string
        arg      *kube.Objects
        expected []checks.Diagnostic
    }{
        {
            name:     "no pods",
            arg:      initPod(),
            expected: nil,
        },
        {
            name:     "pod with no volumes",
            arg:      container("docker.io/nginx:foo"),
            expected: nil,
        },
        {
            name: "pod with other volume",
            arg: volume(corev1.VolumeSource{
                GitRepo: &corev1.GitRepoVolumeSource{Repository: "boo"},
            }),
            expected: nil,
        },
        {
            name: "pod with hostpath volume",
            arg: volume(corev1.VolumeSource{
                HostPath: &corev1.HostPathVolumeSource{Path: "/tmp"},
            }),
            expected: []checks.Diagnostic{
                {
                    Severity: checks.Error,
                    Message:  "Avoid using hostpath for volume 'bar'.",
                    Kind:     checks.Pod,
                    Object:   GetObjectMeta(),
                    Owners:   GetOwners(),
                },
            },
        },
    }

    hostPathCheck := hostPathCheck{}

    for _, scenario := range scenarios {
        t.Run(scenario.name, func(t *testing.T) {
            d, err := hostPathCheck.Run(scenario.arg)
            assert.NoError(t, err)
            assert.ElementsMatch(t, scenario.expected, d)
        })
    }
}

func volume(volumeSrc corev1.VolumeSource) *kube.Objects {
    objs := initPod()
    objs.Pods.Items[0].Spec = corev1.PodSpec{
        Volumes: []corev1.Volume{
            {
                Name:         "bar",
                VolumeSource: volumeSrc,
            },
        },
    }
    return objs
}
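
The table-driven test above relies on shared helpers (`initPod`, `container`, `GetObjectMeta`, `GetOwners`) defined elsewhere in the package's test files and not shown in this diff. A rough sketch of what they might look like follows; the pod name, container name, and other field values here are hypothetical placeholders, not the actual definitions from this PR.

```go
// Illustrative sketch of the shared test helpers; names and values are assumptions.
package basic

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/digitalocean/clusterlint/kube"
)

// objectMeta is the metadata attached to the single pod the helpers build.
var objectMeta = metav1.ObjectMeta{Name: "pod_foo"}

// initPod returns an object set containing one bare pod with no spec.
func initPod() *kube.Objects {
    return &kube.Objects{
        Pods: &corev1.PodList{
            Items: []corev1.Pod{
                {ObjectMeta: objectMeta},
            },
        },
    }
}

// container returns an object set whose pod runs a single container with the
// given image and declares no volumes.
func container(image string) *kube.Objects {
    objs := initPod()
    objs.Pods.Items[0].Spec = corev1.PodSpec{
        Containers: []corev1.Container{{Name: "bar", Image: image}},
    }
    return objs
}

// GetObjectMeta returns a pointer to the shared pod metadata, matching the
// Object field expected in the diagnostics above.
func GetObjectMeta() *metav1.ObjectMeta {
    return &objectMeta
}

// GetOwners returns the owner references of the shared pod.
func GetOwners() []metav1.OwnerReference {
    return objectMeta.GetOwnerReferences()
}
```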


@@ -16,6 +16,13 @@ func TestMeta(t *testing.T) {
    assert.Equal(t, []string{"workload-health"}, podStatusCheck.Groups())
}

func TestPodStateCheckRegistration(t *testing.T) {
    podStatusCheck := &podStatusCheck{}
    check, err := checks.Get("pod-state")
    assert.Equal(t, check, podStatusCheck)
    assert.Nil(t, err)
}

func TestPodStateError(t *testing.T) {
    scenarios := []struct {
        name string