Check for config map references in nodes and projected volumes

varsha/versions
Varsha Varadarajan 2019-07-01 10:15:44 -04:00
parent 2d097ba31a
commit d0eb5a4b0a
5 changed files with 111 additions and 25 deletions
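For context, the two reference sites this commit starts counting as "used" are a node's dynamic kubelet config source and the ConfigMap sources of a projected volume. Below is a minimal sketch of those shapes built with the upstream corev1 types; the object, volume, and namespace names are illustrative, not taken from the diff.

```go
package sketch

import corev1 "k8s.io/api/core/v1"

// nodeWithConfigSource builds a node whose kubelet configuration is sourced
// from a ConfigMap; the check now counts that ConfigMap as used.
func nodeWithConfigSource() corev1.Node {
	return corev1.Node{
		Spec: corev1.NodeSpec{
			ConfigSource: &corev1.NodeConfigSource{
				ConfigMap: &corev1.ConfigMapNodeConfigSource{
					Name:      "kubelet-config", // illustrative
					Namespace: "kube-system",    // illustrative
				},
			},
		},
	}
}

// projectedConfigMapVolume builds a volume that pulls a ConfigMap in through
// a projected source rather than a plain configMap volume; the check now
// walks these sources too.
func projectedConfigMapVolume() corev1.Volume {
	return corev1.Volume{
		Name: "settings", // illustrative
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{
						ConfigMap: &corev1.ConfigMapProjection{
							LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"}, // illustrative
						},
					},
				},
			},
		},
	}
}
```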

View File

@@ -277,7 +277,6 @@ Description: This check reports all the persistent volume claims in the cluster
How to fix:
```bash
<<<<<<< HEAD
kubectl delete pvc <unused pvc>
```

View File

@@ -27,28 +27,23 @@ func (c *unusedClaimCheck) Description() string {
return "Check if there are unused persistent volume claims in the cluster"
}
type identifier struct {
Name string
Namespace string
}
// Run runs this check on a set of Kubernetes objects. It can return warnings
// (low-priority problems) and errors (high-priority problems) as well as an
// error value indicating that the check failed to run.
func (c *unusedClaimCheck) Run(objects *kube.Objects) ([]checks.Diagnostic, error) {
var diagnostics []checks.Diagnostic
used := make(map[identifier]bool)
used := make(map[kube.Identifier]bool)
for _, pod := range objects.Pods.Items {
for _, volume := range pod.Spec.Volumes {
claim := volume.VolumeSource.PersistentVolumeClaim
if claim != nil {
used[identifier{Name: claim.ClaimName, Namespace: pod.GetNamespace()}] = true
used[kube.Identifier{Name: claim.ClaimName, Namespace: pod.GetNamespace()}] = true
}
}
}
for _, claim := range objects.PersistentVolumeClaims.Items {
if _, ok := used[identifier{Name: claim.GetName(), Namespace: claim.GetNamespace()}]; !ok {
if _, ok := used[kube.Identifier{Name: claim.GetName(), Namespace: claim.GetNamespace()}]; !ok {
d := checks.Diagnostic{
Severity: checks.Warning,
Message: "Unused persistent volume claim",

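Pieced together, the methods shown for this check outline the contract every check implements. The sketch below restates that contract with reduced local stand-ins for the repository's kube.Objects and checks.Diagnostic types; only the fields visible in this diff are kept, and the real interface may declare additional methods.

```go
package sketch

// Objects is a stand-in for kube.Objects: the snapshot of cluster objects
// (nodes, pods, config maps, persistent volume claims, ...) a check inspects.
type Objects struct{}

// Severity and Diagnostic are stand-ins for the checks package types, trimmed
// to the two fields this diff sets.
type Severity string

type Diagnostic struct {
	Severity Severity
	Message  string
}

// Check is the contract implied by the methods visible in this diff: a unique
// name, a human-readable description, and Run, which reports low-priority
// warnings and high-priority errors as diagnostics and returns a non-nil
// error only if the check itself failed to run.
type Check interface {
	Name() string
	Description() string
	Run(objects *Objects) ([]Diagnostic, error)
}
```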
View File

@@ -15,11 +15,6 @@ func init() {
type unusedCMCheck struct{}
type identifier struct {
Name string
Namespace string
}
// Name returns a unique name for this check.
func (c *unusedCMCheck) Name() string {
return "unused-config-map"
@@ -42,13 +37,22 @@ func (c *unusedCMCheck) Description() string {
func (c *unusedCMCheck) Run(objects *kube.Objects) ([]checks.Diagnostic, error) {
var diagnostics []checks.Diagnostic
used, err := checkReferences(objects)
used, err := checkPodReferences(objects)
if err != nil {
return nil, err
}
nodeRefs, err := checkNodeReferences(objects)
if err != nil {
return nil, err
}
for k, v := range nodeRefs {
used[k] = v
}
for _, cm := range objects.ConfigMaps.Items {
if _, ok := used[identifier{Name: cm.GetName(), Namespace: cm.GetNamespace()}]; !ok {
if _, ok := used[kube.Identifier{Name: cm.GetName(), Namespace: cm.GetNamespace()}]; !ok {
cm := cm
d := checks.Diagnostic{
Severity: checks.Warning,
@@ -63,9 +67,28 @@ func (c *unusedCMCheck) Run(objects *kube.Objects) ([]checks.Diagnostic, error)
return diagnostics, nil
}
//checkReferences checks each pod for config map references in volumes and environment variables
func checkReferences(objects *kube.Objects) (map[identifier]bool, error) {
used := make(map[identifier]bool)
func checkNodeReferences(objects *kube.Objects) (map[kube.Identifier]bool, error) {
used := make(map[kube.Identifier]bool)
var mu sync.Mutex
var g errgroup.Group
for _, node := range objects.Nodes.Items {
node := node
g.Go(func() error {
source := node.Spec.ConfigSource
if source != nil {
mu.Lock()
used[kube.Identifier{Name: source.ConfigMap.Name, Namespace: source.ConfigMap.Namespace}] = true
mu.Unlock()
}
return nil
})
}
return used, g.Wait()
}
//checkPodReferences checks each pod for config map references in volumes and environment variables
func checkPodReferences(objects *kube.Objects) (map[kube.Identifier]bool, error) {
used := make(map[kube.Identifier]bool)
var mu sync.Mutex
var g errgroup.Group
for _, pod := range objects.Pods.Items {
@@ -76,9 +99,19 @@ func checkReferences(objects *kube.Objects) (map[identifier]bool, error) {
cm := volume.VolumeSource.ConfigMap
if cm != nil {
mu.Lock()
used[identifier{Name: cm.LocalObjectReference.Name, Namespace: namespace}] = true
used[kube.Identifier{Name: cm.LocalObjectReference.Name, Namespace: namespace}] = true
mu.Unlock()
}
if volume.VolumeSource.Projected != nil {
for _, source := range volume.VolumeSource.Projected.Sources {
cm := source.ConfigMap
if cm != nil {
mu.Lock()
used[kube.Identifier{Name: cm.LocalObjectReference.Name, Namespace: namespace}] = true
mu.Unlock()
}
}
}
}
identifiers := checkEnvVars(pod.Spec.Containers, namespace)
identifiers = append(identifiers, checkEnvVars(pod.Spec.InitContainers, namespace)...)
@@ -96,12 +129,12 @@ func checkReferences(objects *kube.Objects) (map[identifier]bool, error) {
}
// checkEnvVars checks for config map references in container environment variables
func checkEnvVars(containers []corev1.Container, namespace string) []identifier {
var refs []identifier
func checkEnvVars(containers []corev1.Container, namespace string) []kube.Identifier {
var refs []kube.Identifier
for _, container := range containers {
for _, env := range container.EnvFrom {
if env.ConfigMapRef != nil {
refs = append(refs, identifier{Name: env.ConfigMapRef.LocalObjectReference.Name, Namespace: namespace})
refs = append(refs, kube.Identifier{Name: env.ConfigMapRef.LocalObjectReference.Name, Namespace: namespace})
}
}
}

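Both new helpers keep the concurrency pattern the existing pod walk already used: an errgroup.Group fans out per-item work while a sync.Mutex guards the shared used map. Here is a stripped-down, self-contained sketch of that pattern, with a plain string key standing in for kube.Identifier. Reading a single pointer per node hardly needs goroutines, but keeping the same shape leaves checkNodeReferences symmetrical with checkPodReferences.

```go
package sketch

import (
	"sync"

	"golang.org/x/sync/errgroup"
)

// collectUsed visits every item concurrently and records the keys it reports.
// The mutex is required because all goroutines write to the same map, and
// g.Wait returns the first non-nil error produced by any goroutine.
func collectUsed(items []string, keysFor func(string) []string) (map[string]bool, error) {
	used := make(map[string]bool)
	var mu sync.Mutex
	var g errgroup.Group
	for _, item := range items {
		item := item // capture the loop variable, as the diff does with node := node
		g.Go(func() error {
			for _, key := range keysFor(item) {
				mu.Lock()
				used[key] = true
				mu.Unlock()
			}
			return nil
		})
	}
	return used, g.Wait()
}
```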
View File

@@ -34,7 +34,7 @@ func TestUnusedConfigMapWarning(t *testing.T) {
}{
{
name: "no config maps",
objs: &kube.Objects{Pods: &corev1.PodList{}, ConfigMaps: &corev1.ConfigMapList{}},
objs: &kube.Objects{Nodes: &corev1.NodeList{}, Pods: &corev1.PodList{}, ConfigMaps: &corev1.ConfigMapList{}},
expected: nil,
},
{
@@ -43,10 +43,20 @@ func TestUnusedConfigMapWarning(t *testing.T) {
expected: nil,
},
{
name: "environment variable references to config map",
name: "environment variable references config map",
objs: configMapEnvSource(),
expected: nil,
},
{
name: "projected volume references config map",
objs: projectedVolume(),
expected: nil,
},
{
name: "node config source references config map",
objs: nodeConfigSource(),
expected: nil,
},
{
name: "unused config map",
objs: initConfigMap(),
@@ -75,6 +85,14 @@ func TestUnusedConfigMapWarning(t *testing.T) {
func initConfigMap() *kube.Objects {
objs := &kube.Objects{
Nodes: &corev1.NodeList{
Items: []corev1.Node{
{
TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{Name: "node_foo"},
},
},
},
Pods: &corev1.PodList{
Items: []corev1.Pod{
{
@@ -95,6 +113,19 @@ func initConfigMap() *kube.Objects {
return objs
}
func nodeConfigSource() *kube.Objects {
objs := initConfigMap()
objs.Nodes.Items[0].Spec = corev1.NodeSpec{
ConfigSource: &corev1.NodeConfigSource{
ConfigMap: &corev1.ConfigMapNodeConfigSource{
Name: "cm_foo",
Namespace: cmNamespace,
},
},
}
return objs
}
func configMapVolume() *kube.Objects {
objs := initConfigMap()
objs.Pods.Items[0].Spec = corev1.PodSpec{
@@ -111,6 +142,28 @@ func configMapVolume() *kube.Objects {
return objs
}
func projectedVolume() *kube.Objects {
objs := initConfigMap()
objs.Pods.Items[0].Spec = corev1.PodSpec{
Volumes: []corev1.Volume{
{
Name: "bar",
VolumeSource: corev1.VolumeSource{
Projected: &corev1.ProjectedVolumeSource{
Sources: []corev1.VolumeProjection{
{
ConfigMap: &corev1.ConfigMapProjection{
LocalObjectReference: corev1.LocalObjectReference{Name: "cm_foo"},
},
},
},
},
},
}},
}
return objs
}
func configMapEnvSource() *kube.Objects {
objs := initConfigMap()
objs.Pods.Items[0].Spec = corev1.PodSpec{

View File

@@ -9,6 +9,12 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
//Identifier is used to identify a specific namespace scoped object.
type Identifier struct {
Name string
Namespace string
}
// Objects encapsulates all the objects from a Kubernetes cluster.
type Objects struct {
Nodes *corev1.NodeList
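Because Identifier contains only comparable fields, both checks can use it directly as a map key, which is exactly how the shared used sets above are built. A tiny self-contained illustration (the names and namespaces are made up):

```go
package main

import "fmt"

// Identifier mirrors the new kube.Identifier: a comparable name/namespace
// pair that can key a map without any custom hashing.
type Identifier struct {
	Name      string
	Namespace string
}

func main() {
	used := map[Identifier]bool{
		{Name: "cm_foo", Namespace: "kube-system"}: true, // illustrative entry
	}
	// Two Identifier values address the same map entry only when both
	// Name and Namespace match.
	fmt.Println(used[Identifier{Name: "cm_foo", Namespace: "kube-system"}]) // true
	fmt.Println(used[Identifier{Name: "cm_foo", Namespace: "default"}])     // false
}
```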