controller-runtime: Can't delete namespaces from test environment

I’d like to use namespaces to isolate my tests from one another. For example, I want to create a new namespace with a random name in my BeforeEach and delete it in AfterEach so that individual tests don’t have to worry about cleaning up after themselves.
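Concretely, the pattern I have in mind looks something like this (a sketch, assuming the usual Ginkgo/Gomega dot-imports and a suite-level k8sClient):

var testNamespace *corev1.Namespace

BeforeEach(func() {
	// A generated name keeps tests from colliding with one another.
	testNamespace = &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "test-ns-"},
	}
	Expect(k8sClient.Create(context.Background(), testNamespace)).To(Succeed())
})

AfterEach(func() {
	// This is the step that never completes under envtest: the namespace
	// picks up a deletion timestamp but stays in Terminating indefinitely.
	Expect(k8sClient.Delete(context.Background(), testNamespace)).To(Succeed())
})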

However, I’ve observed that namespace deletion doesn’t work when using envtest. When I delete a namespace, the deletion timestamps on resources within that namespace remain unset, and the namespace stays in the Terminating phase seemingly forever. The latter holds even if the namespace is empty (e.g., I delete it immediately after creating it).

Is this behavior expected? Is the mechanism that normally executes namespace deletion simply missing from envtest?

About this issue

  • State: open
  • Created 4 years ago
  • Reactions: 16
  • Comments: 16 (9 by maintainers)

Most upvoted comments

Here is the workaround I implemented in my test helper functions. Basically, I use the discovery API to optimistically delete all namespaced resources in the namespace I’m trying to delete, and then update the /finalize subresource of the namespace to remove the kubernetes finalizer. The idea is to approximate (perhaps poorly) what kube-controller-manager would normally do to finalize a terminating namespace.

I brought in k8s.io/client-go to access the /finalize subresource and to interact with the discovery API (though I suspect the latter is also possible with the standard controller-runtime client).
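(The snippet also assumes the usual suite-level globals from an envtest setup, not shown here: testEnv, k8sClient, and the timeout/interval durations passed to Eventually.)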

import (
	"context"
	"strings"

	. "github.com/onsi/gomega"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func deleteAll(objs ...runtime.Object) {
	ctx := context.Background()
	clientGo, err := kubernetes.NewForConfig(testEnv.Config)
	Expect(err).ShouldNot(HaveOccurred())
	for _, obj := range objs {
		Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, obj))).Should(Succeed())

		if ns, ok := obj.(*corev1.Namespace); ok {
			// Normally the kube-controller-manager would handle finalization
			// and garbage collection of namespaces, but with envtest, we aren't
			// running a kube-controller-manager. Instead we're gonna approximate
			// (poorly) the kube-controller-manager by explicitly deleting some
			// resources within the namespace and then removing the `kubernetes`
			// finalizer from the namespace resource so it can finish deleting.
			// Note that any resources within the namespace that we don't
			// successfully delete could reappear if the namespace is ever
			// recreated with the same name.

			// Look up all namespaced resources under the discovery API
			_, apiResources, err := clientGo.Discovery().ServerGroupsAndResources()
			Expect(err).ShouldNot(HaveOccurred())
			namespacedGVKs := make(map[string]schema.GroupVersionKind)
			for _, apiResourceList := range apiResources {
				defaultGV, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
				Expect(err).ShouldNot(HaveOccurred())
				for _, r := range apiResourceList.APIResources {
					if !r.Namespaced || strings.Contains(r.Name, "/") {
						// skip non-namespaced and subresources
						continue
					}
					gvk := schema.GroupVersionKind{
						Group:   defaultGV.Group,
						Version: defaultGV.Version,
						Kind:    r.Kind,
					}
					if r.Group != "" {
						gvk.Group = r.Group
					}
					if r.Version != "" {
						gvk.Version = r.Version
					}
					namespacedGVKs[gvk.String()] = gvk
				}
			}

			// Delete all namespaced resources in this namespace
			for _, gvk := range namespacedGVKs {
				var u unstructured.Unstructured
				u.SetGroupVersionKind(gvk)
				err := k8sClient.DeleteAllOf(ctx, &u, client.InNamespace(ns.Name))
				Expect(client.IgnoreNotFound(ignoreMethodNotAllowed(err))).ShouldNot(HaveOccurred())
			}

			Eventually(func() error {
				key, err := client.ObjectKeyFromObject(ns)
				if err != nil {
					return err
				}
				if err := k8sClient.Get(ctx, key, ns); err != nil {
					return client.IgnoreNotFound(err)
				}
				// remove `kubernetes` finalizer
				const kubernetes = "kubernetes"
				finalizers := []corev1.FinalizerName{}
				for _, f := range ns.Spec.Finalizers {
					if f != kubernetes {
						finalizers = append(finalizers, f)
					}
				}
				ns.Spec.Finalizers = finalizers

				// We have to use the k8s.io/client-go library here because it
				// exposes the ability to update the /finalize subresource on
				// the namespace
				_, err = clientGo.CoreV1().Namespaces().Finalize(ns)
				return err
			}, timeout, interval).Should(Succeed())
		}

		Eventually(func() metav1.StatusReason {
			key, _ := client.ObjectKeyFromObject(obj)
			if err := k8sClient.Get(ctx, key, obj); err != nil {
				return apierrors.ReasonForError(err)
			}
			return ""
		}, timeout, interval).Should(Equal(metav1.StatusReasonNotFound))
	}
}

func ignoreMethodNotAllowed(err error) error {
	if err != nil {
		if apierrors.ReasonForError(err) == metav1.StatusReasonMethodNotAllowed {
			return nil
		}
	}
	return err
}
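With this helper in place, each test’s AfterEach reduces to a single call (hypothetical usage, following the per-test-namespace pattern described in the question):

AfterEach(func() {
	// deleteAll empties the namespace and strips the `kubernetes` finalizer,
	// so the namespace actually disappears instead of hanging in Terminating.
	deleteAll(testNamespace)
})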

Based on what I’m gleaning from https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest, is it correct to assume that one would start a kind cluster prior to starting envtest, and then just ensure envtest is passed the correct rest.Config and UseExistingCluster = true?

Yep, that. It will use your default kubeconfig, so set whatever context you need before starting, and then enable UseExistingCluster.
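For reference, a minimal sketch of that setup (UseExistingCluster is a *bool on envtest.Environment, and the kind cluster must already be running and selected by your kubeconfig context):

useExistingCluster := true
testEnv := &envtest.Environment{
	// Reuse the running kind cluster instead of spinning up a local
	// kube-apiserver and etcd.
	UseExistingCluster: &useExistingCluster,
}
cfg, err := testEnv.Start() // cfg is a rest.Config pointing at the kind cluster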