8 changes: 0 additions & 8 deletions cmd/main.go
@@ -236,14 +236,6 @@ func main() {
os.Exit(1)
}

if err = (&controller.NodeEvictionLabelReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Node")
os.Exit(1)
}

if err = (&controller.NodeDecommissionReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
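The block deleted above unregistered the NodeEvictionLabelReconciler; its label-driven eviction signalling is superseded by the Hypervisor.Spec.Maintenance patching in the next file. For orientation, a minimal sketch of the conventional controller-runtime registration such a Node-watching reconciler uses — this is the standard builder chain, not code from this repository:

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// Sketch only: the usual SetupWithManager for a reconciler that
// watches core Nodes, as the removed NodeEvictionLabelReconciler
// presumably did. The builder chain follows controller-runtime
// conventions; the actual implementation may have differed.
func (r *NodeEvictionLabelReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Node{}).
		Complete(r)
}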
45 changes: 28 additions & 17 deletions internal/controller/gardener_node_lifecycle_controller.go
@@ -32,7 +32,6 @@ import (
corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@@ -72,40 +71,52 @@ func (r *GardenerNodeLifecycleController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
return ctrl.Result{}, k8sclient.IgnoreNotFound(err)
}

hv := kvmv1.Hypervisor{}
if err := r.Get(ctx, k8sclient.ObjectKey{Name: req.Name}, &hv); k8sclient.IgnoreNotFound(err) != nil {
hv := &kvmv1.Hypervisor{}
if err := r.Get(ctx, k8sclient.ObjectKey{Name: req.Name}, hv); k8sclient.IgnoreNotFound(err) != nil {
return ctrl.Result{}, err
}

if !hv.Spec.LifecycleEnabled {
// Nothing to be done
return ctrl.Result{}, nil
}

if isTerminating(node) {
changed, err := setNodeLabels(ctx, r.Client, node, map[string]string{labelEvictionRequired: valueReasonTerminating})
if changed || err != nil {
return ctrl.Result{}, err
// Only if the maintenance controller is not active
if _, found := node.Labels["cloud.sap/maintenance-profile"]; !found {
// Sync the terminating status into the hypervisor spec
if isTerminating(node) && hv.Spec.Maintenance != kvmv1.MaintenanceTermination {
base := hv.DeepCopy()
hv.Spec.Maintenance = kvmv1.MaintenanceTermination
if err := r.Patch(ctx, hv, k8sclient.MergeFromWithOptions(base, k8sclient.MergeFromWithOptimisticLock{}), k8sclient.FieldOwner(MaintenanceControllerName)); err != nil {
return ctrl.Result{}, err
}
}
}

// We do not care about the particular value, as long as it isn't an error
var minAvailable int32 = 1
evictionValue, found := node.Labels[labelEvictionApproved]
if found && evictionValue != "false" {

// Onboarding is not in progress anymore, i.e. the host is onboarded
onboardingCompleted := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeOnboarding)
// Evicting is not in progress anymore, i.e. the host is empty
evictionComplete := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeEvicting)

if evictionComplete {
minAvailable = 0

if onboardingCompleted && isTerminating(node) {
// Onboarded & terminating & eviction complete -> disable HA
if err := disableInstanceHA(hv); err != nil {
return ctrl.Result{}, err
}
}
}

if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
return r.ensureBlockingPodDisruptionBudget(ctx, node, minAvailable)
}); err != nil {
if err := r.ensureBlockingPodDisruptionBudget(ctx, node, minAvailable); err != nil {
return ctrl.Result{}, err
}

onboardingCompleted := meta.IsStatusConditionFalse(hv.Status.Conditions, kvmv1.ConditionTypeOnboarding)

if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
return r.ensureSignallingDeployment(ctx, node, minAvailable, onboardingCompleted)
}); err != nil {
if err := r.ensureSignallingDeployment(ctx, node, minAvailable, onboardingCompleted); err != nil {
return ctrl.Result{}, err
}

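A note on the retry change in this file: the old code wrapped writes in retry.RetryOnConflict and retried in-process, while the new code patches with an optimistic lock, so a concurrent write surfaces as a Conflict error, the reconcile returns it, and controller-runtime requeues. A minimal, self-contained sketch of that pattern — the helper name is illustrative, not from the repository, while MaintenanceControllerName is the field owner used in the diff above:

import (
	"context"

	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"

	kvmv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
)

// markTerminating is a hypothetical helper mirroring the patch in the
// diff above. MergeFromWithOptimisticLock embeds the base object's
// resourceVersion in the patch, so the API server rejects it with a
// Conflict if the Hypervisor changed in the meantime.
func markTerminating(ctx context.Context, c k8sclient.Client, hv *kvmv1.Hypervisor) error {
	base := hv.DeepCopy() // snapshot before mutating; used as the merge base
	hv.Spec.Maintenance = kvmv1.MaintenanceTermination
	return c.Patch(ctx, hv,
		k8sclient.MergeFromWithOptions(base, k8sclient.MergeFromWithOptimisticLock{}),
		k8sclient.FieldOwner(MaintenanceControllerName))
}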
120 changes: 103 additions & 17 deletions internal/controller/gardener_node_lifecycle_controller_test.go
@@ -18,51 +18,137 @@ limitations under the License.
package controller

import (
"fmt"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"

kvmv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
)

var _ = Describe("Gardener Maintenance Controller", func() {
const nodeName = "node-test"
var controller *GardenerNodeLifecycleController
var (
controller *GardenerNodeLifecycleController
name = types.NamespacedName{Name: nodeName}
reconcileReq = ctrl.Request{NamespacedName: name}
maintenanceName = types.NamespacedName{Name: fmt.Sprintf("maint-%v", nodeName), Namespace: "kube-system"}
)

BeforeEach(func(ctx SpecContext) {
controller = &GardenerNodeLifecycleController{
Client: k8sClient,
Scheme: k8sClient.Scheme(),
}

By("creating the namespace for the reconciler")
ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "monsoon3"}}
Expect(client.IgnoreAlreadyExists(k8sClient.Create(ctx, ns))).To(Succeed())

By("creating the core resource for the Kind Node")
resource := &corev1.Node{
node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
Labels: map[string]string{labelEvictionRequired: "true"},
Name: nodeName,
},
}
Expect(k8sClient.Create(ctx, resource)).To(Succeed())
Expect(k8sClient.Create(ctx, node)).To(Succeed())
DeferCleanup(func(ctx SpecContext) {
Expect(client.IgnoreNotFound(k8sClient.Delete(ctx, resource))).To(Succeed())
By("Cleanup the specific node")
Expect(k8sClient.Delete(ctx, node)).To(Succeed())
})

By("creating the core resource for the Kind hypervisor")
hypervisor := &kvmv1.Hypervisor{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Spec: kvmv1.HypervisorSpec{
LifecycleEnabled: true,
},
}
Expect(k8sClient.Create(ctx, hypervisor)).To(Succeed())
DeferCleanup(func(ctx SpecContext) {
Expect(k8sClient.Delete(ctx, hypervisor)).To(Succeed())
})
})

Context("When reconciling a node", func() {
It("should successfully reconcile the resource", func(ctx SpecContext) {
req := ctrl.Request{
NamespacedName: types.NamespacedName{Name: nodeName},
}
Context("When reconciling a terminating node", func() {
BeforeEach(func(ctx SpecContext) {
By("Marking the node as terminating")
node := &corev1.Node{}
Expect(k8sClient.Get(ctx, name, node)).To(Succeed())
node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{
Type: "Terminating",
})
Expect(k8sClient.Status().Update(ctx, node)).To(Succeed())
})

It("should successfully reconcile the resource", func(ctx SpecContext) {
By("Reconciling the created resource")
_, err := controller.Reconcile(ctx, req)
_, err := controller.Reconcile(ctx, reconcileReq)
Expect(err).NotTo(HaveOccurred())

hypervisor := &kvmv1.Hypervisor{}
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
Expect(hypervisor.Spec.Maintenance).To(Equal(kvmv1.MaintenanceTermination))
})
})

Context("When reconciling a node", func() {
JustBeforeEach(func(ctx SpecContext) {
_, err := controller.Reconcile(ctx, reconcileReq)
Expect(err).NotTo(HaveOccurred())
})
It("should create a poddisruptionbudget", func(ctx SpecContext) {
pdb := &policyv1.PodDisruptionBudget{}
Expect(k8sClient.Get(ctx, maintenanceName, pdb)).To(Succeed())
Expect(pdb.Spec.MinAvailable).To(HaveField("IntVal", BeNumerically("==", 1)))
})

It("should create a failing deployment to signal onboarding not being completed", func(ctx SpecContext) {
dep := &appsv1.Deployment{}
Expect(k8sClient.Get(ctx, maintenanceName, dep)).To(Succeed())
Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(1))
Expect(dep.Spec.Template.Spec.Containers[0].StartupProbe.Exec.Command).To(Equal([]string{"/bin/false"}))
})

When("the node has been onboarded", func() {
BeforeEach(func(ctx SpecContext) {
hypervisor := &kvmv1.Hypervisor{}
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{
Type: kvmv1.ConditionTypeOnboarding,
Status: metav1.ConditionFalse,
Reason: "dontcare",
Message: "dontcare",
})
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed())
})

It("should create a deployment with onboarding completed", func(ctx SpecContext) {
dep := &appsv1.Deployment{}
Expect(k8sClient.Get(ctx, maintenanceName, dep)).To(Succeed())
Expect(dep.Spec.Template.Spec.Containers).To(HaveLen(1))
Expect(dep.Spec.Template.Spec.Containers[0].StartupProbe.Exec.Command).To(Equal([]string{"/bin/true"}))
})
})

When("the node has been evicted", func() {
BeforeEach(func(ctx SpecContext) {
hypervisor := &kvmv1.Hypervisor{}
Expect(k8sClient.Get(ctx, name, hypervisor)).To(Succeed())
meta.SetStatusCondition(&hypervisor.Status.Conditions, metav1.Condition{
Type: kvmv1.ConditionTypeEvicting,
Status: metav1.ConditionFalse,
Reason: "dontcare",
Message: "dontcare",
})
Expect(k8sClient.Status().Update(ctx, hypervisor)).To(Succeed())
})
})

})
})
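The terminating-node test above marks the node by appending a NodeCondition of type "Terminating" and then expects the controller to set Spec.Maintenance accordingly. The isTerminating helper itself is not shown in this diff; a plausible sketch consistent with that test, assuming it inspects only the condition type:

import corev1 "k8s.io/api/core/v1"

// isTerminating reports whether the node carries a "Terminating"
// condition. Sketch inferred from the test above, which appends the
// condition without setting a status; the repository's implementation
// may additionally check the condition's Status field.
func isTerminating(node *corev1.Node) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == "Terminating" {
			return true
		}
	}
	return false
}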