test: Add testing for propagating the kubelet annotation on the NodeClaim (#7013)
jonathan-innis committed Sep 18, 2024
1 parent c86e28e commit 0174360
Showing 12 changed files with 54 additions and 20 deletions.
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.2
+controller-gen.kubebuilder.io/version: v0.16.3
name: nodeclaims.karpenter.sh
spec:
group: karpenter.sh
4 changes: 3 additions & 1 deletion charts/karpenter-crd/templates/karpenter.sh_nodepools.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.2
+controller-gen.kubebuilder.io/version: v0.16.3
name: nodepools.karpenter.sh
spec:
group: karpenter.sh
@@ -71,6 +71,8 @@ spec:
from a combination of nodepool and pod scheduling constraints.
properties:
disruption:
+default:
+  consolidateAfter: 0s
description: Disruption contains the parameters that relate to Karpenter's disruption logic
properties:
budgets:
2 changes: 1 addition & 1 deletion go.mod
@@ -32,7 +32,7 @@ require (
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd
sigs.k8s.io/controller-runtime v0.18.4
-sigs.k8s.io/karpenter v1.0.1
+sigs.k8s.io/karpenter v1.0.2-0.20240918012643-8761b2d3add5
sigs.k8s.io/yaml v1.4.0
)

4 changes: 2 additions & 2 deletions go.sum
@@ -761,8 +761,8 @@ sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHv
sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/karpenter v1.0.1 h1:IUx0DJ+NcdHyDnGhgbHtIlEzMIb38ElJOKmZ/oUpuqA=
-sigs.k8s.io/karpenter v1.0.1/go.mod h1:3NLmsnHHw8p4VutpjTOPUZyhE3qH6yGTs8O94Lsu8uw=
+sigs.k8s.io/karpenter v1.0.2-0.20240918012643-8761b2d3add5 h1:Z25Su8k1kfQN78arO9AKqoKrgyvo8DiO2l2I83gQ2aI=
+sigs.k8s.io/karpenter v1.0.2-0.20240918012643-8761b2d3add5/go.mod h1:3NLmsnHHw8p4VutpjTOPUZyhE3qH6yGTs8O94Lsu8uw=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
2 changes: 1 addition & 1 deletion pkg/apis/crds/karpenter.sh_nodeclaims.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.2
+controller-gen.kubebuilder.io/version: v0.16.3
name: nodeclaims.karpenter.sh
spec:
group: karpenter.sh
4 changes: 3 additions & 1 deletion pkg/apis/crds/karpenter.sh_nodepools.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
-controller-gen.kubebuilder.io/version: v0.16.2
+controller-gen.kubebuilder.io/version: v0.16.3
name: nodepools.karpenter.sh
spec:
group: karpenter.sh
@@ -71,6 +71,8 @@ spec:
from a combination of nodepool and pod scheduling constraints.
properties:
disruption:
+default:
+  consolidateAfter: 0s
description: Disruption contains the parameters that relate to Karpenter's disruption logic
properties:
budgets:
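The new default block on the disruption field above comes from regenerating the CRDs against the newer sigs.k8s.io/karpenter module pulled in by the go.mod bump in this commit. As a rough sketch only, assuming the upstream v1 API types follow the usual controller-gen pattern, a default like this is generated from a kubebuilder marker on the Disruption field; the types below are illustrative, not copied from upstream:

    // Hypothetical upstream definition that would generate the CRD default shown above.
    type Disruption struct {
        // ConsolidateAfter is how long Karpenter waits before consolidating a node
        // (the real field is a duration wrapper type; a string is used here for brevity).
        ConsolidateAfter string `json:"consolidateAfter"`
        // ... other fields elided
    }

    type NodePoolSpec struct {
        // +kubebuilder:default:={consolidateAfter: "0s"}
        Disruption Disruption `json:"disruption"`
    }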
6 changes: 1 addition & 5 deletions pkg/cloudprovider/cloudprovider.go
@@ -94,11 +94,7 @@ func (c *CloudProvider) Create(ctx context.Context, nodeClaim *karpv1.NodeClaim)
return nil, cloudprovider.NewNodeClassNotReadyError(fmt.Errorf("EC2NodeClass %q is incompatible with Karpenter v1, specify your Ubuntu AMIs in your AMISelectorTerms", nodeClass.Name))
}
// TODO: Remove this after v1
-nodePool, err := utils.ResolveNodePoolFromNodeClaim(ctx, c.kubeClient, nodeClaim)
-if err != nil {
-return nil, err
-}
-kubeletHash, err := utils.GetHashKubelet(nodePool, nodeClass)
+kubeletHash, err := utils.GetHashKubeletWithNodeClaim(nodeClaim, nodeClass)
if err != nil {
return nil, err
}
2 changes: 1 addition & 1 deletion pkg/cloudprovider/drift.go
@@ -141,7 +141,7 @@ func (c *CloudProvider) areStaticFieldsDrifted(nodeClaim *karpv1.NodeClaim, node

// Remove once v1beta1 is dropped
func (c *CloudProvider) isKubeletConfigurationDrifted(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass, nodePool *karpv1.NodePool) (cloudprovider.DriftReason, error) {
-kubeletHash, err := utils.GetHashKubelet(nodePool, nodeClass)
+kubeletHash, err := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)
if err != nil {
return "", err
}
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclass/hash/controller.go
@@ -99,7 +99,7 @@ func (c *Controller) updateNodeClaimHash(ctx context.Context, nodeClass *v1.EC2N
if err != nil {
return err
}
-kubeletHash, err := utils.GetHashKubelet(nodePool, nodeClass)
+kubeletHash, err := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)
if err != nil {
return err
}
6 changes: 3 additions & 3 deletions pkg/controllers/nodeclass/hash/suite_test.go
@@ -171,7 +171,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
},
})
ExpectApplied(ctx, env.Client, nodeClass, nodeClaim, nodePool)
-expectedHash, _ := utils.GetHashKubelet(nodePool, nodeClass)
+expectedHash, _ := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)

ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
@@ -201,7 +201,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
PodsPerCore: lo.ToPtr(int32(9334283)),
}
ExpectApplied(ctx, env.Client, nodeClass, nodeClaim, nodePool)
-expectedHash, _ := utils.GetHashKubelet(nodePool, nodeClass)
+expectedHash, _ := utils.GetHashKubeletWithNodePool(nodePool, nodeClass)

ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
@@ -435,7 +435,7 @@ var _ = Describe("NodeClass Hash Controller", func() {
PodsPerCore: lo.ToPtr(int32(9334283)),
}
ExpectApplied(ctx, env.Client, nodeClass, nodeClaim, nodePool)
-expectedHash, _ := utils.GetHashKubelet(nil, nodeClass)
+expectedHash, _ := utils.GetHashKubeletWithNodePool(nil, nodeClass)

ExpectObjectReconciled(ctx, env.Client, hashController, nodeClass)
nodeClaim = ExpectExists(ctx, env.Client, nodeClaim)
14 changes: 13 additions & 1 deletion pkg/utils/utils.go
@@ -142,7 +142,19 @@ func parseKubeletConfiguration(annotation string) (*v1.KubeletConfiguration, err
}, nil
}

-func GetHashKubelet(nodePool *karpv1.NodePool, nodeClass *v1.EC2NodeClass) (string, error) {
+func GetHashKubeletWithNodeClaim(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass) (string, error) {
+kubelet, err := GetKubeletConfigurationWithNodeClaim(nodeClaim, nodeClass)
+if err != nil {
+return "", err
+}
+return fmt.Sprint(lo.Must(hashstructure.Hash(kubelet, hashstructure.FormatV2, &hashstructure.HashOptions{
+SlicesAsSets: true,
+IgnoreZeroValue: true,
+ZeroNil: true,
+}))), nil
+}
+
+func GetHashKubeletWithNodePool(nodePool *karpv1.NodePool, nodeClass *v1.EC2NodeClass) (string, error) {
kubelet, err := GetKubletConfigurationWithNodePool(nodePool, nodeClass)
if err != nil {
return "", err
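The hunk above only shows the new hashing helpers; the GetKubeletConfigurationWithNodeClaim call they rely on is outside this diff. As a rough sketch, assuming it mirrors the existing annotation-parsing path (parseKubeletConfiguration) and the kubelet compatibility annotation the new end-to-end test checks for, it presumably looks something like this:

    // Sketch only (not part of this diff): resolve the kubelet configuration directly
    // from the NodeClaim's compatibility annotation, falling back to the EC2NodeClass,
    // without resolving the owning NodePool first.
    func GetKubeletConfigurationWithNodeClaim(nodeClaim *karpv1.NodeClaim, nodeClass *v1.EC2NodeClass) (*v1.KubeletConfiguration, error) {
        if annotation, ok := nodeClaim.Annotations[karpv1.KubeletCompatibilityAnnotationKey]; ok {
            return parseKubeletConfiguration(annotation)
        }
        return nodeClass.Spec.Kubelet, nil
    }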
26 changes: 24 additions & 2 deletions test/suites/scheduling/suite_test.go
@@ -15,6 +15,7 @@ limitations under the License.
package scheduling_test

import (
"encoding/json"
"fmt"
"testing"
"time"
@@ -26,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"

karpv1 "sigs.k8s.io/karpenter/pkg/apis/v1"
"sigs.k8s.io/karpenter/pkg/test"
@@ -99,6 +101,28 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
env.ExpectCreatedNodeCount("==", 1)
Expect(env.GetNode(pod.Spec.NodeName).Annotations).To(And(HaveKeyWithValue("foo", "bar"), HaveKeyWithValue(karpv1.DoNotDisruptAnnotationKey, "true")))
})
It("should ensure the NodePool and the NodeClaim use the same view of the v1beta1 kubelet configuration", func() {
v1beta1NodePool := &v1beta1.NodePool{}
Expect(nodePool.ConvertTo(env.Context, v1beta1NodePool)).To(Succeed())

// Allow a reasonable number of pods to schedule
v1beta1NodePool.Spec.Template.Spec.Kubelet = &v1beta1.KubeletConfiguration{
PodsPerCore: lo.ToPtr(int32(2)),
}
kubeletHash, err := json.Marshal(v1beta1NodePool.Spec.Template.Spec.Kubelet)
Expect(err).ToNot(HaveOccurred())
// Ensure that the v1 version of the NodeClass has a configuration that won't let pods schedule
nodeClass.Spec.Kubelet = &v1.KubeletConfiguration{
MaxPods: lo.ToPtr(int32(0)),
}
pod := test.Pod()
env.ExpectCreated(nodeClass, v1beta1NodePool, pod)

nodeClaim := env.EventuallyExpectCreatedNodeClaimCount("==", 1)[0]
Expect(nodeClaim.Annotations).To(HaveKeyWithValue(karpv1.KubeletCompatibilityAnnotationKey, string(kubeletHash)))
env.EventuallyExpectCreatedNodeCount("==", 1)
env.EventuallyExpectHealthy(pod)
})

Context("Labels", func() {
It("should support well-known labels for instance type selection", func() {
@@ -487,7 +511,6 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
Expect(lo.FromPtr(env.GetInstance(pod.Spec.NodeName).InstanceType)).To(Equal("c5.large"))
Expect(env.GetNode(pod.Spec.NodeName).Labels[karpv1.NodePoolLabelKey]).To(Equal(nodePoolHighPri.Name))
})

DescribeTable(
"should provision a right-sized node when a pod has InitContainers (cpu)",
func(expectedNodeCPU string, containerRequirements corev1.ResourceRequirements, initContainers ...corev1.Container) {
@@ -624,7 +647,6 @@ var _ = Describe("Scheduling", Ordered, ContinueOnFailure, func() {
env.ExpectCreated(nodePool, nodeClass, pod)
env.EventuallyExpectHealthy(pod)
})

It("should provision a node for a pod with overlapping zone and zone-id requirements", func() {
subnetInfo := lo.UniqBy(env.GetSubnetInfo(map[string]string{"karpenter.sh/discovery": env.ClusterName}), func(s environmentaws.SubnetInfo) string {
return s.Zone
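The new test above pins down the contract the rest of the commit depends on: the v1beta1 kubelet block is expected to land on the NodeClaim, JSON-serialized, under karpv1.KubeletCompatibilityAnnotationKey. A minimal sketch of that propagation, with an illustrative helper name (the real logic lives in the upstream sigs.k8s.io/karpenter conversion and launch paths, not in this commit):

    // Sketch only: serialize the v1beta1 kubelet block and stamp it onto the
    // NodeClaim as the compatibility annotation the test asserts on.
    func propagateKubeletAnnotation(nodePool *v1beta1.NodePool, nodeClaim *karpv1.NodeClaim) error {
        raw, err := json.Marshal(nodePool.Spec.Template.Spec.Kubelet)
        if err != nil {
            return err
        }
        if nodeClaim.Annotations == nil {
            nodeClaim.Annotations = map[string]string{}
        }
        nodeClaim.Annotations[karpv1.KubeletCompatibilityAnnotationKey] = string(raw)
        return nil
    }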
