diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go
index e32e8f9db3..88a21128b8 100644
--- a/pkg/cache/cache_test.go
+++ b/pkg/cache/cache_test.go
@@ -3772,9 +3772,7 @@ func TestSnapshotError(t *testing.T) {
 			features.SetFeatureGateDuringTest(t, features.TopologyAwareScheduling, true)
 			ctx, _ := utiltesting.ContextWithLog(t)
-			topology := *utiltesting.MakeTopology("default").
-				Levels(corev1.LabelHostname).
-				Obj()
+			topology := *utiltesting.MakeDefaultOneLevelTopology("default")
 			flavor := *utiltesting.MakeResourceFlavor("tas-default").
 				TopologyName("default").
 				Obj()
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index e56f011449..1afaed1ae5 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -3815,9 +3815,7 @@ func TestScheduleForTAS(t *testing.T) {
 			Ready().
 			Obj(),
 	}
-	defaultSingleLevelTopology := *utiltesting.MakeTopology("tas-single-level").
-		Levels(corev1.LabelHostname).
-		Obj()
+	defaultSingleLevelTopology := *utiltesting.MakeDefaultOneLevelTopology("tas-single-level")
 	defaultTwoLevelTopology := *utiltesting.MakeTopology("tas-two-level").
 		Levels(tasRackLabel, corev1.LabelHostname).
 		Obj()
diff --git a/pkg/util/testing/defaults.go b/pkg/util/testing/defaults.go
new file mode 100644
index 0000000000..188f389b66
--- /dev/null
+++ b/pkg/util/testing/defaults.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+	corev1 "k8s.io/api/core/v1"
+
+	kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
+)
+
+const (
+	DefaultRackTopologyLevel  = "cloud.provider.com/topology-rack"
+	DefaultBlockTopologyLevel = "cloud.provider.com/topology-block"
+)
+
+// MakeDefaultOneLevelTopology creates a default topology with hostname level.
+func MakeDefaultOneLevelTopology(name string) *kueuealpha.Topology {
+	return MakeTopology(name).
+		Levels(corev1.LabelHostname).
+		Obj()
+}
+
+// MakeDefaultTwoLevelTopology creates a default topology with block and rack levels.
+func MakeDefaultTwoLevelTopology(name string) *kueuealpha.Topology {
+	return MakeTopology(name).
+		Levels(DefaultBlockTopologyLevel, DefaultRackTopologyLevel).
+		Obj()
+}
+
+// MakeDefaultThreeLevelTopology creates a default topology with block, rack and hostname levels.
+func MakeDefaultThreeLevelTopology(name string) *kueuealpha.Topology {
+	return MakeTopology(name).
+		Levels(DefaultBlockTopologyLevel, DefaultRackTopologyLevel, corev1.LabelHostname).
+ Obj() +} diff --git a/test/e2e/singlecluster/tas_test.go b/test/e2e/singlecluster/tas_test.go index c41225d8ed..061e66b964 100644 --- a/test/e2e/singlecluster/tas_test.go +++ b/test/e2e/singlecluster/tas_test.go @@ -63,9 +63,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling", func() { clusterQueue *kueue.ClusterQueue ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("hostname").Levels( - corev1.LabelHostname, - ).Obj() + topology = testing.MakeDefaultOneLevelTopology("hostname") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) onDemandRF = testing.MakeResourceFlavor("on-demand"). @@ -157,7 +155,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling", func() { localQueue *kueue.LocalQueue ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("hostname").Levels(corev1.LabelHostname).Obj() + topology = testing.MakeDefaultOneLevelTopology("hostname") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) onDemandRF = testing.MakeResourceFlavor("on-demand"). @@ -292,7 +290,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling", func() { localQueue *kueue.LocalQueue ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("hostname").Levels(corev1.LabelHostname).Obj() + topology = testing.MakeDefaultOneLevelTopology("hostname") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) onDemandRF = testing.MakeResourceFlavor("on-demand"). diff --git a/test/e2e/tas/job_test.go b/test/e2e/tas/job_test.go index dafa7c7b67..dfd1c8f7b7 100644 --- a/test/e2e/tas/job_test.go +++ b/test/e2e/tas/job_test.go @@ -39,11 +39,9 @@ import ( ) const ( - instanceType = "tas-group" - tasNodeGroupLabel = "cloud.provider.com/node-group" - topologyLevelRack = "cloud.provider.com/topology-rack" - topologyLevelBlock = "cloud.provider.com/topology-block" - extraResource = "example.com/gpu" + instanceType = "tas-group" + tasNodeGroupLabel = "cloud.provider.com/node-group" + extraResource = "example.com/gpu" ) var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { @@ -68,11 +66,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { clusterQueue *kueue.ClusterQueue ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("datacenter").Levels( - topologyLevelBlock, - topologyLevelRack, - corev1.LabelHostname, - ).Obj() + topology = testing.MakeDefaultThreeLevelTopology("datacenter") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -110,7 +104,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { Limit(extraResource, "1"). Obj() sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}). - PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, topologyLevelRack). + PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultRackTopologyLevel). Image(util.E2eTestSleepImage, []string{"100ms"}). Obj() gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed()) @@ -134,7 +128,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { Limit(extraResource, "1"). Obj() sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}). - PodAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, topologyLevelRack). + PodAnnotation(kueuealpha.PodSetPreferredTopologyAnnotation, testing.DefaultRackTopologyLevel). Image(util.E2eTestSleepImage, []string{"100ms"}). 
Obj() gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed()) @@ -187,7 +181,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { Limit(extraResource, "1"). Obj() sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}). - PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, topologyLevelBlock). + PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). Image(util.E2eTestSleepImage, []string{"100ms"}). Obj() gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed()) @@ -241,7 +235,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { Limit(extraResource, "1"). Obj() sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}). - PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, topologyLevelBlock). + PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). Image(util.E2eTestSleepImage, []string{"10ms"}). Obj() gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed()) @@ -270,7 +264,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Job", func() { Limit(extraResource, "1"). Obj() sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}). - PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, topologyLevelBlock). + PodAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). Image(util.E2eTestSleepImage, []string{"60s"}). Obj() gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed()) diff --git a/test/e2e/tas/jobset_test.go b/test/e2e/tas/jobset_test.go index a36e6f8629..7b55b964a2 100644 --- a/test/e2e/tas/jobset_test.go +++ b/test/e2e/tas/jobset_test.go @@ -58,11 +58,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for JobSet", func() { clusterQueue *kueue.ClusterQueue ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("datacenter").Levels( - topologyLevelBlock, - topologyLevelRack, - corev1.LabelHostname, - ).Obj() + topology = testing.MakeDefaultThreeLevelTopology("datacenter") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -106,7 +102,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for JobSet", func() { Image: util.E2eTestSleepImage, Args: []string{"60s"}, PodAnnotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: topologyLevelBlock, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). diff --git a/test/e2e/tas/mpijob_test.go b/test/e2e/tas/mpijob_test.go index 358b0defb3..9cb3ede079 100644 --- a/test/e2e/tas/mpijob_test.go +++ b/test/e2e/tas/mpijob_test.go @@ -53,9 +53,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for MPIJob", func() { } gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed()) - topology = testing.MakeTopology("datacenter"). - Levels(topologyLevelBlock, topologyLevelRack, corev1.LabelHostname). - Obj() + topology = testing.MakeDefaultThreeLevelTopology("datacenter") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). 
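For reference, the new helpers in pkg/util/testing/defaults.go are pure shorthand for the builder chains they replace. A minimal sketch of that equivalence, using only identifiers added by this diff; the wrapper function and import aliases are illustrative, not part of the change:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
	utiltesting "sigs.k8s.io/kueue/pkg/util/testing"
)

// equivalentTopologies shows that MakeDefaultThreeLevelTopology is shorthand
// for the inline builder chain it replaces: both yield a Topology with the
// block, rack and hostname levels, in that order.
func equivalentTopologies() (before, after *kueuealpha.Topology) {
	before = utiltesting.MakeTopology("datacenter").
		Levels(
			utiltesting.DefaultBlockTopologyLevel,
			utiltesting.DefaultRackTopologyLevel,
			corev1.LabelHostname,
		).
		Obj()
	after = utiltesting.MakeDefaultThreeLevelTopology("datacenter")
	return before, after
}
```

The same pattern applies to the one- and two-level variants substituted into the cache, scheduler, and controller tests.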
@@ -106,7 +104,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for MPIJob", func() { ReplicaCount: launcherReplicas, RestartPolicy: corev1.RestartPolicyOnFailure, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: topologyLevelRack, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingmpijob.MPIJobReplicaSpecRequirement{ @@ -116,7 +114,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for MPIJob", func() { ReplicaCount: workerReplicas, RestartPolicy: corev1.RestartPolicyOnFailure, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: topologyLevelBlock, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). diff --git a/test/e2e/tas/pod_group_test.go b/test/e2e/tas/pod_group_test.go index b921048024..27c70f1d59 100644 --- a/test/e2e/tas/pod_group_test.go +++ b/test/e2e/tas/pod_group_test.go @@ -53,11 +53,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Pod group", func() { clusterQueue *kueue.ClusterQueue ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("datacenter").Levels( - topologyLevelBlock, - topologyLevelRack, - corev1.LabelHostname, - ).Obj() + topology = testing.MakeDefaultThreeLevelTopology("datacenter") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -93,7 +89,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for Pod group", func() { Queue("test-queue"). Request(extraResource, "1"). Limit(extraResource, "1"). - Annotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.provider.com/topology-block") + Annotation(kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel) podGroup := basePod.MakeIndexedGroup(numPods) for _, pod := range podGroup { diff --git a/test/e2e/tas/pytorch_test.go b/test/e2e/tas/pytorch_test.go index 6d5e7c942d..bdcd1bf294 100644 --- a/test/e2e/tas/pytorch_test.go +++ b/test/e2e/tas/pytorch_test.go @@ -52,9 +52,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for PyTorchJob", func() { } gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed()) - topology = testing.MakeTopology("datacenter"). - Levels(topologyLevelBlock, topologyLevelRack, corev1.LabelHostname). - Obj() + topology = testing.MakeDefaultThreeLevelTopology("datacenter") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -105,7 +103,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for PyTorchJob", func() { ReplicaCount: masterReplicas, RestartPolicy: kftraining.RestartPolicyOnFailure, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: topologyLevelRack, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingpytorchjob.PyTorchReplicaSpecRequirement{ @@ -115,7 +113,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for PyTorchJob", func() { ReplicaCount: workerReplicas, RestartPolicy: kftraining.RestartPolicyOnFailure, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: topologyLevelBlock, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). 
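The exported level constants also replace the per-file strings fed into the PodSet topology-request annotations. A hedged sketch of the annotation maps the wrappers above end up attaching to pod templates (the function is illustrative; the annotation keys and constants come from this diff):

```go
package example

import (
	kueuealpha "sigs.k8s.io/kueue/apis/kueue/v1alpha1"
	utiltesting "sigs.k8s.io/kueue/pkg/util/testing"
)

// topologyRequestAnnotations mirrors what the Job/JobSet/MPIJob/PyTorchJob
// wrappers above set: a hard requirement at the block level and a soft
// preference at the rack level.
func topologyRequestAnnotations() (required, preferred map[string]string) {
	required = map[string]string{
		kueuealpha.PodSetRequiredTopologyAnnotation: utiltesting.DefaultBlockTopologyLevel,
	}
	preferred = map[string]string{
		kueuealpha.PodSetPreferredTopologyAnnotation: utiltesting.DefaultRackTopologyLevel,
	}
	return required, preferred
}
```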
diff --git a/test/e2e/tas/statefulset_test.go b/test/e2e/tas/statefulset_test.go index 3f82fbe981..78ce22588b 100644 --- a/test/e2e/tas/statefulset_test.go +++ b/test/e2e/tas/statefulset_test.go @@ -45,9 +45,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for StatefulSet", func() { ns = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "e2e-tas-sts-"}} gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed()) - topology = testing.MakeTopology("datacenter"). - Levels(topologyLevelBlock, topologyLevelRack, corev1.LabelHostname). - Obj() + topology = testing.MakeDefaultThreeLevelTopology("datacenter") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -85,7 +83,7 @@ var _ = ginkgo.Describe("TopologyAwareScheduling for StatefulSet", func() { Limit(extraResource, "1"). Replicas(replicas). Queue(localQueue.Name). - PodTemplateSpecAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, "cloud.provider.com/topology-block"). + PodTemplateSpecAnnotation(kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). Obj() gomega.Expect(k8sClient.Create(ctx, sts)).Should(gomega.Succeed()) diff --git a/test/integration/controller/jobs/jobset/jobset_controller_test.go b/test/integration/controller/jobs/jobset/jobset_controller_test.go index 637bae408a..6d591c207e 100644 --- a/test/integration/controller/jobs/jobset/jobset_controller_test.go +++ b/test/integration/controller/jobs/jobset/jobset_controller_test.go @@ -1156,8 +1156,6 @@ var _ = ginkgo.Describe("JobSet controller interacting with scheduler", ginkgo.O var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -1190,8 +1188,8 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -1201,9 +1199,7 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). 
@@ -1241,7 +1237,7 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" Parallelism: 1, Completions: 1, PodAnnotations: map[string]string{ - kueuealpha.PodSetRequiredTopologyAnnotation: tasBlockLabel, + kueuealpha.PodSetRequiredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, Image: util.E2eTestSleepImage, Args: []string{"1ms"}, @@ -1252,7 +1248,7 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" Parallelism: 1, Completions: 1, PodAnnotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: tasRackLabel, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, Image: util.E2eTestSleepImage, Args: []string{"1ms"}, @@ -1279,7 +1275,7 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" Name: "rj1", Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasBlockLabel), + Required: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(batchv1.JobCompletionIndexAnnotation), SubGroupIndexLabel: ptr.To(jobsetapi.JobIndexKey), SubGroupCount: ptr.To[int32](1), @@ -1289,7 +1285,7 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" Name: "rj2", Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasRackLabel), + Preferred: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(batchv1.JobCompletionIndexAnnotation), SubGroupIndexLabel: ptr.To(jobsetapi.JobIndexKey), SubGroupCount: ptr.To[int32](1), @@ -1311,13 +1307,13 @@ var _ = ginkgo.Describe("JobSet controller when TopologyAwareScheduling enabled" g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(2)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/controller/jobs/mpijob/mpijob_controller_test.go b/test/integration/controller/jobs/mpijob/mpijob_controller_test.go index 25bb74d53a..ae359317d9 100644 --- a/test/integration/controller/jobs/mpijob/mpijob_controller_test.go +++ b/test/integration/controller/jobs/mpijob/mpijob_controller_test.go @@ -929,8 +929,6 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", ginkgo.Orde var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -963,8 +961,8 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled" nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). 
StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -974,9 +972,7 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled" } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -1008,8 +1004,8 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled" mpiJob := testingmpijob.MakeMPIJob(jobName, ns.Name). Queue(localQueue.Name). GenericLauncherAndWorker(). - PodAnnotation(kfmpi.MPIReplicaTypeLauncher, kueuealpha.PodSetRequiredTopologyAnnotation, tasBlockLabel). - PodAnnotation(kfmpi.MPIReplicaTypeWorker, kueuealpha.PodSetPreferredTopologyAnnotation, tasRackLabel). + PodAnnotation(kfmpi.MPIReplicaTypeLauncher, kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). + PodAnnotation(kfmpi.MPIReplicaTypeWorker, kueuealpha.PodSetPreferredTopologyAnnotation, testing.DefaultRackTopologyLevel). Request(kfmpi.MPIReplicaTypeLauncher, corev1.ResourceCPU, "100m"). Request(kfmpi.MPIReplicaTypeWorker, corev1.ResourceCPU, "100m"). Obj() @@ -1031,7 +1027,7 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled" Name: strings.ToLower(string(kfmpi.MPIReplicaTypeLauncher)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasBlockLabel), + Required: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kfmpi.ReplicaIndexLabel), }, }, @@ -1039,7 +1035,7 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled" Name: strings.ToLower(string(kfmpi.MPIReplicaTypeWorker)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasRackLabel), + Preferred: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kfmpi.ReplicaIndexLabel), }, }, @@ -1059,13 +1055,13 @@ var _ = ginkgo.Describe("MPIJob controller when TopologyAwareScheduling enabled" g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(2)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/controller/jobs/mxjob/mxjob_controller_test.go b/test/integration/controller/jobs/mxjob/mxjob_controller_test.go index 461704f37a..7cfcc21a44 100644 --- a/test/integration/controller/jobs/mxjob/mxjob_controller_test.go +++ b/test/integration/controller/jobs/mxjob/mxjob_controller_test.go @@ -322,8 +322,6 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", framework.R var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", framework.RedundantSpec, ginkgo.Ordered, ginkgo.ContinueOnFailure, 
func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -356,8 +354,8 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -367,9 +365,7 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -403,9 +399,9 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", Request(kftraining.MXJobReplicaTypeScheduler, corev1.ResourceCPU, "100m"). Request(kftraining.MXJobReplicaTypeServer, corev1.ResourceCPU, "100m"). Request(kftraining.MXJobReplicaTypeWorker, corev1.ResourceCPU, "100m"). - PodAnnotation(kftraining.MXJobReplicaTypeScheduler, kueuealpha.PodSetRequiredTopologyAnnotation, tasBlockLabel). - PodAnnotation(kftraining.MXJobReplicaTypeServer, kueuealpha.PodSetRequiredTopologyAnnotation, tasBlockLabel). - PodAnnotation(kftraining.MXJobReplicaTypeWorker, kueuealpha.PodSetPreferredTopologyAnnotation, tasRackLabel). + PodAnnotation(kftraining.MXJobReplicaTypeScheduler, kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). + PodAnnotation(kftraining.MXJobReplicaTypeServer, kueuealpha.PodSetRequiredTopologyAnnotation, testing.DefaultBlockTopologyLevel). + PodAnnotation(kftraining.MXJobReplicaTypeWorker, kueuealpha.PodSetPreferredTopologyAnnotation, testing.DefaultRackTopologyLevel). 
Obj() ginkgo.By("creating a MXJob", func() { gomega.Expect(k8sClient.Create(ctx, mxJob)).Should(gomega.Succeed()) @@ -422,7 +418,7 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", Name: strings.ToLower(string(kftraining.MXJobReplicaTypeScheduler)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasBlockLabel), + Required: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -430,7 +426,7 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", Name: strings.ToLower(string(kftraining.MXJobReplicaTypeServer)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasBlockLabel), + Required: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -438,7 +434,7 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", Name: strings.ToLower(string(kftraining.MXJobReplicaTypeWorker)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasRackLabel), + Preferred: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -458,19 +454,19 @@ var _ = ginkgo.Describe("MXJob controller when TopologyAwareScheduling enabled", g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(3)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go b/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go index eae401eeb2..0adfd11fc5 100644 --- a/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go +++ b/test/integration/controller/jobs/paddlejob/paddlejob_controller_test.go @@ -311,8 +311,6 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", framework.R var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabled", framework.RedundantSpec, ginkgo.Ordered, ginkgo.ContinueOnFailure, func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -345,8 +343,8 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). 
+ Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -356,9 +354,7 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -393,14 +389,14 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl ReplicaType: kftraining.PaddleJobReplicaTypeMaster, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetRequiredTopologyAnnotation: tasRackLabel, + kueuealpha.PodSetRequiredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingpaddlejob.PaddleReplicaSpecRequirement{ ReplicaType: kftraining.PaddleJobReplicaTypeWorker, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: tasBlockLabel, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). @@ -423,7 +419,7 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl Name: strings.ToLower(string(kftraining.PaddleJobReplicaTypeMaster)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -431,7 +427,7 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl Name: strings.ToLower(string(kftraining.PaddleJobReplicaTypeWorker)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasBlockLabel), + Preferred: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -451,13 +447,13 @@ var _ = ginkgo.Describe("PaddleJob controller when TopologyAwareScheduling enabl g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(2)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go b/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go index 70ead7ac5d..c1f82757a9 100644 --- a/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go +++ b/test/integration/controller/jobs/pytorchjob/pytorchjob_controller_test.go @@ -614,8 +614,6 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", ginkgo.Orde var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enabled", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel 
= "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -648,8 +646,8 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -659,9 +657,7 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -696,14 +692,14 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab ReplicaType: kftraining.PyTorchJobReplicaTypeMaster, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetRequiredTopologyAnnotation: tasRackLabel, + kueuealpha.PodSetRequiredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingpytorchjob.PyTorchReplicaSpecRequirement{ ReplicaType: kftraining.PyTorchJobReplicaTypeWorker, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: tasBlockLabel, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). @@ -726,7 +722,7 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab Name: strings.ToLower(string(kftraining.PyTorchJobReplicaTypeMaster)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -734,7 +730,7 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab Name: strings.ToLower(string(kftraining.PyTorchJobReplicaTypeWorker)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasBlockLabel), + Preferred: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -754,13 +750,13 @@ var _ = ginkgo.Describe("PyTorchJob controller when TopologyAwareScheduling enab g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(2)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/controller/jobs/tfjob/tfjob_controller_test.go b/test/integration/controller/jobs/tfjob/tfjob_controller_test.go index 1eacedda8d..504bc2eed9 100644 --- a/test/integration/controller/jobs/tfjob/tfjob_controller_test.go +++ 
b/test/integration/controller/jobs/tfjob/tfjob_controller_test.go @@ -325,8 +325,6 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", framework.R var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", framework.RedundantSpec, ginkgo.Ordered, ginkgo.ContinueOnFailure, func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -359,8 +357,8 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -370,9 +368,7 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -407,21 +403,21 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", ReplicaType: kftraining.TFJobReplicaTypeChief, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetRequiredTopologyAnnotation: tasRackLabel, + kueuealpha.PodSetRequiredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingtfjob.TFReplicaSpecRequirement{ ReplicaType: kftraining.TFJobReplicaTypePS, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetRequiredTopologyAnnotation: tasRackLabel, + kueuealpha.PodSetRequiredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingtfjob.TFReplicaSpecRequirement{ ReplicaType: kftraining.TFJobReplicaTypeWorker, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: tasBlockLabel, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). 
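In the integration tests the node labels now use the same exported keys, so the expected TopologyAssignment can be written from the constants as well. A sketch of the assignment these controller tests compare against, assuming kueue is the v1beta1 API alias the tests already use:

```go
package example

import (
	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
	utiltesting "sigs.k8s.io/kueue/pkg/util/testing"
)

// expectedAssignment mirrors the assertions in these tests: a single pod
// counted in the domain formed by the b1 block label and the r1 rack label.
func expectedAssignment() *kueue.TopologyAssignment {
	return &kueue.TopologyAssignment{
		Levels: []string{utiltesting.DefaultBlockTopologyLevel, utiltesting.DefaultRackTopologyLevel},
		Domains: []kueue.TopologyDomainAssignment{
			{Count: 1, Values: []string{"b1", "r1"}},
		},
	}
}
```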
@@ -445,7 +441,7 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", Name: strings.ToLower(string(kftraining.TFJobReplicaTypeChief)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -453,7 +449,7 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", Name: strings.ToLower(string(kftraining.TFJobReplicaTypePS)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -461,7 +457,7 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", Name: strings.ToLower(string(kftraining.TFJobReplicaTypeWorker)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasBlockLabel), + Preferred: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -481,19 +477,19 @@ var _ = ginkgo.Describe("TFJob controller when TopologyAwareScheduling enabled", g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(3)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go b/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go index 4bfd02e91a..c48c930c64 100644 --- a/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go +++ b/test/integration/controller/jobs/xgboostjob/xgboostjob_controller_test.go @@ -308,8 +308,6 @@ var _ = ginkgo.Describe("Job controller interacting with scheduler", framework.R var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enabled", framework.RedundantSpec, ginkgo.Ordered, ginkgo.ContinueOnFailure, func() { const ( nodeGroupLabel = "node-group" - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" ) var ( @@ -342,8 +340,8 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab nodes = []corev1.Node{ *testingnode.MakeNode("b1r1"). Label(nodeGroupLabel, "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). 
StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -353,9 +351,7 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -390,14 +386,14 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab ReplicaType: kftraining.XGBoostJobReplicaTypeMaster, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetRequiredTopologyAnnotation: tasRackLabel, + kueuealpha.PodSetRequiredTopologyAnnotation: testing.DefaultRackTopologyLevel, }, }, testingxgboostjob.XGBReplicaSpecRequirement{ ReplicaType: kftraining.XGBoostJobReplicaTypeWorker, ReplicaCount: 1, Annotations: map[string]string{ - kueuealpha.PodSetPreferredTopologyAnnotation: tasBlockLabel, + kueuealpha.PodSetPreferredTopologyAnnotation: testing.DefaultBlockTopologyLevel, }, }, ). @@ -420,7 +416,7 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab Name: strings.ToLower(string(kftraining.XGBoostJobReplicaTypeMaster)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -428,7 +424,7 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab Name: strings.ToLower(string(kftraining.XGBoostJobReplicaTypeWorker)), Count: 1, TopologyRequest: &kueue.PodSetTopologyRequest{ - Preferred: ptr.To(tasBlockLabel), + Preferred: ptr.To(testing.DefaultBlockTopologyLevel), PodIndexLabel: ptr.To(kftraining.ReplicaIndexLabel), }, }, @@ -448,13 +444,13 @@ var _ = ginkgo.Describe("XGBoostJob controller when TopologyAwareScheduling enab g.Expect(wl.Status.Admission.PodSetAssignments).Should(gomega.HaveLen(2)) g.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) g.Expect(wl.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ - Levels: []string{tasBlockLabel, tasRackLabel}, + Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel}, Domains: []kueue.TopologyDomainAssignment{{Count: 1, Values: []string{"b1", "r1"}}}, }, )) diff --git a/test/integration/tas/tas_test.go b/test/integration/tas/tas_test.go index 4f80e1c764..0489a8a644 100644 --- a/test/integration/tas/tas_test.go +++ b/test/integration/tas/tas_test.go @@ -36,11 +36,6 @@ import ( "sigs.k8s.io/kueue/test/util" ) -const ( - tasBlockLabel = "cloud.com/topology-block" - tasRackLabel = "cloud.com/topology-rack" -) - var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { var ( ns *corev1.Namespace @@ -147,10 +142,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { ) ginkgo.BeforeEach(func() { - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, - tasRackLabel, - ).Obj() + topology 
= testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -278,8 +270,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { nodes = []corev1.Node{ *testingnode.MakeNode("b1-r1"). Label("node-group", "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -288,8 +280,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { Obj(), *testingnode.MakeNode("b1-r2"). Label("node-group", "tas"). - Label(tasBlockLabel, "b1"). - Label(tasRackLabel, "r2"). + Label(testing.DefaultBlockTopologyLevel, "b1"). + Label(testing.DefaultRackTopologyLevel, "r2"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -298,8 +290,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { Obj(), *testingnode.MakeNode("b2-r1"). Label("node-group", "tas"). - Label(tasBlockLabel, "b2"). - Label(tasRackLabel, "r1"). + Label(testing.DefaultBlockTopologyLevel, "b2"). + Label(testing.DefaultRackTopologyLevel, "r1"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -308,8 +300,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { Obj(), *testingnode.MakeNode("b2-r2"). Label("node-group", "tas"). - Label(tasBlockLabel, "b2"). - Label(tasRackLabel, "r2"). + Label(testing.DefaultBlockTopologyLevel, "b2"). + Label(testing.DefaultRackTopologyLevel, "r2"). StatusAllocatable(corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("1"), corev1.ResourceMemory: resource.MustParse("1Gi"), @@ -319,10 +311,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { } util.CreateNodes(ctx, k8sClient, nodes) - topology = testing.MakeTopology("default").Levels( - tasBlockLabel, - tasRackLabel, - ).Obj() + topology = testing.MakeDefaultTwoLevelTopology("default") gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed()) tasFlavor = testing.MakeResourceFlavor("tas-flavor"). @@ -357,7 +346,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { wl1 := testing.MakeWorkload("wl1-inadmissible", ns.Name). 
Queue(localQueue.Name).Request(corev1.ResourceCPU, "2").Obj() wl1.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), } gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed()) }) @@ -374,7 +363,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj() wl1.Spec.PodSets[0].Count = 2 wl1.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasBlockLabel), + Required: ptr.To(testing.DefaultBlockTopologyLevel), } gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed()) }) @@ -389,8 +378,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { gomega.Expect(wl1.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ Levels: []string{ - tasBlockLabel, - tasRackLabel, + testing.DefaultBlockTopologyLevel, + testing.DefaultRackTopologyLevel, }, Domains: []kueue.TopologyDomainAssignment{ { @@ -416,7 +405,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { wl2 = testing.MakeWorkload("wl2", ns.Name). Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj() wl2.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), } gomega.Expect(k8sClient.Create(ctx, wl2)).Should(gomega.Succeed()) }) @@ -431,8 +420,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { gomega.Expect(wl2.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ Levels: []string{ - tasBlockLabel, - tasRackLabel, + testing.DefaultBlockTopologyLevel, + testing.DefaultRackTopologyLevel, }, Domains: []kueue.TopologyDomainAssignment{ { @@ -451,7 +440,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { wl3 = testing.MakeWorkload("wl3", ns.Name). Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj() wl3.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{ - Required: ptr.To(tasRackLabel), + Required: ptr.To(testing.DefaultRackTopologyLevel), } gomega.Expect(k8sClient.Create(ctx, wl3)).Should(gomega.Succeed()) }) @@ -466,8 +455,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { gomega.Expect(wl3.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo( &kueue.TopologyAssignment{ Levels: []string{ - tasBlockLabel, - tasRackLabel, + testing.DefaultBlockTopologyLevel, + testing.DefaultRackTopologyLevel, }, Domains: []kueue.TopologyDomainAssignment{ { @@ -486,7 +475,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() { wl4 = testing.MakeWorkload("wl4", ns.Name). 
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj()
 				wl4.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{
-					Required: ptr.To(tasRackLabel),
+					Required: ptr.To(testing.DefaultRackTopologyLevel),
 				}
 				gomega.Expect(k8sClient.Create(ctx, wl4)).Should(gomega.Succeed())
 				util.ExpectWorkloadsToBePending(ctx, k8sClient, wl4)
@@ -507,8 +496,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				gomega.Expect(wl4.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo(
 					&kueue.TopologyAssignment{
 						Levels: []string{
-							tasBlockLabel,
-							tasRackLabel,
+							testing.DefaultBlockTopologyLevel,
+							testing.DefaultRackTopologyLevel,
 						},
 						Domains: []kueue.TopologyDomainAssignment{
 							{
@@ -541,7 +530,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj()
 				wl.Spec.PodSets[0].Count = 2
 				wl.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{
-					Required: ptr.To(tasBlockLabel),
+					Required: ptr.To(testing.DefaultBlockTopologyLevel),
 				}
 				gomega.Expect(k8sClient.Create(ctx, wl)).Should(gomega.Succeed())
 			})
@@ -550,7 +539,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 1)
 			})
 
-			topology = testing.MakeTopology("default").Levels(tasBlockLabel, tasRackLabel).Obj()
+			topology = testing.MakeDefaultTwoLevelTopology("default")
 			gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())
 
 			ginkgo.By("verify the workload is admitted", func() {
@@ -562,7 +551,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), wl)).To(gomega.Succeed())
 				gomega.Expect(wl.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo(
 					&kueue.TopologyAssignment{
-						Levels: []string{tasBlockLabel, tasRackLabel},
+						Levels: []string{testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel},
 						Domains: []kueue.TopologyDomainAssignment{
 							{Count: 1, Values: []string{"b1", "r1"}},
 							{Count: 1, Values: []string{"b1", "r2"}},
@@ -581,8 +570,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			nodes = []corev1.Node{
 				*testingnode.MakeNode("x1").
 					Label("node-group", "tas").
-					Label(tasBlockLabel, "b1").
-					Label(tasRackLabel, "r1").
+					Label(testing.DefaultBlockTopologyLevel, "b1").
+					Label(testing.DefaultRackTopologyLevel, "r1").
 					Label(corev1.LabelHostname, "x1").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("1"),
@@ -592,8 +581,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 					Obj(),
 				*testingnode.MakeNode("x2").
 					Label("node-group", "tas").
-					Label(tasBlockLabel, "b1").
-					Label(tasRackLabel, "r2").
+					Label(testing.DefaultBlockTopologyLevel, "b1").
+					Label(testing.DefaultRackTopologyLevel, "r2").
 					Label(corev1.LabelHostname, "x2").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("1"),
@@ -603,8 +592,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 					Obj(),
 				*testingnode.MakeNode("x3").
 					Label("node-group", "tas").
-					Label(tasBlockLabel, "b2").
-					Label(tasRackLabel, "r1").
+					Label(testing.DefaultBlockTopologyLevel, "b2").
+					Label(testing.DefaultRackTopologyLevel, "r1").
 					Label(corev1.LabelHostname, "x3").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("1"),
@@ -614,8 +603,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 					Obj(),
 				*testingnode.MakeNode("x4").
 					Label("node-group", "tas").
-					Label(tasBlockLabel, "b2").
-					Label(tasRackLabel, "r2").
+					Label(testing.DefaultBlockTopologyLevel, "b2").
+					Label(testing.DefaultRackTopologyLevel, "r2").
 					Label(corev1.LabelHostname, "x4").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("1"),
@@ -626,11 +615,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			}
 			util.CreateNodes(ctx, k8sClient, nodes)
 
-			topology = testing.MakeTopology("default").Levels(
-				tasBlockLabel,
-				tasRackLabel,
-				corev1.LabelHostname,
-			).Obj()
+			topology = testing.MakeDefaultThreeLevelTopology("default")
 			gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())
 
 			tasFlavor = testing.MakeResourceFlavor("tas-flavor").
@@ -664,7 +649,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			ginkgo.By("creating a workload which requires rack, but does not fit in any", func() {
 				wl1 := testing.MakeWorkload("wl1-inadmissible", ns.Name).
 					PodSets(*testing.MakePodSet("worker", 4).
-						PreferredTopologyRequest(tasRackLabel).
+						PreferredTopologyRequest(testing.DefaultRackTopologyLevel).
 						Obj()).
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "2").Obj()
 				gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
@@ -680,7 +665,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			ginkgo.By("creating a workload which can fit", func() {
 				wl1 = testing.MakeWorkload("wl1", ns.Name).
 					PodSets(*testing.MakePodSet("worker", 1).
-						PreferredTopologyRequest(tasBlockLabel).
+						PreferredTopologyRequest(testing.DefaultBlockTopologyLevel).
 						Obj()).
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj()
 				gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
@@ -694,7 +679,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			ginkgo.By("creating second a workload which cannot fit", func() {
 				wl2 = testing.MakeWorkload("wl2", ns.Name).
 					PodSets(*testing.MakePodSet("worker-2", 4).
-						PreferredTopologyRequest(tasBlockLabel).
+						PreferredTopologyRequest(testing.DefaultBlockTopologyLevel).
 						Obj()).
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj()
 				gomega.Expect(k8sClient.Create(ctx, wl2)).Should(gomega.Succeed())
@@ -720,10 +705,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			}
 			gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
-			topology = testing.MakeTopology("default").Levels(
-				tasBlockLabel,
-				tasRackLabel,
-			).Obj()
+			topology = testing.MakeDefaultTwoLevelTopology("default")
 			gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())
 
 			tasFlavor = testing.MakeResourceFlavor("tas-flavor").
@@ -762,7 +744,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				wl1 = testing.MakeWorkload("wl1", ns.Name).
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj()
 				wl1.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{
-					Required: ptr.To(tasRackLabel),
+					Required: ptr.To(testing.DefaultRackTopologyLevel),
 				}
 				gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
 			})
@@ -775,8 +757,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			nodes = []corev1.Node{
 				*testingnode.MakeNode("b1-r1").
 					Label("node-group", "tas").
-					Label(tasBlockLabel, "b1").
-					Label(tasRackLabel, "r1").
+					Label(testing.DefaultBlockTopologyLevel, "b1").
+					Label(testing.DefaultRackTopologyLevel, "r1").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU:    resource.MustParse("1"),
 						corev1.ResourceMemory: resource.MustParse("1Gi"),
@@ -808,11 +790,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			}
 			gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
 
-			topology = testing.MakeTopology("default").Levels(
-				tasBlockLabel,
-				tasRackLabel,
-				corev1.LabelHostname,
-			).Obj()
+			topology = testing.MakeDefaultThreeLevelTopology("default")
 			gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())
 
 			tasFlavor = testing.MakeResourceFlavor("tas-flavor").
@@ -851,8 +829,8 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			nodes = []corev1.Node{
 				*testingnode.MakeNode("b1-r1-x1").
 					Label("node-group", "tas").
-					Label(tasBlockLabel, "b1").
-					Label(tasRackLabel, "r1").
+					Label(testing.DefaultBlockTopologyLevel, "b1").
+					Label(testing.DefaultRackTopologyLevel, "r1").
 					Label(corev1.LabelHostname, "b1-r1-x1").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("1"),
@@ -873,7 +851,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				wl1 = testing.MakeWorkload("wl1", ns.Name).
 					Queue(localQueue.Name).Request(corev1.ResourceCPU, "1").Obj()
 				wl1.Spec.PodSets[0].TopologyRequest = &kueue.PodSetTopologyRequest{
-					Required: ptr.To(tasRackLabel),
+					Required: ptr.To(testing.DefaultRackTopologyLevel),
 				}
 				gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
 			})
@@ -919,7 +897,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			nodes = []corev1.Node{
 				*testingnode.MakeNode("cpu-node").
 					Label("node.kubernetes.io/instance-type", "cpu-node").
-					Label(tasRackLabel, "cpu-rack").
+					Label(testing.DefaultRackTopologyLevel, "cpu-rack").
 					StatusAllocatable(corev1.ResourceList{
 						corev1.ResourceCPU: resource.MustParse("5"),
 					}).
@@ -927,7 +905,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 					Obj(),
 				*testingnode.MakeNode("gpu-node").
 					Label("node.kubernetes.io/instance-type", "gpu-node").
-					Label(tasRackLabel, "gpu-rack").
+					Label(testing.DefaultRackTopologyLevel, "gpu-rack").
 					StatusAllocatable(corev1.ResourceList{
 						gpuResName: resource.MustParse("4"),
 					}).
@@ -937,7 +915,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 			util.CreateNodes(ctx, k8sClient, nodes)
 
 			topology = testing.MakeTopology("default").Levels(
-				tasRackLabel,
+				testing.DefaultRackTopologyLevel,
 			).Obj()
 			gomega.Expect(k8sClient.Create(ctx, topology)).Should(gomega.Succeed())
 
@@ -986,13 +964,13 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 					map[string]string{"node.kubernetes.io/instance-type": "cpu-node"},
 				).Request(corev1.ResourceCPU, "5").Obj()
 				ps1.TopologyRequest = &kueue.PodSetTopologyRequest{
-					Required: ptr.To(tasRackLabel),
+					Required: ptr.To(testing.DefaultRackTopologyLevel),
 				}
 				ps2 := *testing.MakePodSet("worker", 2).NodeSelector(
 					map[string]string{"node.kubernetes.io/instance-type": "gpu-node"},
 				).Request(gpuResName, "2").Obj()
 				ps2.TopologyRequest = &kueue.PodSetTopologyRequest{
-					Required: ptr.To(tasRackLabel),
+					Required: ptr.To(testing.DefaultRackTopologyLevel),
 				}
 				wl1.Spec.PodSets = []kueue.PodSet{ps1, ps2}
 				gomega.Expect(k8sClient.Create(ctx, wl1)).Should(gomega.Succeed())
@@ -1008,7 +986,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				gomega.Expect(wl1.Status.Admission.PodSetAssignments[0].TopologyAssignment).Should(gomega.BeComparableTo(
 					&kueue.TopologyAssignment{
 						Levels: []string{
-							tasRackLabel,
+							testing.DefaultRackTopologyLevel,
 						},
 						Domains: []kueue.TopologyDomainAssignment{
 							{
@@ -1023,7 +1001,7 @@ var _ = ginkgo.Describe("Topology Aware Scheduling", ginkgo.Ordered, func() {
 				gomega.Expect(wl1.Status.Admission.PodSetAssignments[1].TopologyAssignment).Should(gomega.BeComparableTo(
 					&kueue.TopologyAssignment{
 						Levels: []string{
-							tasRackLabel,
+							testing.DefaultRackTopologyLevel,
 						},
 						Domains: []kueue.TopologyDomainAssignment{
 							{
@@ -1053,22 +1031,22 @@ var _ = ginkgo.Describe("Topology validations", func() {
 			gomega.Expect(err).Should(matcher)
 		},
 		ginkgo.Entry("valid Topology",
-			testing.MakeTopology("valid").Levels(corev1.LabelHostname).Obj(),
+			testing.MakeDefaultOneLevelTopology("valid"),
 			gomega.Succeed()),
 		ginkgo.Entry("invalid levels",
 			testing.MakeTopology("invalid-level").Levels("@invalid").Obj(),
 			testing.BeInvalidError()),
 		ginkgo.Entry("non-unique levels",
-			testing.MakeTopology("default").Levels(tasBlockLabel, tasBlockLabel).Obj(),
+			testing.MakeTopology("default").Levels(testing.DefaultBlockTopologyLevel, testing.DefaultBlockTopologyLevel).Obj(),
			testing.BeInvalidError()),
 		ginkgo.Entry("kubernetes.io/hostname first",
-			testing.MakeTopology("default").Levels(corev1.LabelHostname, tasBlockLabel, tasRackLabel).Obj(),
+			testing.MakeTopology("default").Levels(corev1.LabelHostname, testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel).Obj(),
 			testing.BeInvalidError()),
 		ginkgo.Entry("kubernetes.io/hostname middle",
-			testing.MakeTopology("default").Levels(tasBlockLabel, corev1.LabelHostname, tasRackLabel).Obj(),
+			testing.MakeTopology("default").Levels(testing.DefaultBlockTopologyLevel, corev1.LabelHostname, testing.DefaultRackTopologyLevel).Obj(),
 			testing.BeInvalidError()),
 		ginkgo.Entry("kubernetes.io/hostname last",
-			testing.MakeTopology("default").Levels(tasBlockLabel, tasRackLabel, corev1.LabelHostname).Obj(),
+			testing.MakeTopology("default").Levels(testing.DefaultBlockTopologyLevel, testing.DefaultRackTopologyLevel, corev1.LabelHostname).Obj(),
 			gomega.Succeed()),
 	)
 })
@@ -1087,7 +1065,7 @@ var _ = ginkgo.Describe("Topology validations", func() {
 			gomega.Expect(k8sClient.Update(ctx, topology)).Should(matcher)
 		},
 		ginkgo.Entry("succeed to update topology",
-			testing.MakeTopology("valid").Levels(corev1.LabelHostname).Obj(),
+			testing.MakeDefaultOneLevelTopology("valid"),
 			func(topology *kueuealpha.Topology) {
 				topology.ObjectMeta.Labels = map[string]string{
 					"alpha": "beta",
@@ -1095,7 +1073,7 @@ var _ = ginkgo.Describe("Topology validations", func() {
 			},
 			gomega.Succeed()),
 		ginkgo.Entry("updating levels is prohibited",
-			testing.MakeTopology("valid").Levels(corev1.LabelHostname).Obj(),
+			testing.MakeDefaultOneLevelTopology("valid"),
 			func(topology *kueuealpha.Topology) {
 				topology.Spec.Levels = append(topology.Spec.Levels, kueuealpha.TopologyLevel{
 					NodeLabel: "added",
@@ -1103,7 +1081,7 @@ var _ = ginkgo.Describe("Topology validations", func() {
 			},
 			testing.BeInvalidError()),
 		ginkgo.Entry("updating levels order is prohibited",
-			testing.MakeTopology("default").Levels(tasRackLabel, tasBlockLabel, corev1.LabelHostname).Obj(),
+			testing.MakeDefaultThreeLevelTopology("default"),
 			func(topology *kueuealpha.Topology) {
 				topology.Spec.Levels[0], topology.Spec.Levels[1] = topology.Spec.Levels[1], topology.Spec.Levels[0]
 			},