
Commit 3cbaa98

lowang-bh and tenzen-y authored
add volcano gang-schedule integration and e2e test (#569)
* add volcano gang-schedule integration test
* add e2e test for volcano scheduler
* merge #576: Increase the timeout for E2E tests
* Update test/e2e/mpi_job_test.go
* refactor e2e test function

Signed-off-by: lowang_bh <lhui_wang@163.com>
Co-authored-by: Yuki Iwai <yuki.iwai.tz@gmail.com>
1 parent f8d815c

File tree

5 files changed: +313 −29 lines changed

Makefile

Lines changed: 17 additions & 2 deletions
@@ -40,6 +40,7 @@ GOARCH=$(shell go env GOARCH)
 GOOS=$(shell go env GOOS)
 # Use go.mod go version as a single source of truth of scheduler-plugins version.
 SCHEDULER_PLUGINS_VERSION?=$(shell awk '/scheduler-plugins/{print $$2}' go.mod|head -n1)
+VOLCANO_SCHEDULER_VERSION?=$(shell go list -m -f "{{.Version}}" volcano.sh/apis)
 
 CRD_OPTIONS ?= "crd:generateEmbeddedObjectMeta=true"
 
@@ -64,7 +65,7 @@ vet:
 
 .PHONY: test
-test: bin/envtest scheduler-plugins-crd
+test: bin/envtest scheduler-plugins-crd volcano-scheduler-crd
 	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -v -covermode atomic -coverprofile=profile.cov $(shell go list ./... | grep -v '/test/e2e')
 
 # Only works with CONTROLLER_VERSION=v2
@@ -73,7 +74,7 @@ test_e2e: export TEST_MPI_OPERATOR_IMAGE=${IMAGE_NAME}:${RELEASE_VERSION}
 test_e2e: export TEST_OPENMPI_IMAGE=mpioperator/mpi-pi:${RELEASE_VERSION}-openmpi
 test_e2e: export TEST_INTELMPI_IMAGE=mpioperator/mpi-pi:${RELEASE_VERSION}-intel
 test_e2e: export TEST_MPICH_IMAGE=mpioperator/mpi-pi:${RELEASE_VERSION}-mpich
-test_e2e: bin/kubectl kind helm images test_images dev_manifest scheduler-plugins-chart
+test_e2e: bin/kubectl kind helm images test_images dev_manifest scheduler-plugins-chart volcano-scheduler-deploy
 	go test -timeout 20m -v ./test/e2e/...
 
 .PHONY: dev_manifest
@@ -192,3 +193,17 @@ scheduler-plugins-chart: scheduler-plugins-crd
 	cp -f $(PROJECT_DIR)/dep-crds/scheduler-plugins/crd.yaml $(PROJECT_DIR)/dep-manifests/scheduler-plugins/crds/scheduling.x-k8s.io_podgroups.yaml
 	cp -f /tmp/pkg/mod/sigs.k8s.io/scheduler-plugins@$(SCHEDULER_PLUGINS_VERSION)/manifests/noderesourcetopology/crd.yaml $(PROJECT_DIR)/dep-manifests/scheduler-plugins/crds/topology.node.k8s.io_noderesourcetopologies.yaml
 	chmod -R 760 $(PROJECT_DIR)/dep-manifests/scheduler-plugins
+
+.PHONY: volcano-scheduler
+volcano-scheduler:
+	-@GOPATH=/tmp go install volcano.sh/volcano/cmd/scheduler@$(VOLCANO_SCHEDULER_VERSION)
+
+.PHONY: volcano-scheduler-crd
+volcano-scheduler-crd: volcano-scheduler
+	mkdir -p $(PROJECT_DIR)/dep-crds/volcano-scheduler/
+	cp -f /tmp/pkg/mod/volcano.sh/volcano@$(VOLCANO_SCHEDULER_VERSION)/config/crd/bases/* $(PROJECT_DIR)/dep-crds/volcano-scheduler
+
+.PHONY: volcano-scheduler-deploy
+volcano-scheduler-deploy: volcano-scheduler-crd
+	mkdir -p $(PROJECT_DIR)/dep-manifests/volcano-scheduler/
+	cp -f /tmp/pkg/mod/volcano.sh/volcano@$(VOLCANO_SCHEDULER_VERSION)/installer/volcano-development.yaml $(PROJECT_DIR)/dep-manifests/volcano-scheduler/

test/e2e/e2e_suite_test.go

Lines changed: 38 additions & 3 deletions
@@ -30,6 +30,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
+	volcanoclient "volcano.sh/apis/pkg/client/clientset/versioned"
 
 	clientset "github.com/kubeflow/mpi-operator/pkg/client/clientset/versioned"
 )
@@ -43,6 +44,7 @@ const (
 	envTestMPICHImage          = "TEST_MPICH_IMAGE"
 	envTestKindImage           = "TEST_KIND_IMAGE"
 	envSchedulerPluginsVersion = "SCHEDULER_PLUGINS_VERSION"
+	envVolcanoSchedulerVersion = "VOLCANO_SCHEDULER_VERSION"
 
 	defaultMPIOperatorImage = "mpioperator/mpi-operator:local"
 	defaultKindImage        = "kindest/node:v1.25.8"
@@ -56,8 +58,11 @@ const (
 	operatorManifestsPath = rootPath + "/manifests/overlays/dev"
 
 	schedulerPluginsManifestPath   = rootPath + "/dep-manifests/scheduler-plugins/"
+	volcanoSchedulerManifestPath   = rootPath + "/dep-manifests/volcano-scheduler/" // the all-in-one volcano-development.yaml
 	envUseExistingSchedulerPlugins = "USE_EXISTING_SCHEDULER_PLUGINS"
+	envUseExistingVolcanoScheduler = "USE_EXISTING_VOLCANO_SCHEDULER"
 	defaultSchedulerPluginsVersion = "v0.25.7"
+	defaultVolcanoSchedulerVersion = "v1.7.0"
 
 	mpiOperator      = "mpi-operator"
 	schedulerPlugins = "scheduler-plugins"
@@ -70,28 +75,33 @@ var (
 	useExistingCluster          bool
 	useExistingOperator         bool
 	useExistingSchedulerPlugins bool
+	useExistingVolcanoScheduler bool
 	mpiOperatorImage            string
 	openMPIImage                string
 	intelMPIImage               string
 	mpichImage                  string
 	kindImage                   string
 	schedulerPluginsVersion     string
+	volcanoSchedulerVersion     string
 
-	k8sClient   kubernetes.Interface
-	mpiClient   clientset.Interface
-	schedClient schedclientset.Interface
+	k8sClient     kubernetes.Interface
+	mpiClient     clientset.Interface
+	schedClient   schedclientset.Interface
+	volcanoClient volcanoclient.Interface
 )
 
 func init() {
 	useExistingCluster = getEnvDefault(envUseExistingCluster, "false") == "true"
 	useExistingOperator = getEnvDefault(envUseExistingOperator, "false") == "true"
 	useExistingSchedulerPlugins = getEnvDefault(envUseExistingSchedulerPlugins, "false") == "true"
+	useExistingVolcanoScheduler = getEnvDefault(envUseExistingVolcanoScheduler, "false") == "true"
 	mpiOperatorImage = getEnvDefault(envTestMPIOperatorImage, defaultMPIOperatorImage)
 	openMPIImage = getEnvDefault(envTestOpenMPIImage, defaultOpenMPIImage)
 	intelMPIImage = getEnvDefault(envTestIntelMPIImage, defaultIntelMPIImage)
 	mpichImage = getEnvDefault(envTestMPICHImage, defaultMPICHImage)
 	kindImage = getEnvDefault(envTestKindImage, defaultKindImage)
 	schedulerPluginsVersion = getEnvDefault(envSchedulerPluginsVersion, defaultSchedulerPluginsVersion)
+	volcanoSchedulerVersion = getEnvDefault(envVolcanoSchedulerVersion, defaultVolcanoSchedulerVersion)
 }
 
 func TestE2E(t *testing.T) {
@@ -118,6 +128,9 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 	schedClient, err = schedclientset.NewForConfig(restConfig)
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
+	volcanoClient, err = volcanoclient.NewForConfig(restConfig)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
 	if !useExistingOperator {
 		ginkgo.By("Installing operator")
 		err = installOperator()
@@ -194,6 +207,28 @@ func installSchedulerPlugins() error {
 	})
 }
 
+func installVolcanoScheduler() error {
+	err := runCommand(kubectlPath, "apply", "-f", volcanoSchedulerManifestPath)
+	if err != nil {
+		return fmt.Errorf("failed to install volcano scheduler: %w", err)
+	}
+
+	volcanoNamespace := "volcano-system"
+	ctx := context.Background()
+	return wait.Poll(waitInterval, foreverTimeout, func() (bool, error) {
+		if ok, err := ensureDeploymentAvailableReplicas(ctx, volcanoNamespace, "volcano-scheduler"); !ok || err != nil {
+			return false, err
+		}
+		if ok, err := ensureDeploymentAvailableReplicas(ctx, volcanoNamespace, "volcano-controllers"); !ok || err != nil {
+			return false, err
+		}
+		if ok, err := ensureDeploymentAvailableReplicas(ctx, volcanoNamespace, "volcano-admission"); !ok || err != nil {
+			return false, err
+		}
+		return true, nil
+	})
+}
+
 func runCommand(name string, args ...string) error {
 	cmd := exec.Command(name, args...)
 	cmd.Stderr = os.Stderr
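
The new installVolcanoScheduler gates the suite on three Deployments from the all-in-one manifest (volcano-scheduler, volcano-controllers, volcano-admission) becoming available in the volcano-system namespace. The helper it polls, ensureDeploymentAvailableReplicas, already exists elsewhere in the e2e package and is not part of this diff; the following is only a minimal sketch of what it plausibly checks, reusing the package-level k8sClient — the NotFound handling and the exact readiness condition are assumptions, not the repository's code.

// Sketch only — the real helper lives elsewhere in the e2e package.
// Assumed imports:
//   apierrors "k8s.io/apimachinery/pkg/api/errors"
//   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
func ensureDeploymentAvailableReplicas(ctx context.Context, namespace, name string) (bool, error) {
	d, err := k8sClient.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return false, nil // not created yet; let the caller keep polling
	}
	if err != nil {
		return false, err
	}
	// Available once the Deployment reports a nonzero number of available
	// replicas matching the desired count; a scale to 0 therefore reads as false.
	return d.Status.AvailableReplicas != 0 && d.Status.AvailableReplicas == *d.Spec.Replicas, nil
}

A helper of this shape would explain how setupMPIOperator (in the test file below) uses it in both directions: after scaling the operator to 0 it waits for false, and after scaling back to 1 it waits for true.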

test/e2e/mpi_job_test.go

Lines changed: 141 additions & 24 deletions
@@ -317,42 +317,109 @@ var _ = ginkgo.Describe("MPIJob", func() {
 			// Set up the scheduler-plugins.
 			setUpSchedulerPlugins()
 			// Set up the mpi-operator so that the scheduler-plugins is used as gang-scheduler.
-			ginkgo.By("Scale-In the deployment to 0")
+			setupMPIOperator(ctx, mpiJob, enableGangSchedulingFlag, unschedulableResources)
+		})
+
+		ginkgo.AfterEach(func() {
 			operator, err := k8sClient.AppsV1().Deployments(mpiOperator).Get(ctx, mpiOperator, metav1.GetOptions{})
+			oldOperator := operator.DeepCopy()
 			gomega.Expect(err).Should(gomega.Succeed())
-			operator.Spec.Replicas = newInt32(0)
-			_, err = k8sClient.AppsV1().Deployments(mpiOperator).Update(ctx, operator, metav1.UpdateOptions{})
-			gomega.Expect(err).Should(gomega.Succeed())
-			gomega.Eventually(func() bool {
-				isNotZero, err := ensureDeploymentAvailableReplicas(ctx, mpiOperator, mpiOperator)
+			for i, arg := range operator.Spec.Template.Spec.Containers[0].Args {
+				if arg == enableGangSchedulingFlag {
+					operator.Spec.Template.Spec.Containers[0].Args = append(
+						operator.Spec.Template.Spec.Containers[0].Args[:i], operator.Spec.Template.Spec.Containers[0].Args[i+1:]...)
+					break
+				}
+			}
+			if diff := cmp.Diff(oldOperator, operator); len(diff) != 0 {
+				_, err = k8sClient.AppsV1().Deployments(mpiOperator).Update(ctx, operator, metav1.UpdateOptions{})
 				gomega.Expect(err).Should(gomega.Succeed())
-				return isNotZero
-			}, foreverTimeout, waitInterval).Should(gomega.BeFalse())
+				gomega.Eventually(func() bool {
+					ok, err := ensureDeploymentAvailableReplicas(ctx, mpiOperator, mpiOperator)
+					gomega.Expect(err).Should(gomega.Succeed())
+					return ok
+				}, foreverTimeout, waitInterval).Should(gomega.BeTrue())
+			}
+			// Clean up the scheduler-plugins.
+			cleanUpSchedulerPlugins()
+		})
+
+		ginkgo.It("should create pending pods", func() {
+			ginkgo.By("Creating MPIJob")
+			mpiJob := createJob(ctx, mpiJob)
+			var jobCondition *kubeflow.JobCondition
+			gomega.Eventually(func() *kubeflow.JobCondition {
+				updatedMPIJob, err := mpiClient.KubeflowV2beta1().MPIJobs(mpiJob.Namespace).Get(ctx, mpiJob.Name, metav1.GetOptions{})
+				gomega.Expect(err).Should(gomega.Succeed())
+				jobCondition = getJobCondition(updatedMPIJob, kubeflow.JobCreated)
+				return jobCondition
+			}, foreverTimeout, waitInterval).ShouldNot(gomega.BeNil())
+			gomega.Expect(jobCondition.Status).To(gomega.Equal(corev1.ConditionTrue))
+
+			ginkgo.By("Waiting for Pods to be created")
+			var pods *corev1.PodList
+			gomega.Eventually(func() error {
+				var err error
+				pods, err = k8sClient.CoreV1().Pods(mpiJob.Namespace).List(ctx, metav1.ListOptions{
+					LabelSelector: labels.FormatLabels(map[string]string{
+						schedv1alpha1.PodGroupLabel: mpiJob.Name,
+					}),
+				})
+				return err
+			}, foreverTimeout, waitInterval).Should(gomega.BeNil())
+			for _, pod := range pods.Items {
+				gomega.Expect(pod.Status.Phase).Should(gomega.Equal(corev1.PodPending))
+			}
+			pg, err := schedClient.SchedulingV1alpha1().PodGroups(mpiJob.Namespace).Get(ctx, mpiJob.Name, metav1.GetOptions{})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+			gomega.Expect(pg.Spec.MinResources.Cpu().String()).Should(gomega.BeComparableTo(unschedulableResources.Cpu().String()))
+			gomega.Expect(pg.Spec.MinResources.Memory().String()).Should(gomega.BeComparableTo(unschedulableResources.Memory().String()))
 
-			ginkgo.By("Update the replicas and args")
+			ginkgo.By("Updating MPIJob with schedulable schedulingPolicies")
 			gomega.Eventually(func() error {
-				updatedOperator, err := k8sClient.AppsV1().Deployments(mpiOperator).Get(ctx, mpiOperator, metav1.GetOptions{})
+				updatedJob, err := mpiClient.KubeflowV2beta1().MPIJobs(mpiJob.Namespace).Get(ctx, mpiJob.Name, metav1.GetOptions{})
 				gomega.Expect(err).Should(gomega.Succeed())
-				updatedOperator.Spec.Template.Spec.Containers[0].Args = append(updatedOperator.Spec.Template.Spec.Containers[0].Args, enableGangSchedulingFlag)
-				updatedOperator.Spec.Replicas = newInt32(1)
-				_, err = k8sClient.AppsV1().Deployments(mpiOperator).Update(ctx, updatedOperator, metav1.UpdateOptions{})
+				updatedJob.Spec.RunPolicy.SchedulingPolicy.MinResources = nil
+				_, err = mpiClient.KubeflowV2beta1().MPIJobs(updatedJob.Namespace).Update(ctx, updatedJob, metav1.UpdateOptions{})
 				return err
 			}, foreverTimeout, waitInterval).Should(gomega.BeNil())
 
-			ginkgo.By("Should be replicas is 1")
-			gomega.Eventually(func() bool {
-				isNotZero, err := ensureDeploymentAvailableReplicas(ctx, mpiOperator, mpiOperator)
+			ginkgo.By("Waiting for MPIJob to be running")
+			gomega.Eventually(func() corev1.ConditionStatus {
+				updatedJob, err := mpiClient.KubeflowV2beta1().MPIJobs(mpiJob.Namespace).Get(ctx, mpiJob.Name, metav1.GetOptions{})
 				gomega.Expect(err).Should(gomega.Succeed())
-				return isNotZero
-			}, foreverTimeout, waitInterval).Should(gomega.BeTrue())
-			createMPIJobWithOpenMPI(mpiJob)
-			mpiJob.Spec.RunPolicy.SchedulingPolicy = &kubeflow.SchedulingPolicy{MinResources: unschedulableResources}
+				cond := getJobCondition(updatedJob, kubeflow.JobRunning)
+				if cond == nil {
+					return corev1.ConditionFalse
+				}
+				return cond.Status
+			}, foreverTimeout, waitInterval).Should(gomega.Equal(corev1.ConditionTrue))
+		})
+	})
+
+	// volcano e2e tests
+	ginkgo.Context("with volcano-scheduler", func() {
+		const enableGangSchedulingFlag = "--gang-scheduling=volcano"
+		var (
+			ctx                    = context.Background()
+			unschedulableResources = &corev1.ResourceList{
+				corev1.ResourceCPU:    resource.MustParse("100000"),   // unschedulable
+				corev1.ResourceMemory: resource.MustParse("100000Gi"), // unschedulable
+			}
+		)
+
+		ginkgo.BeforeEach(func() {
+			// Set up the volcano-scheduler.
+			setupVolcanoScheduler()
+			// Set up the mpi-operator so that the volcano scheduler is used as gang-scheduler.
+			setupMPIOperator(ctx, mpiJob, enableGangSchedulingFlag, unschedulableResources)
 		})
 
 		ginkgo.AfterEach(func() {
 			operator, err := k8sClient.AppsV1().Deployments(mpiOperator).Get(ctx, mpiOperator, metav1.GetOptions{})
 			oldOperator := operator.DeepCopy()
 			gomega.Expect(err).Should(gomega.Succeed())
+			// disable gang-scheduler in operator
 			for i, arg := range operator.Spec.Template.Spec.Containers[0].Args {
 				if arg == enableGangSchedulingFlag {
 					operator.Spec.Template.Spec.Containers[0].Args = append(
@@ -369,8 +436,8 @@ var _ = ginkgo.Describe("MPIJob", func() {
 					return ok
 				}, foreverTimeout, waitInterval).Should(gomega.BeTrue())
 			}
-			// Clean up the scheduler-plugins.
-			cleanUpSchedulerPlugins()
+			// Clean up the volcano scheduler.
+			cleanUpVolcanoScheduler()
 		})
 
 		ginkgo.It("should create pending pods", func() {
@@ -391,15 +458,15 @@ var _ = ginkgo.Describe("MPIJob", func() {
 				var err error
 				pods, err = k8sClient.CoreV1().Pods(mpiJob.Namespace).List(ctx, metav1.ListOptions{
 					LabelSelector: labels.FormatLabels(map[string]string{
-						schedv1alpha1.PodGroupLabel: mpiJob.Name,
+						common.JobNameLabel: mpiJob.Name,
 					}),
 				})
 				return err
 			}, foreverTimeout, waitInterval).Should(gomega.BeNil())
 			for _, pod := range pods.Items {
 				gomega.Expect(pod.Status.Phase).Should(gomega.Equal(corev1.PodPending))
 			}
-			pg, err := schedClient.SchedulingV1alpha1().PodGroups(mpiJob.Namespace).Get(ctx, mpiJob.Name, metav1.GetOptions{})
+			pg, err := volcanoClient.SchedulingV1beta1().PodGroups(mpiJob.Namespace).Get(ctx, mpiJob.Name, metav1.GetOptions{})
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())
 			gomega.Expect(pg.Spec.MinResources.Cpu().String()).Should(gomega.BeComparableTo(unschedulableResources.Cpu().String()))
 			gomega.Expect(pg.Spec.MinResources.Memory().String()).Should(gomega.BeComparableTo(unschedulableResources.Memory().String()))
@@ -593,3 +660,53 @@ func cleanUpSchedulerPlugins() {
 		gomega.Expect(err).Should(gomega.Succeed())
 	}
 }
+
+func setupVolcanoScheduler() {
+	if !useExistingVolcanoScheduler {
+		ginkgo.By("Installing volcano-scheduler")
+		err := installVolcanoScheduler()
+		gomega.Expect(err).Should(gomega.Succeed())
+	}
+}
+
+func cleanUpVolcanoScheduler() {
+	if !useExistingVolcanoScheduler {
+		ginkgo.By("Uninstalling volcano-scheduler")
+		err := runCommand(kubectlPath, "delete", "-f", volcanoSchedulerManifestPath)
+		gomega.Expect(err).Should(gomega.Succeed())
+	}
+}
+
+// setupMPIOperator scales the mpi-operator deployment down to zero and back up so that the newly configured gang-scheduler takes effect.
+func setupMPIOperator(ctx context.Context, mpiJob *kubeflow.MPIJob, enableGangSchedulingFlag string, unschedulableResources *corev1.ResourceList) {
+	ginkgo.By("Scale-In the deployment to 0")
+	operator, err := k8sClient.AppsV1().Deployments(mpiOperator).Get(ctx, mpiOperator, metav1.GetOptions{})
+	gomega.Expect(err).Should(gomega.Succeed())
+	operator.Spec.Replicas = newInt32(0)
+	_, err = k8sClient.AppsV1().Deployments(mpiOperator).Update(ctx, operator, metav1.UpdateOptions{})
+	gomega.Expect(err).Should(gomega.Succeed())
+	gomega.Eventually(func() bool {
+		isNotZero, err := ensureDeploymentAvailableReplicas(ctx, mpiOperator, mpiOperator)
+		gomega.Expect(err).Should(gomega.Succeed())
+		return isNotZero
+	}, foreverTimeout, waitInterval).Should(gomega.BeFalse())
+
+	ginkgo.By("Update the replicas and args")
+	gomega.Eventually(func() error {
+		updatedOperator, err := k8sClient.AppsV1().Deployments(mpiOperator).Get(ctx, mpiOperator, metav1.GetOptions{})
+		gomega.Expect(err).Should(gomega.Succeed())
+		updatedOperator.Spec.Template.Spec.Containers[0].Args = append(updatedOperator.Spec.Template.Spec.Containers[0].Args, enableGangSchedulingFlag)
+		updatedOperator.Spec.Replicas = newInt32(1)
+		_, err = k8sClient.AppsV1().Deployments(mpiOperator).Update(ctx, updatedOperator, metav1.UpdateOptions{})
+		return err
+	}, foreverTimeout, waitInterval).Should(gomega.BeNil())
+
+	ginkgo.By("Waiting for the replicas to be 1")
+	gomega.Eventually(func() bool {
+		isNotZero, err := ensureDeploymentAvailableReplicas(ctx, mpiOperator, mpiOperator)
+		gomega.Expect(err).Should(gomega.Succeed())
+		return isNotZero
+	}, foreverTimeout, waitInterval).Should(gomega.BeTrue())
+	createMPIJobWithOpenMPI(mpiJob)
+	mpiJob.Spec.RunPolicy.SchedulingPolicy = &kubeflow.SchedulingPolicy{MinResources: unschedulableResources}
+}
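
Both gang-scheduling contexts assert readiness through getJobCondition, which this diff calls but does not show. A hedged sketch, assuming it simply scans MPIJob.Status.Conditions for the requested type — the real helper in the e2e package may differ:

// Sketch only — not part of this commit.
// Assumes kubeflow = github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1.
func getJobCondition(mpiJob *kubeflow.MPIJob, condType kubeflow.JobConditionType) *kubeflow.JobCondition {
	for i := range mpiJob.Status.Conditions {
		// Return a pointer into the slice so the caller can read Status directly.
		if mpiJob.Status.Conditions[i].Type == condType {
			return &mpiJob.Status.Conditions[i]
		}
	}
	return nil
}

With a helper of this shape, the Eventually block above maps a missing JobRunning condition to corev1.ConditionFalse and keeps polling until the condition reports ConditionTrue.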

test/integration/main_test.go

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ func TestMain(m *testing.M) {
 		CRDDirectoryPaths: []string{
 			filepath.Join("..", "..", "manifests", "base"),
 			filepath.Join("..", "..", "dep-crds", "scheduler-plugins"),
+			filepath.Join("..", "..", "dep-crds", "volcano-scheduler"),
 		},
 	}
 	var err error
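
This one-line change is what lets the integration suite resolve the volcano PodGroup API: envtest installs every CRD found under CRDDirectoryPaths (here populated by the new volcano-scheduler-crd make target) into its local control plane before the tests run. A minimal sketch of the surrounding setup, assuming the stock sigs.k8s.io/controller-runtime/pkg/envtest API — the suite's actual TestMain also builds clients and runs the controller under test:

// Sketch only — illustrates how CRDDirectoryPaths is consumed.
testEnv := &envtest.Environment{
	CRDDirectoryPaths: []string{
		filepath.Join("..", "..", "manifests", "base"),
		filepath.Join("..", "..", "dep-crds", "scheduler-plugins"),
		filepath.Join("..", "..", "dep-crds", "volcano-scheduler"),
	},
}
// Start spins up a local etcd and kube-apiserver and applies the CRDs;
// the returned rest.Config is what the test clients are built from.
restConfig, err := testEnv.Start()
if err != nil {
	panic(err)
}
defer testEnv.Stop()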
