e2e,gpu,fpga: divide single It() to have multiple layers #1491


Merged: 2 commits, Aug 16, 2023
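
The refactoring pattern in this PR is plain Ginkgo v2 layering: one monolithic It() is split into Context() containers whose BeforeEach() nodes own the per-mode setup, so every It() becomes a separately reported spec that starts from fresh setup and fails in isolation. A minimal, self-contained sketch of the pattern (illustrative only; the names below are ours, not code from this repository):

package layering_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestLayering(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Layering sketch")
}

var _ = ginkgo.Describe("Plugin", func() {
	var mode string

	ginkgo.Context("when running in region mode", func() {
		// BeforeEach runs again before every It in this Context,
		// so each spec starts from freshly prepared state.
		ginkgo.BeforeEach(func() { mode = "region" })

		ginkgo.It("sees region-mode setup", func() {
			gomega.Expect(mode).To(gomega.Equal("region"))
		})
	})

	ginkgo.Context("when running in af mode", func() {
		ginkgo.BeforeEach(func() { mode = "af" })

		ginkgo.It("sees af-mode setup", func() {
			gomega.Expect(mode).To(gomega.Equal("af"))
		})
	})
})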
test/e2e/fpga/fpga.go (23 additions, 10 deletions)
@@ -47,7 +47,7 @@ const (
 )
 
 func init() {
-	ginkgo.Describe("FPGA Plugin E2E tests", describe)
+	ginkgo.Describe("FPGA Plugin", describe)
 }
 
 func describe() {
@@ -64,16 +64,27 @@ func describe() {
 	fmw := framework.NewDefaultFramework("fpgaplugin-e2e")
 	fmw.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
-	ginkgo.It("Run FPGA plugin tests", func(ctx context.Context) {
-		// Run region test case twice to ensure that device is reprogrammed at least once
-		runTestCase(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, "region", arria10NodeResource, nlb3PodResource, "nlb3", "nlb0")
-		runTestCase(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, "region", arria10NodeResource, nlb0PodResource, "nlb0", "nlb3")
-		// Run af test case
-		runTestCase(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, "af", nlb0NodeResource, nlb0PodResourceAF, "nlb0", "nlb3")
+	ginkgo.Context("When FPGA plugin is running in region mode", func() {
+		ginkgo.BeforeEach(func(ctx context.Context) {
+			runDevicePlugin(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, arria10NodeResource, "region")
+		})
+		ginkgo.It("runs an opae-nlb-demo pod two times", func(ctx context.Context) {
+			runTestCase(ctx, fmw, "region", nlb3PodResource, "nlb3", "nlb0")
+			runTestCase(ctx, fmw, "region", nlb0PodResource, "nlb0", "nlb3")
+		})
+	})
+
+	ginkgo.Context("When FPGA plugin is running in af mode", func() {
+		ginkgo.BeforeEach(func(ctx context.Context) {
+			runDevicePlugin(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, nlb0NodeResource, "af")
+		})
+		ginkgo.It("runs an opae-nlb-demo pod", func(ctx context.Context) {
+			runTestCase(ctx, fmw, "af", nlb0PodResourceAF, "nlb0", "nlb3")
+		})
 	})
 }
 
-func runTestCase(ctx context.Context, fmw *framework.Framework, pluginKustomizationPath, mappingsCollectionPath, pluginMode, nodeResource, podResource, cmd1, cmd2 string) {
+func runDevicePlugin(ctx context.Context, fmw *framework.Framework, pluginKustomizationPath, mappingsCollectionPath, nodeResource, pluginMode string) {
 	tmpDir, err := os.MkdirTemp("", "fpgaplugine2etest-"+fmw.Namespace.Name)
 	if err != nil {
 		framework.Failf("unable to create temp directory: %v", err)
@@ -101,8 +112,10 @@ func runTestCase(ctx context.Context, fmw *framework.Framework, pluginKustomizationPath,
 	if err = utils.WaitForNodesWithResource(ctx, fmw.ClientSet, resource, 30*time.Second); err != nil {
 		framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
 	}
+}
 
-	resource = v1.ResourceName(podResource)
+func runTestCase(ctx context.Context, fmw *framework.Framework, pluginMode, podResource, cmd1, cmd2 string) {
+	resource := v1.ResourceName(podResource)
 	image := "intel/opae-nlb-demo:devel"
 
 	ginkgo.By("submitting a pod requesting correct FPGA resources")
@@ -111,7 +124,7 @@ func runTestCase(ctx context.Context, fmw *framework.Framework, pluginKustomizationPath,
 
 	ginkgo.By("waiting the pod to finish successfully")
 
-	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, fmw.ClientSet, pod.ObjectMeta.Name, fmw.Namespace.Name, 60*time.Second)
+	err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, fmw.ClientSet, pod.ObjectMeta.Name, fmw.Namespace.Name, 60*time.Second)
 	gomega.Expect(err).To(gomega.BeNil(), utils.GetPodLogs(ctx, fmw, pod.ObjectMeta.Name, "testcontainer"))
 
 	ginkgo.By("submitting a pod requesting incorrect FPGA resources")
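A side effect of the FPGA split worth noting (our observation, not stated in the PR): because each mode now lives in its own Context, a single mode can be run in isolation during development with Ginkgo v2's focused-container variants. A hedged sketch, with empty spec bodies standing in for the real tests:

package focus_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestFocus(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Focus sketch")
}

var _ = ginkgo.Describe("FPGA Plugin", func() {
	ginkgo.Context("When FPGA plugin is running in region mode", func() {
		ginkgo.It("runs an opae-nlb-demo pod two times", func() {})
	})

	// FContext is the focused variant of Context: when any focused node
	// exists, Ginkgo runs only the focused specs, and the ginkgo CLI exits
	// non-zero so a leftover focus is caught before it reaches CI.
	ginkgo.FContext("When FPGA plugin is running in af mode", func() {
		ginkgo.It("runs an opae-nlb-demo pod", func() {})
	})
})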
test/e2e/gpu/gpu.go (41 additions, 36 deletions)
@@ -52,7 +52,7 @@ func describe() {
 		framework.Failf("unable to locate %q: %v", kustomizationYaml, errFailedToLocateRepoFile)
 	}
 
-	ginkgo.It("checks availability of GPU resources", func(ctx context.Context) {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		ginkgo.By("deploying GPU plugin")
 		e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(kustomizationPath))
 

Expand All @@ -69,49 +69,54 @@ func describe() {
if err = utils.TestPodsFileSystemInfo(podList.Items); err != nil {
framework.Failf("container filesystem info checks failed: %v", err)
}
})

ginkgo.By("checking if the resource is allocatable")
if err = utils.WaitForNodesWithResource(ctx, f.ClientSet, "gpu.intel.com/i915", 30*time.Second); err != nil {
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
}

ginkgo.By("submitting a pod requesting GPU resources")
podSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "gpuplugin-tester"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Args: []string{"-c", "ls /dev/dri"},
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")},
Limits: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")},
ginkgo.Context("When GPU resources are available", func() {
ginkgo.BeforeEach(func(ctx context.Context) {
ginkgo.By("checking if the resource is allocatable")
if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "gpu.intel.com/i915", 30*time.Second); err != nil {
framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
}
})
ginkgo.It("checks availability of GPU resources", func(ctx context.Context) {
ginkgo.By("submitting a pod requesting GPU resources")
podSpec := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "gpuplugin-tester"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Args: []string{"-c", "ls /dev/dri"},
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")},
Limits: v1.ResourceList{"gpu.intel.com/i915": resource.MustParse("1")},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
RestartPolicy: v1.RestartPolicyNever,
},
}
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{})
framework.ExpectNoError(err, "pod Create API error")
}
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{})
framework.ExpectNoError(err, "pod Create API error")

ginkgo.By("waiting the pod to finish successfully")
e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second)
ginkgo.By("waiting the pod to finish successfully")
e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second)

ginkgo.By("checking log output")
log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)
ginkgo.By("checking log output")
log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName)

if err != nil {
framework.Failf("unable to get log from pod: %v", err)
}
if err != nil {
framework.Failf("unable to get log from pod: %v", err)
}

if !strings.Contains(log, "card") || !strings.Contains(log, "renderD") {
framework.Logf("log output: %s", log)
framework.Failf("device mounts not found from log")
}
if !strings.Contains(log, "card") || !strings.Contains(log, "renderD") {
framework.Logf("log output: %s", log)
framework.Failf("device mounts not found from log")
}

framework.Logf("found card and renderD from the log")
framework.Logf("found card and renderD from the log")
})
})
}
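
After this change the GPU suite has two setup layers: the Describe-level BeforeEach deploys the plugin, and the Context-level BeforeEach waits for gpu.intel.com/i915 to become allocatable. Ginkgo runs outer BeforeEach nodes before inner ones, and all of them before each It. A minimal, self-contained sketch of that ordering guarantee (ours, not from the PR):

package ordering_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestOrdering(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Ordering sketch")
}

var _ = ginkgo.Describe("GPU plugin (sketch)", func() {
	var steps []string

	// Outer BeforeEach: runs first for every spec in the tree.
	ginkgo.BeforeEach(func() { steps = []string{"deploy plugin"} })

	ginkgo.Context("When GPU resources are available", func() {
		// Inner BeforeEach: runs after the outer one, before each It.
		ginkgo.BeforeEach(func() { steps = append(steps, "wait for allocatable i915") })

		ginkgo.It("runs with both setup layers applied", func() {
			gomega.Expect(steps).To(gomega.Equal([]string{"deploy plugin", "wait for allocatable i915"}))
		})
	})
})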