diff --git a/test/e2e/cli.go b/test/e2e/cli.go new file mode 100644 index 0000000000..7670b3f9f1 --- /dev/null +++ b/test/e2e/cli.go @@ -0,0 +1,1144 @@ +package e2e + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/util/wait" +) + +var _ = g.Describe("[sig-cli] Workloads test oc works well", func() { + defer g.GinkgoRecover() + + var ( + oc = NewCLI("oc", KubeConfigPath()) + ) + + // author: knarra@redhat.com + g.It("ROSA-OSD_CCS-ARO-Author:knarra-Medium-48681-Could start debug pod using pod definition yaml", func() { + buildPruningBaseDir := FixturePath("testdata", "oc_cli") + debugPodUsingDefinitionT := filepath.Join(buildPruningBaseDir, "debugpod_48681.yaml") + + g.By("create new namespace") + oc.SetupProject() + g.By("Get the cli image from openshift") + cliImage := getCliImage(oc) + + pod48681 := debugPodUsingDefinition{ + name: "pod48681", + namespace: oc.Namespace(), + cliImageID: cliImage, + template: debugPodUsingDefinitionT, + } + + g.By("Create test pod and verify oc debug works with pod definition yaml") + // createDebugPodUsingDefinition verifies: + // 1. Pod is created successfully from template + // 2. Pod reaches Running state within 1 minute + // 3. oc debug -f command executes successfully + // 4. 
Debug output contains "Starting pod/pod48681-debug" (not image debug container) + pod48681.createDebugPodUsingDefinition(oc) + defer oc.WithoutNamespace().Run("delete").Args("pod/pod48681", "-n", oc.Namespace()).Execute() + }) + + // author: yinzhou@redhat.com + g.It("Author:yinzhou-NonPreRelease-Longduration-High-45307-Critical-45327-check oc adm prune deployments to prune RS [Serial][Timeout:30m]", func() { + g.By("create new namespace") + oc.SetupProject() + + g.By("Create deployments and trigger more times") + createDeployment(oc, oc.Namespace(), "mydep45307") + triggerSucceedDeployment(oc, oc.Namespace(), "mydep45307", 6, 20) + triggerFailedDeployment(oc, oc.Namespace(), "mydep45307") + + g.By("get the completed rs infomation") + totalCompletedRsList, totalCompletedRsListNum := getCompeletedRsInfo(oc, oc.Namespace(), "mydep45307") + + g.By("Dry run the prune deployments for RS") + keepCompletedRsNum := 3 + pruneRsNumCMD := fmt.Sprintf("oc adm prune deployments --keep-complete=%v --keep-younger-than=10s --replica-sets=true |grep %s |wc -l", keepCompletedRsNum, oc.Namespace()) + pruneRsDryCMD := fmt.Sprintf("oc adm prune deployments --keep-complete=%v --keep-younger-than=10s --replica-sets=true |grep %s|awk '{print $2}'", keepCompletedRsNum, oc.Namespace()) + rsListFromPrune := getShouldPruneRSFromPrune(oc, pruneRsNumCMD, pruneRsDryCMD, (totalCompletedRsListNum - keepCompletedRsNum)) + shouldPruneRsList := getShouldPruneRSFromCreateTime(totalCompletedRsList, totalCompletedRsListNum, keepCompletedRsNum) + if comparePrunedRS(shouldPruneRsList, rsListFromPrune) { + e2e.Logf("Checked the pruned rs is expected") + } else { + e2e.Failf("Pruned the wrong RS with dry run") + } + + g.By("Make sure never prune RS with replicas num >0") + //before prune ,check the running rs list + runningRsList := checkRunningRsList(oc, oc.Namespace(), "mydep45307") + + //checking the should prune rs list + completedRsNum := 0 + pruneRsNumCMD = fmt.Sprintf("oc adm prune deployments 
--keep-complete=%v --keep-younger-than=10s --replica-sets=true |grep %s |wc -l", completedRsNum, oc.Namespace()) + pruneRsDryCMD = fmt.Sprintf("oc adm prune deployments --keep-complete=%v --keep-younger-than=10s --replica-sets=true |grep %s|awk '{print $2}'", completedRsNum, oc.Namespace()) + + rsListFromPrune = getShouldPruneRSFromPrune(oc, pruneRsNumCMD, pruneRsDryCMD, (totalCompletedRsListNum - completedRsNum)) + shouldPruneRsList = getShouldPruneRSFromCreateTime(totalCompletedRsList, totalCompletedRsListNum, completedRsNum) + if comparePrunedRS(shouldPruneRsList, rsListFromPrune) { + e2e.Logf("dry run prune all completed rs is expected") + } else { + e2e.Failf("Pruned the wrong RS with dry run") + } + + //prune all the completed rs list + pruneCompletedRs(oc, "prune", "deployments", "--keep-complete=0", "--keep-younger-than=10s", "--replica-sets=true", "--confirm") + + //after prune , check the remaining rs list + remainingRsList := getRemainingRs(oc, oc.Namespace(), "mydep45307") + if comparePrunedRS(runningRsList, remainingRsList) { + e2e.Logf("pruned all completed rs is expected") + } else { + e2e.Failf("Pruned the wrong") + } + }) + + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-Author:yinzhou-Medium-49859-should failed when oc import-image setting with Garbage values for --reference-policy", func() { + g.By("create new namespace") + oc.SetupProject() + + g.By("import image with garbage values set for reference-policy") + out, err := oc.Run("import-image").Args("registry.redhat.io/openshift3/jenkins-2-rhel7", "--reference-policy=sdfsdfds", "--confirm").Output() + o.Expect(err).Should(o.HaveOccurred()) + o.Expect(out).To(o.ContainSubstring("reference policy values are source or local")) + + g.By("check should no imagestream created") + out, err = oc.Run("get").Args("is").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(out).To(o.ContainSubstring("No resources found")) + }) + + // author: yinzhou@redhat.com + 
g.It("ROSA-OSD_CCS-ARO-NonPreRelease-Longduration-Author:yinzhou-High-42982-Describe quota output should always show units [Timeout:30m]", func() { + if isBaselineCapsSet(oc, "None") || isBaselineCapsSet(oc, "v4.13") || isBaselineCapsSet(oc, "v4.12") || isBaselineCapsSet(oc, "v4.14") || isBaselineCapsSet(oc, "v4.15") || isBaselineCapsSet(oc, "v4.11") && !isEnabledCapability(oc, "DeploymentConfig") { + skipMsg := "Skipping the test as baselinecaps have been set to None and some of API capabilities are not enabled!" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + // Skip Hypershift external OIDC clusters against which all test cases run as the same (external) user + isExternalOIDCCluster, err := IsExternalOIDCCluster(oc) + o.Expect(err).NotTo(o.HaveOccurred()) + if isExternalOIDCCluster { + skipMsg := "Skipping the test as we are running against a Hypershift external OIDC cluster" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + buildPruningBaseDir := FixturePath("testdata", "oc_cli") + deploymentconfigF := filepath.Join(buildPruningBaseDir, "deploymentconfig_with_quota.yaml") + clusterresourceF := filepath.Join(buildPruningBaseDir, "clusterresource_for_user.yaml") + g.By("create new namespace") + oc.SetupProject() + err = oc.AsAdmin().Run("create").Args("quota", "compute-resources-42982", "--hard=requests.cpu=4,requests.memory=8Gi,pods=4,limits.cpu=4,limits.memory=8Gi").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = oc.Run("create").Args("-f", deploymentconfigF).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + //wait for pod running + checkPodStatus(oc, "deploymentconfig=hello-openshift", oc.Namespace(), "Running") + checkPodStatus(oc, "openshift.io/deployer-pod-for.name=hello-openshift-1", oc.Namespace(), "Succeeded") + output, err := oc.Run("describe").Args("quota", "compute-resources-42982").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if matched, _ := regexp.MatchString("requests.memory.*Ki.*8Gi", 
output); matched { + e2e.Logf("describe the quota with units:\n%s", output) + } + + //check for clusterresourcequota + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("clusterresourcequota", "for-user42982").Execute() + userName, err := oc.Run("whoami").Args("").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", clusterresourceF).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("current user name is %v", userName) + patchPath := fmt.Sprintf("-p=[{\"op\": \"replace\", \"path\": \"/spec/selector/annotations\", \"value\":{ \"openshift.io/requester\": \"%s\" }}]", userName) + err = oc.AsAdmin().WithoutNamespace().Run("patch").Args("clusterresourcequota", "for-user42982", "--type=json", patchPath).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = oc.WithoutNamespace().Run("new-project").Args("p42982-1").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", "p42982-1").Execute() + err = oc.WithoutNamespace().Run("create").Args("-f", deploymentconfigF, "-n", "p42982-1").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + //wait for pod running + checkPodStatus(oc, "deploymentconfig=hello-openshift", "p42982-1", "Running") + checkPodStatus(oc, "openshift.io/deployer-pod-for.name=hello-openshift-1", "p42982-1", "Succeeded") + output, err = oc.AsAdmin().WithoutNamespace().Run("describe").Args("clusterresourcequota", "for-user42982").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("clusterresourcequota output:\n%s", output) + // Check if quota shows resources with correct units (may not show Used if no consumption yet) + if matched, _ := regexp.MatchString("requests\\.memory.*8Gi", output); !matched { + e2e.Logf("Warning: clusterresourcequota output did not show expected memory quota format") + } + + }) + + // author: yinzhou@redhat.com + g.It("Author:yinzhou-High-38178-oc should be able to debug init 
container", func() { + oc.SetupProject() + podBaseDir := FixturePath("testdata", "oc_cli") + initPodFile := filepath.Join(podBaseDir, "initContainer.yaml") + + SetNamespacePrivileged(oc, oc.Namespace()) + g.By("Create pod with init container") + err := oc.Run("create").Args("-f", initPodFile).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Make sure pod with init container running well") + checkPodStatus(oc, "name=hello-pod", oc.Namespace(), "Running") + g.By("Run debug command with init container") + err = wait.Poll(10*time.Second, 100*time.Second, func() (bool, error) { + output, err := oc.Run("debug").Args("pod/hello-pod", "-c", "wait").Output() + if err != nil { + e2e.Logf("debug failed with error: %s. Trying again", err) + return false, nil + } + if matched, _ := regexp.MatchString("sleep", output); matched { + e2e.Logf("Check the debug pod with init container command succeeded\n") + return true, nil + } + return false, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("Cannot get debug with init container")) + }) + + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-ConnectedOnly-Author:yinzhou-Medium-51018-oc adm release extract support manifest list", func() { + skipIfDisconnected(oc) + + if !assertPullSecret(oc) { + skipMsg := "The cluster does not have pull secret for public registry hence skipping..." 
+ e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + extractTmpDirName := "/tmp/case51018" + err := os.MkdirAll(extractTmpDirName, 0700) + o.Expect(err).NotTo(o.HaveOccurred()) + defer os.RemoveAll(extractTmpDirName) + + _, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", fmt.Sprintf("--to=%s", extractTmpDirName), "--confirm").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + pullSpec := getLatestPayload("https://multi.ocp.releases.ci.openshift.org/api/v1/releasestream/4-stable-multi/latest") + e2e.Logf("The pullSpec is %s \n", pullSpec) + if len(pullSpec) == 0 || strings.TrimSpace(pullSpec) == "" { + skipMsg := "pullSpec is empty, so skipping the test" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + err = oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "extract", "-a", extractTmpDirName+"/.dockerconfigjson", "--command=oc.rhel8", "--to="+extractTmpDirName, pullSpec).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Check oc executable to make sure match the platform") + _, err = exec.Command("bash", "-c", "/tmp/case51018/oc version").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + err = oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "extract", "-a", extractTmpDirName+"/.dockerconfigjson", "--command=oc", "--to="+extractTmpDirName+"/mac", pullSpec, "--command-os=mac/amd64").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + macocheckcmd := "file /tmp/case51018/mac/oc" + output, err := exec.Command("bash", "-c", macocheckcmd).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(output).To(o.ContainSubstring("Mach-O")) + err = oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "extract", "-a", extractTmpDirName+"/.dockerconfigjson", "--command=oc", "--to="+extractTmpDirName+"/macarm", pullSpec, "--command-os=mac/arm64").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + macocheckcmd = "file 
/tmp/case51018/macarm/oc" + output, err = exec.Command("bash", "-c", macocheckcmd).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(output).To(o.ContainSubstring("Mach-O 64-bit arm64 executable")) + err = oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "extract", "-a", extractTmpDirName+"/.dockerconfigjson", "--command=oc", "--to="+extractTmpDirName+"/windows", pullSpec, "--command-os=windows").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + windowocheckcmd := "file /tmp/case51018/windows/oc" + output, err = exec.Command("bash", "-c", windowocheckcmd).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(output).To(o.ContainSubstring("Windows")) + }) + + // author: knarra@redhat.com + g.It("ROSA-OSD_CCS-ARO-ConnectedOnly-Author:knarra-Medium-66989-Workloads oc debug with or without init container for pod", func() { + skipIfDisconnected(oc) + + oc.SetupProject() + testBaseDir := FixturePath("testdata", "oc_cli") + initContainerFile := filepath.Join(testBaseDir, "initContainer66989.yaml") + SetNamespacePrivileged(oc, oc.Namespace()) + g.By("Create pod with InitContainer") + err := oc.Run("create").Args("-f", initContainerFile).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Make sure pod with init container running well") + checkPodStatus(oc, "name=hello-pod", oc.Namespace(), "Running") + g.By("Run debug command with init container") + cmd, _, _, err := oc.Run("debug").Args("pod/hello-pod", "--keep-init-containers=true").Background() + defer cmd.Process.Kill() + o.Expect(err).NotTo(o.HaveOccurred()) + err = wait.Poll(5*time.Second, 100*time.Second, func() (bool, error) { + debugPodName, err := oc.WithoutNamespace().Run("get").Args("pods", "-n", oc.Namespace()).Output() + if err != nil { + e2e.Logf("debug failed with error: %s. 
Trying again", err) + return false, nil + } + if matched, _ := regexp.MatchString("hello-pod-debug", debugPodName); matched { + e2e.Logf("Check the debug pod command succeeded\n") + return true, nil + } + return false, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("Cannot get debug with init container")) + + g.By("Check if Init Containers present in debug pod output") + debugPodName, err := oc.WithoutNamespace().Run("get").Args("pods", "-n", oc.Namespace(), "-o=jsonpath={.items[1].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + Output, err := oc.WithoutNamespace().Run("describe").Args("pods", debugPodName, "-n", oc.Namespace()).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + if matched, _ := regexp.MatchString("Init Containers", Output); !matched { + e2e.Failf("Init Containers are not seen in the output when run with keep init containers true") + } + _, err = oc.WithoutNamespace().Run("delete").Args("pods", debugPodName, "-n", oc.Namespace(), "--wait=false").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + }) + + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-Author:yinzhou-LEVEL0-Critical-63002-oc new-app propagate containerPort information to the deployment if import-mode is PreserveOriginal", func() { + g.By("create new namespace") + oc.SetupProject() + g.By("create new-app with import-mode as PreserveOrigin") + err := oc.WithoutNamespace().Run("new-app").Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", oc.Namespace(), "--name=example-preserveoriginal", "--import-mode=PreserveOriginal").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + out, err := oc.WithoutNamespace().Run("get").Args("svc", "-n", oc.Namespace()).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(out, "example-preserveoriginal")).To(o.BeTrue()) + }) + + // author: knarra@redhat.com + 
g.It("ROSA-OSD_CCS-ARO-Author:knarra-LEVEL0-Critical-64921-Critical-63854-Verify oc adm release info and oc image extract using --idms-file flag", func() { + skipIfDisconnected(oc) + + buildPruningBaseDir := FixturePath("testdata", "oc_cli") + idmsFile64921 := filepath.Join(buildPruningBaseDir, "idmsFile64921.yaml") + var ( + image string + ) + + g.By("Get desired image from ocp cluster") + pullSpec, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o", "jsonpath={..desired.image}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(pullSpec).NotTo(o.BeEmpty()) + e2e.Logf("pullspec is %v", pullSpec) + + g.By("Check if imageContentSourcePolicy image-policy-aosqe exists, if not skip the case") + existingIcspOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy", "--ignore-not-found").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if !(strings.Contains(existingIcspOutput, "image-policy-aosqe")) { + skipMsg := "Image-policy-aosqe icsp not found, skipping the case" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + // Retreive image registry name + imageRegistryName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy", "image-policy-aosqe", "-o=jsonpath={.spec.repositoryDigestMirrors[0].mirrors[0]}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + imageRegistryName = strings.Split(imageRegistryName, ":")[0] + e2e.Logf("ImageRegistryName is %s", imageRegistryName) + + // Replace localhost with retreived registry name from the cluster in idms file + sedCmd := fmt.Sprintf(`sed -i 's/localhost/%s/g' %s`, imageRegistryName, idmsFile64921) + _, err = exec.Command("bash", "-c", sedCmd).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + // Replace target correctly in the icsp file + sedCmdOne := fmt.Sprintf(`sed -i 's/target/%s/g' %s`, strings.Split(pullSpec, "/")[1], idmsFile64921) + _, err = exec.Command("bash", "-c", sedCmdOne).Output() + 
o.Expect(err).NotTo(o.HaveOccurred()) + + // Extract secret and store it + extractTmpDirName := "/tmp/case64921" + err = os.MkdirAll(extractTmpDirName, 0700) + o.Expect(err).NotTo(o.HaveOccurred()) + defer os.RemoveAll(extractTmpDirName) + _, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", fmt.Sprintf("--to=%s", extractTmpDirName), "--confirm").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + // Retreive image digest + imageDigest := strings.Split(pullSpec, "@")[1] + e2e.Logf("imageDigest is %s", imageDigest) + + // Remove auth & run command oc adm release info with out --idms-flag + dockerTmpDirName := "/tmp/case64921/.dockerconfigjson" + authContent, readErr := os.ReadFile(dockerTmpDirName) + o.Expect(readErr).NotTo(o.HaveOccurred()) + + // Parse auth JSON and remove specific auth entry + var authData map[string]interface{} + err = json.Unmarshal(authContent, &authData) + o.Expect(err).NotTo(o.HaveOccurred()) + auths, _ := authData["auths"].(map[string]interface{}) + + if strings.Contains(pullSpec, "quay.io") { + image = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@" + imageDigest + delete(auths, "quay.io") + } else if strings.Contains(pullSpec, "registry.ci.openshift.org") { + image = "registry.ci.openshift.org/ocp/release@" + imageDigest + delete(auths, "registry.ci.openshift.org") + } else { + sourceImage := strings.Split(pullSpec, "/")[0] + image = pullSpec + delete(auths, sourceImage) + } + + authContentBytes, err := json.Marshal(authData) + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(os.WriteFile(dockerTmpDirName, authContentBytes, 0640)).NotTo(o.HaveOccurred()) + + // Run command oc adm release info with --idms-flag + o.Expect(oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "info", image, "-a", dockerTmpDirName, "--idms-file="+idmsFile64921).Execute()).NotTo(o.HaveOccurred()) + + // Run command oc adm release info to get oc-mirror image + ocMirrorImage, _, err := 
oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "info", image, "-a", dockerTmpDirName, "--idms-file="+idmsFile64921, `-ojsonpath={.references.spec.tags[?(@.name=="oc-mirror")].from.name}`).Outputs() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("ocMirrorImage is %s", ocMirrorImage) + + // Run command oc image extract with --idms-flag + o.Expect(oc.WithoutNamespace().WithoutKubeconf().Run("image").Args("extract", "-a", dockerTmpDirName, ocMirrorImage, "--path=/usr/bin/oc-mirror:"+extractTmpDirName, "--idms-file="+idmsFile64921, "--insecure", "--confirm").Execute()).NotTo(o.HaveOccurred()) + + // Verify oc-mirror is present + output, err := exec.Command("bash", "-c", "stat "+extractTmpDirName+"/oc-mirror").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(string(output), "File: /tmp/case64921/oc-mirror")).To(o.BeTrue()) + }) + + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-ConnectedOnly-Author:yinzhou-High-67013-oc image mirror with multi-arch images and --filter-by-os", func() { + skipIfDisconnected(oc) + + if !assertPullSecret(oc) { + skipMsg := "The cluster does not have pull secret for public registry hence skipping..." + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + g.By("Skip if the cluster is AzureStackCloud") + azureStackCloud, azureErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output() + o.Expect(azureErr).NotTo(o.HaveOccurred()) + if azureStackCloud == "AzureStackCloud" { + skipMsg := "Skip for cluster with AzureStackCloud!" 
+ e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + g.By("create new namespace") + oc.SetupProject() + registry := registry{ + dockerImage: "quay.io/openshifttest/registry@sha256:1106aedc1b2e386520bc2fb797d9a7af47d651db31d8e7ab472f2352da37d1b3", + namespace: oc.Namespace(), + } + + g.By("Trying to launch a registry app") + defer registry.deleteregistry(oc) + serInfo := registry.createregistry(oc) + + err := wait.Poll(30*time.Second, 180*time.Second, func() (bool, error) { + err := oc.WithoutNamespace().Run("image").Args("mirror", "quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c"+"="+serInfo.serviceName+"/testimage:ppc64", "--filter-by-os=linux/ppc64le", "--insecure").Execute() + if err != nil { + e2e.Logf("mirror failed, retrying...") + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("max time reached but mirror still falied")) + out, err := oc.WithoutNamespace().Run("image").Args("info", serInfo.serviceName+"/testimage:ppc64", "--insecure").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(out, "ppc64le")).To(o.BeTrue()) + err = wait.Poll(30*time.Second, 180*time.Second, func() (bool, error) { + err := oc.WithoutNamespace().Run("image").Args("mirror", "quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c"+"="+serInfo.serviceName+"/testimage:default", "--insecure").Execute() + if err != nil { + e2e.Logf("mirror failed, retrying...") + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("max time reached but mirror still falied")) + o.Expect(err).NotTo(o.HaveOccurred()) + imageInfo, err := oc.WithoutNamespace().Run("image").Args("info", serInfo.serviceName+"/testimage:default", "--insecure").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + architecture, err := exec.Command("bash", "-c", "uname -a").Output() + 
o.Expect(err).NotTo(o.HaveOccurred()) + architectureStr := string(architecture) + if o.Expect(strings.Contains(architectureStr, "x86_64")).To(o.BeTrue()) { + if o.Expect(strings.Contains(imageInfo, "amd64")).To(o.BeTrue()) { + e2e.Logf("Found the expected Arch amd64") + } else { + e2e.Failf("Failed to find the expected Arch for mirrored image") + } + } else if o.Expect(strings.Contains(architectureStr, "aarch64")).To(o.BeTrue()) { + if o.Expect(strings.Contains(imageInfo, "arm64")).To(o.BeTrue()) { + e2e.Logf("Found the expected Arch aarch64") + } else { + e2e.Failf("Failed to find the expected Arch for mirrored image") + } + } else if o.Expect(strings.Contains(architectureStr, "ppc64le")).To(o.BeTrue()) { + if o.Expect(strings.Contains(imageInfo, "ppc64le")).To(o.BeTrue()) { + e2e.Logf("Found the expected Arch ppc64le") + } else { + e2e.Failf("Failed to find the expected Arch for mirrored image") + } + } else { + if o.Expect(strings.Contains(imageInfo, "s390x")).To(o.BeTrue()) { + e2e.Logf("Found the expected Arch s390x") + } else { + e2e.Failf("Failed to find the expected Arch for mirrored image") + } + } + + }) +}) + +var _ = g.Describe("[sig-cli] oc CLI additional tests", func() { + defer g.GinkgoRecover() + + var ( + oc = NewCLIWithoutNamespace(KubeConfigPath()) + ) + + // author: yinzhou@redhat.com + g.It("MicroShiftBoth-ROSA-OSD_CCS-ARO-Author:yinzhou-High-43030-oc get events always show the timestamp as LAST SEEN", func() { + // Check if cluster is microshift or OCP + By("Check if cluster is microshift or OCP") + masterNodes, getAllMasterNodesErr := GetMasterNodes(oc) + if getAllMasterNodesErr != nil || len(masterNodes) == 0 { + skipMsg := "Skipping test - no master/control-plane nodes accessible (likely HyperShift/managed cluster)" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + By("Get all the namespaces") + var output string + _, err := DebugNodeWithOptionsAndChroot(oc, masterNodes[0], []string{"-q"}, "bash", "-c", "microshift 
version") + if err != nil { + output, err = oc.AsAdmin().Run("get").Args("projects", "-o=custom-columns=NAME:.metadata.name", "--no-headers").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + } else { + output, err = oc.AsAdmin().Run("get").Args("ns", "-o=custom-columns=NAME:.metadata.name", "--no-headers").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + } + projectList := strings.Fields(output) + + g.By("check the events per project") + for _, projectN := range projectList { + output, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", projectN).Output() + if match, _ := regexp.MatchString("No resources found", string(output)); match { + e2e.Logf("No events in project: %v", projectN) + } else { + result, _ := exec.Command("bash", "-c", "cat "+output+" | awk '{print $1}'").Output() + if match, _ := regexp.MatchString("unknown", string(result)); match { + e2e.Failf("Does not show timestamp as expected: %v", result) + } + } + } + + }) + + // author: yinzhou@redhat.com + g.It("MicroShiftBoth-VMonly-ROSA-OSD_CCS-ARO-Author:yinzhou-Medium-42983-always delete the debug pod when the oc debug node command exist [Flaky]", func() { + By("Check if a cluster is Microshift or OCP") + masterNodes, getAllMasterNodesErr := GetMasterNodes(oc) + if getAllMasterNodesErr != nil || len(masterNodes) == 0 { + skipMsg := "Skipping test - no master/control-plane nodes accessible (likely HyperShift/managed cluster)" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + project42983 := "project42983" + _, err := DebugNodeWithOptionsAndChroot(oc, masterNodes[0], []string{"-q"}, "bash", "-c", "microshift version") + if err != nil { + oc.SetupProject() + project42983 = oc.Namespace() + } else { + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", project42983).Execute() + createNSErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", project42983).Execute() + o.Expect(createNSErr).NotTo(o.HaveOccurred()) + } + + By("Set namespace as 
privileged namespace") + SetNamespacePrivileged(oc, project42983) + + g.By("Get all the node name list") + out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-o=jsonpath={.items[*].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + nodeList := strings.Fields(out) + + g.By("Run debug node") + for _, nodeName := range nodeList { + err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+nodeName, "-n", project42983, "--", "chroot", "/host", "date").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + } + + g.By("Make sure debug pods have been deleted") + err = wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { + output, err := oc.Run("get").Args("pods", "-n", project42983).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if matched, _ := regexp.MatchString("No resources found", output); !matched { + e2e.Logf("pods still not deleted :\n%s, try again ", output) + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, "pods still not deleted") + + }) + + // author: yinzhou@redhat.com + g.It("MicroShiftBoth-ROSA-OSD_CCS-ARO-Author:yinzhou-Medium-34155-oc get events sorted by lastTimestamp", func() { + g.By("Get events sorted by lastTimestamp") + err := oc.AsAdmin().WithoutNamespace().Run("get").Args("events", "-n", "openshift-operator-lifecycle-manager", "--sort-by="+".lastTimestamp").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + // author: yinzhou@redhat.com + g.It("MicroShiftBoth-ROSA-OSD_CCS-ARO-Author:yinzhou-Medium-47555-Should not update data when use oc set data with dry-run as server", func() { + By("Check if cluster is microshift or OCP") + masterNodes, getAllMasterNodesErr := GetMasterNodes(oc) + if getAllMasterNodesErr != nil || len(masterNodes) == 0 { + skipMsg := "Skipping test - no master/control-plane nodes accessible (likely HyperShift/managed cluster)" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + project47555 := "project47555" + _, err := 
DebugNodeWithOptionsAndChroot(oc, masterNodes[0], []string{"-q"}, "bash", "-c", "microshift version") + if err != nil { + oc.SetupProject() + project47555 = oc.Namespace() + } else { + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", project47555).Execute() + createNSErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", project47555).Execute() + o.Expect(createNSErr).NotTo(o.HaveOccurred()) + } + + g.By("Create new configmap") + err = oc.Run("create").Args("configmap", "cm-47555", "--from-literal=name=abc", "-n", project47555).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Save the data for configmap") + beforeSetcm, err := oc.Run("get").Args("cm", "cm-47555", "-o=jsonpath={.data.name}", "-n", project47555).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Run the set with server dry-run") + err = oc.Run("set").Args("data", "cm", "cm-47555", "--from-literal=name=def", "--dry-run=server", "-n", project47555).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + afterSetcm, err := oc.Run("get").Args("cm", "cm-47555", "-o=jsonpath={.data.name}", "-n", project47555).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if match, _ := regexp.MatchString(beforeSetcm, afterSetcm); !match { + e2e.Failf("Should not persistent update configmap with server dry-run") + } + g.By("Create new secret") + err = oc.Run("create").Args("secret", "generic", "secret-47555", "--from-literal=name=abc", "-n", project47555).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Save the data for secret") + beforeSetse, err := oc.Run("get").Args("secret", "secret-47555", "-o=jsonpath={.data.name}", "-n", project47555).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Run the set with server dry-run") + err = oc.Run("set").Args("data", "secret", "secret-47555", "--from-literal=name=def", "--dry-run=server", "-n", project47555).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + afterSetse, err := oc.Run("get").Args("secret", "secret-47555", 
"-o=jsonpath={.data.name}", "-n", project47555).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if match, _ := regexp.MatchString(beforeSetse, afterSetse); !match { + e2e.Failf("Should not persistent update secret with server dry-run") + } + + }) + + // author: yinzhou@redhat.com + g.It("MicroShiftBoth-ROSA-OSD_CCS-ARO-Author:yinzhou-Medium-49116-oc debug should remove startupProbe when create debug pod", func() { + By("Check if cluster is microshift or OCP") + masterNodes, getAllMasterNodesErr := GetMasterNodes(oc) + if getAllMasterNodesErr != nil || len(masterNodes) == 0 { + skipMsg := "Skipping test - no master/control-plane nodes accessible (likely HyperShift/managed cluster)" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + project49116 := "project49116" + _, err := DebugNodeWithOptionsAndChroot(oc, masterNodes[0], []string{"-q"}, "bash", "-c", "microshift version") + if err != nil { + oc.SetupProject() + project49116 = oc.Namespace() + } else { + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("ns", project49116).Execute() + createNSErr := oc.AsAdmin().WithoutNamespace().Run("create").Args("ns", project49116).Execute() + o.Expect(createNSErr).NotTo(o.HaveOccurred()) + } + + g.By("Create the deploy") + err = oc.Run("create").Args("deploy", "d49116", "--image", "quay.io/openshifttest/hello-openshift@sha256:56c354e7885051b6bb4263f9faa58b2c292d44790599b7dde0e49e7c466cf339", "-n", project49116).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("patch the deploy with startupProbe") + patchS := `[{"op": "add", "path": "/spec/template/spec/containers/0/startupProbe", "value":{ "exec": {"command": [ "false" ]}}}]` + err = oc.Run("patch").Args("deploy", "d49116", "--type=json", "-p", patchS, "-n", project49116).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("run the debug with jsonpath") + out, err := oc.Run("debug").Args("deploy/d49116", "-o=jsonpath='{.spec.containers[0].startupProbe}'", "-n", 
project49116).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if out != "''" { + e2e.Failf("The output should be empty, but not: %v", out) + } + }) +}) + +var _ = g.Describe("[sig-cli] Workloads client test", func() { + defer g.GinkgoRecover() + + var ( + oc = NewCLI("oc", KubeConfigPath()) + ) + + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-Longduration-NonPreRelease-Author:yinzhou-Medium-49395-oc debug node should exit when timeout [Timeout:30m]", func() { + workerNodeList, err := GetClusterNodesBy(oc, "worker") + o.Expect(err).NotTo(o.HaveOccurred()) + g.By("Create new namespace") + oc.SetupProject() + ns49395 := oc.Namespace() + + SetNamespacePrivileged(oc, ns49395) + + e2e.Logf("Running: %s debug --to-namespace %s node/%s -- sleep 900", oc.execPath, ns49395, workerNodeList[0]) + cmd := exec.Command(oc.execPath, "debug", "--to-namespace", ns49395, "node/"+workerNodeList[0], "--", "sleep", "900") + if oc.kubeconfig != "" { + cmd.Env = append(os.Environ(), "KUBECONFIG="+oc.kubeconfig) + } + err = cmd.Start() + o.Expect(err).NotTo(o.HaveOccurred()) + defer cmd.Process.Kill() + err = wait.Poll(10*time.Second, 600*time.Second, func() (bool, error) { + output, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns49395).Output() + if err1 != nil { + e2e.Logf("the err:%v, and try next round", err1) + return false, nil + } + if matched, _ := regexp.MatchString("debug", output); matched { + e2e.Logf("Check the debug pod in own namespace\n") + return true, nil + } + return false, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("Cannot find the debug pod in own namespace")) + err = wait.Poll(30*time.Second, 960*time.Second, func() (bool, error) { + output, err1 := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", ns49395).Output() + if err1 != nil { + e2e.Logf("the err:%v, and try next round", err1) + return false, nil + } + if matched, _ := regexp.MatchString("debug", output); !matched { + e2e.Logf("Check the debug pod 
disappeared in own namespace\n")
				return true, nil
			}
			return false, nil
		})
		// FIX: fmt.Sprintf with no format arguments trips `go vet`; pass the string directly.
		AssertWaitPollNoErr(err, "Still find the debug pod in own namespace even wait for 15 mins")
	})

	// author: yinzhou@redhat.com
	g.It("Author:yinzhou-ROSA-OSD_CCS-ARO-NonPreRelease-High-68647-oc whoami must work without oauth-apiserver", func() {
		isExternalOIDCCluster, err := IsExternalOIDCCluster(oc)
		o.Expect(err).NotTo(o.HaveOccurred())
		if !isExternalOIDCCluster {
			skipMsg := "Skipping the test as we are not running in a cluster without OAuth servers."
			e2e.Warningf("SKIPPING TEST: %s", skipMsg)
			g.Skip(skipMsg)
		}
		err = oc.AsAdmin().Run("whoami").Args("").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		By("Create new namespace")
		oc.SetupProject()
		// Test normal user runs oc whoami well
		err = oc.Run("whoami").Args("").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())

		By("Create new project to make sure that openshift-apiserver still functions well.")
		projectName := "ocp-68647" + GetRandomString()
		err = oc.AsAdmin().WithoutNamespace().Run("new-project").Args(projectName, "--skip-config-write").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("project", projectName).Execute()

		By("Create new app")
		err = oc.AsAdmin().WithoutNamespace().Run("new-app").Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", projectName, "--import-mode=PreserveOriginal").Execute()
		o.Expect(err).NotTo(o.HaveOccurred())
		e2e.Logf("Waiting for all pods of hello-openshift application to be ready ...")
		// BUG FIX: the original declared `var poderr error` and then shadowed it
		// with `:=` inside the poll closure, so the timeout message always
		// printed a nil error. Capture the last observed state explicitly.
		var lastPodState string
		errPod := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
			podOutput, podErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", projectName, "--no-headers").Output()
			if podErr != nil {
				lastPodState = podErr.Error()
				return false, nil
			}
			lastPodState = podOutput
			if strings.Contains(podOutput, "Running") {
				e2e.Logf("Pod %v successfully", podOutput)
				return true, nil
			}
			return false, nil
		})
		AssertWaitPollNoErr(errPod, fmt.Sprintf("Pod not running :: %v", lastPodState))
	})

	// author: yinzhou@redhat.com
	g.It("ROSA-OSD_CCS-ARO-Author:yinzhou-High-10136-Project should only watch its owned cache events", func() {
		By("Create the first namespace")
		oc.SetupProject()
		ns1 := oc.Namespace()
		By("Create deployment in the first namespace")
		deployCreationErr := oc.WithoutNamespace().Run("create").Args("deployment", "deploy10136-1", "-n", ns1, "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83").Execute()
		o.Expect(deployCreationErr).NotTo(o.HaveOccurred())
		if ok := waitForAvailableRsRunning(oc, "deployment", "deploy10136-1", ns1, "1"); ok {
			e2e.Logf("All pods are running now\n")
		} else {
			e2e.Failf("deploy10136-1 pods are not running as expected")
		}

		By("Create the second namespace")
		oc.SetupProject()
		ns2 := oc.Namespace()
		By("Get deployment under the second project with watch")
		cmd2, backgroundBufNs2, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", ns2, "-o", "name", "-w").Background()
		defer cmd2.Process.Kill()
		o.Expect(err).NotTo(o.HaveOccurred())
		By("Create deployment in the second namespace")
		deployCreationErr2 := oc.WithoutNamespace().Run("create").Args("deployment", "deploy10136-2", "-n", ns2, "--image", "quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83").Execute()
		o.Expect(deployCreationErr2).NotTo(o.HaveOccurred())
		if ok := waitForAvailableRsRunning(oc, "deployment", "deploy10136-2", ns2, "1"); ok {
			e2e.Logf("All pods are running now\n")
		} else {
			e2e.Failf("deploy10136-2 pods are not running as expected")
		}

		By("Get deployment in the first namespace with watch")
		cmd1, backgroundBuf, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", ns1, "-o", "name", "-w").Background()
		defer cmd1.Process.Kill()
		o.Expect(err).NotTo(o.HaveOccurred())

		By("Delete the deployment in the second namespace")
		deleteDeploymentErr := oc.WithoutNamespace().Run("delete").Args("deployment", "deploy10136-2", "-n", ns2).Execute()
		o.Expect(deleteDeploymentErr).NotTo(o.HaveOccurred())

		By("Get deployment in the first namespace again")
		out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-n", ns1, "-o", "name").Output()
		o.Expect(err).NotTo(o.HaveOccurred())

		By("Make sure the watch events matched")
		deploymentWatchOut := strings.Replace(backgroundBuf.String(), "\n", "", -1)
		// BUG FIX: regexp.MatchString(pattern, s) treated the accumulated watch
		// output as a regular expression applied to the final `oc get` output,
		// which only matches when the watch saw exactly one event. The intent is
		// containment, not pattern matching.
		if strings.Contains(deploymentWatchOut, strings.TrimSpace(out)) {
			e2e.Logf("All deployment events matched\n")
		} else {
			e2e.Failf("Deployment events not matched")
		}

		By("Make sure no trace under the second project for the resource under the first project")
		// BUG FIX: the original passed the buffer as the regexp pattern and the
		// literal name as the subject, inverting the check so it could never
		// detect the leak it was written for.
		if strings.Contains(backgroundBufNs2.String(), "deploy10136-1") {
			e2e.Failf("Should not see any trace for the resource under the first project in the second project\n")
		}
	})

	// author: yinzhou@redhat.com
	g.It("ROSA-OSD_CCS-ARO-ConnectedOnly-Author:yinzhou-High-71178-Make sure no mismatch for sha256sum of openshift install for mac version", func() {
		skipIfDisconnected(oc)

		if !assertPullSecret(oc) {
			skipMsg := "The cluster does not have pull secret for public registry hence skipping..."
+ e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + extractTmpDirName := "/tmp/d71178" + err := os.MkdirAll(extractTmpDirName, 0700) + o.Expect(err).NotTo(o.HaveOccurred()) + defer os.RemoveAll(extractTmpDirName) + secretFile, secretErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/pull-secret", "-n", "openshift-config", `--template={{index .data ".dockerconfigjson" | base64decode}}`).OutputToFile("auth.dockerconfigjson") + o.Expect(secretErr).NotTo(o.HaveOccurred()) + By("Get the payload") + payloadPullSpec, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o", "jsonpath={..desired.image}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(payloadPullSpec).NotTo(o.BeEmpty()) + e2e.Logf("pullspec is %v", payloadPullSpec) + + By("Extract the darwin tools") + os.RemoveAll("/tmp/d71178") + err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", payloadPullSpec, "--registry-config="+secretFile, "--command-os=darwin/arm64", "--tools", "--to=/tmp/d71178", "--insecure").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + By("Make sure no mismatch for sha256sum") + files := getSpecificFileName("/tmp/d71178", "openshift-install") + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("%v", files) + fileSum, err := sha256File("/tmp/d71178/" + files[0]) + e2e.Logf("%v", fileSum) + o.Expect(err).NotTo(o.HaveOccurred()) + fileSumFromResult := getSha256SumFromFile("/tmp/d71178/sha256sum.txt") + e2e.Logf("%v", fileSumFromResult) + if match, _ := regexp.MatchString(fileSum, fileSumFromResult); !match { + e2e.Failf("File sum not matched") + } + }) + + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-ConnectedOnly-Author:yinzhou-Medium-71273-Medium-71275-Validate user is able to extract rhel8 and rhel9 oc from the ocp payload", func() { + skipIfDisconnected(oc) + + if !assertPullSecret(oc) { + skipMsg := "The cluster does not have pull secret for public registry hence skipping..." 
+ e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + extractTmpDirName := "/tmp/case71273" + defer os.RemoveAll(extractTmpDirName) + err := os.MkdirAll(extractTmpDirName, 0700) + o.Expect(err).NotTo(o.HaveOccurred()) + + _, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", fmt.Sprintf("--to=%s", extractTmpDirName), "--confirm").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + By("Get desired image from ocp cluster") + pullSpec, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o", "jsonpath={..desired.image}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(pullSpec).NotTo(o.BeEmpty()) + + By("Extract oc.rhel8 from ocp payload") + _, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", "--command=oc.rhel8", pullSpec, "-a", extractTmpDirName+"/.dockerconfigjson", "--to", extractTmpDirName, "--insecure").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if _, statErr := os.Stat(extractTmpDirName + "/oc"); os.IsNotExist(statErr) { + e2e.Failf("Get extracted oc failed") + } + removeErr := os.Remove(extractTmpDirName + "/oc") + o.Expect(removeErr).NotTo(o.HaveOccurred()) + + By("Extract oc.rhel9 from ocp payload") + _, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", "--command=oc.rhel9", pullSpec, "-a", extractTmpDirName+"/.dockerconfigjson", "--to", extractTmpDirName, "--insecure").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if _, statErr := os.Stat(extractTmpDirName + "/oc"); os.IsNotExist(statErr) { + e2e.Failf("Get extracted oc failed") + } + removeErr = os.Remove(extractTmpDirName + "/oc") + o.Expect(removeErr).NotTo(o.HaveOccurred()) + + By("Extract oc from ocp payload") + _, err = oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "extract", "--command=oc", pullSpec, "-a", extractTmpDirName+"/.dockerconfigjson", "--to", extractTmpDirName, "--insecure").Output() + 
o.Expect(err).NotTo(o.HaveOccurred()) + if _, statErr := os.Stat(extractTmpDirName + "/oc"); os.IsNotExist(statErr) { + e2e.Failf("Get extracted oc failed") + } + removeErr = os.Remove(extractTmpDirName + "/oc") + o.Expect(removeErr).NotTo(o.HaveOccurred()) + + By("Get the oc-mirror image from ocp payload") + ocMirrorImage, _, err := oc.WithoutNamespace().WithoutKubeconf().Run("adm").Args("release", "info", pullSpec, "-a", extractTmpDirName+"/.dockerconfigjson", "--insecure", `-ojsonpath={.references.spec.tags[?(@.name=="oc-mirror")].from.name}`).Outputs() + o.Expect(err).NotTo(o.HaveOccurred()) + + By("Extract oc-mirror.rhel8") + _, err = oc.WithoutNamespace().WithoutKubeconf().Run("image").Args("extract", ocMirrorImage, "-a", extractTmpDirName+"/.dockerconfigjson", "--path=/usr/bin/oc-mirror.rhel8:"+extractTmpDirName, "--confirm", "--insecure").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if _, statErr := os.Stat(extractTmpDirName + "/oc-mirror.rhel8"); os.IsNotExist(statErr) { + e2e.Failf("Get extracted oc-mirror.rhel8 failed") + } + removeErr = os.Remove(extractTmpDirName + "/oc-mirror.rhel8") + o.Expect(removeErr).NotTo(o.HaveOccurred()) + + By("Extract oc-mirror.rhel9") + _, err = oc.WithoutNamespace().WithoutKubeconf().Run("image").Args("extract", ocMirrorImage, "-a", extractTmpDirName+"/.dockerconfigjson", "--path=/usr/bin/oc-mirror.rhel9:"+extractTmpDirName, "--confirm", "--insecure").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if _, statErr := os.Stat(extractTmpDirName + "/oc-mirror.rhel9"); os.IsNotExist(statErr) { + e2e.Failf("Get extracted oc-mirror.rhel9 failed") + } + removeErr = os.Remove(extractTmpDirName + "/oc-mirror.rhel9") + o.Expect(removeErr).NotTo(o.HaveOccurred()) + }) + // author: yinzhou@redhat.com + g.It("ROSA-OSD_CCS-ARO-Author:yinzhou-Medium-72217-Should get warning when there is an identical short name for two or more resources", func() { + customResourceBaseDir := FixturePath("testdata", "oc_cli/case72217") + cronTabCRDF 
:= filepath.Join(customResourceBaseDir, "crd-crontab-72217.yaml") + cronCRF := filepath.Join(customResourceBaseDir, "cr-cron-72217.yaml") + customTaskCRDF := filepath.Join(customResourceBaseDir, "crd-customtask-72217.yaml") + customCRF := filepath.Join(customResourceBaseDir, "cr-custom-72217.yaml") + catToyCRDF := filepath.Join(customResourceBaseDir, "crd-cattoy-72217.yaml") + catCRF := filepath.Join(customResourceBaseDir, "cr-cat-72217.yaml") + + g.By("Create new namespace") + oc.SetupProject() + ns72217 := oc.Namespace() + + By("Create the first CRD and get by short name should no warning") + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", cronTabCRDF).Execute() + err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", cronTabCRDF).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = waitCRDAvailable(oc, "crontabs72217.stable.example.com") + AssertWaitPollNoErr(err, "The crd crontabs72217.stable.example.com is not available in 60 seconds") + err = waitCreateCr(oc, cronCRF, ns72217) + AssertWaitPollNoErr(err, "The cr of crontabs is not created in 120 seconds") + err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ct72217", "-n", ns72217).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + By("Create the second CRD and get by short name should see warning") + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", customTaskCRDF).Execute() + err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", customTaskCRDF).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = waitCRDAvailable(oc, "customtasks72217.example.com") + AssertWaitPollNoErr(err, "The crd customtasks72217.example.com is not available in 60 seconds") + err = waitCreateCr(oc, customCRF, ns72217) + AssertWaitPollNoErr(err, "The cr of custometask is not created in 120 seconds") + _, outputWarning, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("ct72217", "-n", ns72217).Outputs() + o.Expect(err).NotTo(o.HaveOccurred()) + 
o.Expect(outputWarning).To(o.ContainSubstring("could also match lower priority resource")) + + By("Create the third CRD and get by short name should see warning") + defer oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", catToyCRDF).Execute() + err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", catToyCRDF).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = waitCRDAvailable(oc, "cattoys72217.bate.example.com") + AssertWaitPollNoErr(err, "The crd cattoys72217.bate.example.com is not available in 60 seconds") + err = waitCreateCr(oc, catCRF, ns72217) + AssertWaitPollNoErr(err, "The cr of cattoy is not created in 120 seconds") + _, outputWarning, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ct72217", "-n", ns72217).Outputs() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(outputWarning).To(o.ContainSubstring("could also match lower priority resource customtasks72217.example.com")) + o.Expect(outputWarning).To(o.ContainSubstring("could also match lower priority resource crontabs72217.stable.example.com")) + }) + + g.It("Author:yinzhou-ROSA-OSD_CCS-ARO-Medium-76150-Make sure oc debug node has set HOST env var", func() { + mnodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-o=jsonpath={.items[0].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + By("Create new namespace") + oc.SetupProject() + project76150 := oc.Namespace() + By("Set namespace as privileged namespace") + SetNamespacePrivileged(oc, project76150) + filePath, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+mnodeName, "-n", project76150, "-o=yaml").OutputToFile(getRandomString() + "workload-debug.yaml") + o.Expect(err).NotTo(o.HaveOccurred()) + regV1 := checkFileContent(filePath, "name: HOST") + regV2 := checkFileContent(filePath, "value: /host") + if regV1 && regV2 { + e2e.Logf("Found the expected host env setting for debug pod") + } else { + e2e.Failf("Don't find the host env set for debug pod") + } + }) + + 
g.It("Author:yinzhou-ROSA-OSD_CCS-ARO-High-76116-Make sure oc could run on rhel with fips on", func() { + workerNodeList, err := GetClusterNodesBy(oc, "worker") + o.Expect(err).NotTo(o.HaveOccurred()) + By("Create new namespace") + oc.SetupProject() + project76116 := oc.Namespace() + By("Set namespace as privileged namespace") + SetNamespacePrivileged(oc, project76116) + By("Check if fips enable") + efips, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", project76116, "node/"+workerNodeList[0], "--", "chroot", "/host", "fips-mode-setup", "--check").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if !strings.Contains(efips, "FIPS mode is enabled.") { + skipMsg := "Fips mode is disabled, skip it." + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + By("Check if oc could run with fips on") + clientVersion, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("-n", project76116, "node/"+workerNodeList[0], "--", "chroot", "/host", "oc", "version").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if !strings.Contains(clientVersion, "Client Version") { + e2e.Failf("Failed to run oc client with fips on") + } + }) + + g.It("Author:yinzhou-ROSA-OSD_CCS-ARO-Critical-11882-Return description of resources with cli describe", func() { + By("Create new namespace") + oc.SetupProject() + project11882 := oc.Namespace() + err := oc.WithoutNamespace().Run("new-app").Args("quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", "-n", project11882, "--name=example-11882", "--import-mode=PreserveOriginal").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + if ok := waitForAvailableRsRunning(oc, "deployment", "example-11882", oc.Namespace(), "1"); ok { + e2e.Logf("All pods are runnnig now\n") + } else { + oc.Run("get").Args("events", "-n", project11882).Output() + e2e.Failf("Deploment failed to roll out") + } + + output, err := oc.Run("describe").Args("svc", "example-11882", "-n", 
project11882).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(output, "deployment=example-11882")).To(o.BeTrue()) + svcyamleFile, err := oc.Run("get").Args("svc", "example-11882", "-n", project11882, "-o", "yaml").OutputToFile("svc-11882.yaml") + o.Expect(err).NotTo(o.HaveOccurred()) + outputY, err := oc.Run("describe").Args("-f", svcyamleFile).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(outputY, "deployment=example-11882")).To(o.BeTrue()) + outputL, err := oc.Run("describe").Args("svc", "-l", "app=example-11882", "-n", project11882).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(outputL, "example-11882")).To(o.BeTrue()) + outputN, err := oc.Run("describe").Args("svc", "example", "-n", project11882).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(outputN, "example-11882")).To(o.BeTrue()) + rsName, err := oc.Run("get").Args("rs", "-o=jsonpath={.items[0].metadata.name}", "-n", project11882).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + outputM, err := oc.Run("describe").Args("deploy/example-11882", "rs/"+rsName, "-n", project11882).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(strings.Contains(outputM, "example-11882")).To(o.BeTrue()) + o.Expect(strings.Contains(outputM, rsName)).To(o.BeTrue()) + }) + + g.It("Author:yinzhou-ROSA-OSD_CCS-ARO-High-76287-make sure tools imagestream contains sosreport", func() { + // Skip the case if cluster doest not have the imageRegistry installed + if !isEnabledCapability(oc, "ImageRegistry") { + skipMsg := "Skipped: cluster does not have imageRegistry installed" + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + oc.SetupProject() + project76287 := oc.Namespace() + + By("Set namespace as privileged namespace") + SetNamespacePrivileged(oc, project76287) + + By("Get all the node name list") + out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", 
"-o=jsonpath={.items[*].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + nodeList := strings.Fields(out) + + By("Check tools imagestream with sos command") + err = oc.Run("run").Args("testsos76287", "-n", project76287, "--image", "image-registry.openshift-image-registry.svc:5000/openshift/tools", "--restart", "Never", "--", "sos", "help").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + By("Run debug node with sos command") + err = oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+nodeList[0], "-n", project76287, "--", "sos", "help").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + }) +}) + +// ClientVersion ... +type ClientVersion struct { + BuildDate string `json:"buildDate"` + Compiler string `json:"compiler"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + GitVersion string `json:"gitVersion"` + GoVersion string `json:"goVersion"` + Major string `json:"major"` + Minor string `json:"minor"` + Platform string `json:"platform"` +} + +// ServerVersion ... +type ServerVersion struct { + BuildDate string `json:"buildDate"` + Compiler string `json:"compiler"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + GitVersion string `json:"gitVersion"` + GoVersion string `json:"goVersion"` + Major string `json:"major"` + Minor string `json:"minor"` + Platform string `json:"platform"` +} + +// VersionInfo ... 
+type VersionInfo struct { + ClientInfo ClientVersion `json:"ClientVersion"` + OpenshiftVersion string `json:"openshiftVersion"` + ServerInfo ServerVersion `json:"ServerVersion"` + ReleaseClientVersion string `json:"releaseClientVersion"` +} diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go deleted file mode 100644 index 556e2cf412..0000000000 --- a/test/e2e/e2e.go +++ /dev/null @@ -1,15 +0,0 @@ -package e2e - -import ( - g "github.com/onsi/ginkgo/v2" - o "github.com/onsi/gomega" -) - -var _ = g.Describe("[sig-cli][Jira:oc] oc sanity test", func() { - g.It("should always pass", func() { - o.Expect(true).To(o.BeTrue()) - }) - g.It("should always pass [Serial]", func() { - o.Expect(true).To(o.BeTrue()) - }) -}) diff --git a/test/e2e/util.go b/test/e2e/util.go new file mode 100644 index 0000000000..f1540127f8 --- /dev/null +++ b/test/e2e/util.go @@ -0,0 +1,1387 @@ +package e2e + +import ( + "bufio" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "regexp" + + o "github.com/onsi/gomega" + + "math/rand" + "net/http" + + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "time" + + g "github.com/onsi/ginkgo/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" + + "github.com/openshift/oc/test/testdata" +) + +// e2e compatibility layer - simple wrappers to avoid importing k8s.io/kubernetes/test/e2e/framework +// which has ginkgo v1/v2 compatibility issues +// All e2e logging methods automatically log to klog for structured logging +type e2eCompat struct{} + +var e2e = e2eCompat{} + +// Logf logs an info message to both klog and Ginkgo writer +func (e2eCompat) Logf(format string, args ...interface{}) { + klog.Infof(format, args...) + fmt.Fprintf(g.GinkgoWriter, format+"\n", args...) 
+} + +// Warningf logs a warning message to both klog and Ginkgo writer +func (e2eCompat) Warningf(format string, args ...interface{}) { + klog.Warningf(format, args...) + fmt.Fprintf(g.GinkgoWriter, "WARNING: "+format+"\n", args...) +} + +// Errorf logs an error message to both klog and Ginkgo writer (without failing the test) +func (e2eCompat) Errorf(format string, args ...interface{}) { + klog.Errorf(format, args...) + fmt.Fprintf(g.GinkgoWriter, "ERROR: "+format+"\n", args...) +} + +// Failf logs an error to klog and fails the test +func (e2eCompat) Failf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + klog.Errorf("TEST FAILED: %s", msg) + g.Fail(msg) +} + +// CLI represents an oc CLI interface for running commands +type CLI struct { + execPath string + kubeconfig string + namespace string + asAdmin bool + withNamespace bool +} + +// NewCLI creates a new CLI instance +func NewCLI(execPath string, kubeconfig string) *CLI { + return &CLI{ + execPath: execPath, + kubeconfig: kubeconfig, + asAdmin: false, + withNamespace: true, + } +} + +// NewCLIWithoutNamespace creates a new CLI instance without a default namespace +func NewCLIWithoutNamespace(kubeconfig string) *CLI { + return &CLI{ + execPath: "oc", + kubeconfig: kubeconfig, + asAdmin: false, + withNamespace: false, + } +} + +// KubeConfigPath returns the path to the kubeconfig file +func KubeConfigPath() string { + if kc := os.Getenv("KUBECONFIG"); kc != "" { + return kc + } + home, _ := os.UserHomeDir() + return filepath.Join(home, ".kube", "config") +} + +// AsAdmin returns a copy of the CLI with admin privileges +func (c *CLI) AsAdmin() *CLI { + newCLI := *c + newCLI.asAdmin = true + return &newCLI +} + +// WithoutNamespace returns a copy of the CLI without namespace set +func (c *CLI) WithoutNamespace() *CLI { + newCLI := *c + newCLI.withNamespace = false + return &newCLI +} + +// WithoutKubeconf returns a copy of the CLI without kubeconfig +func (c *CLI) WithoutKubeconf() 
*CLI {
	clone := *c
	clone.kubeconfig = ""
	return &clone
}

// WithKubectl returns a copy of the CLI that uses kubectl instead of oc.
func (c *CLI) WithKubectl() *CLI {
	clone := *c
	clone.execPath = "kubectl"
	return &clone
}

// AdminKubeClient returns a kube client (stub for deployment polling).
func (c *CLI) AdminKubeClient() *dummyKubeClientComplete { return &dummyKubeClientComplete{} }

// AsGuestKubeconf sets guest kubeconfig (stub for unused functions).
func (c *CLI) AsGuestKubeconf(path string) *CLI { return c }

// CLICommand represents one oc invocation being assembled: a verb plus args.
type CLICommand struct {
	cli  *CLI
	verb string
	args []string
}

// Run sets the verb for the CLI command.
func (c *CLI) Run(verb string) *CLICommand {
	return &CLICommand{
		cli:  c,
		verb: verb,
		args: []string{},
	}
}

// Args appends arguments to the CLI command.
func (cmd *CLICommand) Args(args ...string) *CLICommand {
	cmd.args = append(cmd.args, args...)
	return cmd
}

// WithoutNamespace returns the command with the default namespace disabled.
func (cmd *CLICommand) WithoutNamespace() *CLICommand {
	newCmd := *cmd
	newCLI := *cmd.cli
	newCLI.withNamespace = false
	newCmd.cli = &newCLI
	return &newCmd
}

// Execute runs the CLI command and returns an error if it fails.
func (cmd *CLICommand) Execute() error {
	_, err := cmd.Output()
	return err
}

// finalArgs assembles verb + args (+ "-n <namespace>" when enabled) for
// execution. (DECOMPOSITION: this logic was triplicated across Output,
// Outputs and Background.)
func (cmd *CLICommand) finalArgs() []string {
	args := []string{}
	if cmd.verb != "" {
		args = append(args, cmd.verb)
	}
	args = append(args, cmd.args...)
	if cmd.cli.withNamespace && cmd.cli.namespace != "" {
		args = append(args, "-n", cmd.cli.namespace)
	}
	return args
}

// logInvocation logs the command in the test-private client.go style
// (--namespace=X / --kubeconfig=Y shown as flags); suffix is appended to the
// log line, e.g. " in background".
func (cmd *CLICommand) logInvocation(args []string, suffix string) {
	logParts := []string{"oc"}
	if cmd.cli.withNamespace && cmd.cli.namespace != "" {
		logParts = append(logParts, fmt.Sprintf("--namespace=%s", cmd.cli.namespace))
	}
	if cmd.cli.kubeconfig != "" {
		logParts = append(logParts, fmt.Sprintf("--kubeconfig=%s", cmd.cli.kubeconfig))
	}
	logParts = append(logParts, strings.Join(args, " "))
	e2e.Logf("Running '%s'"+suffix, strings.Join(logParts, " "))
}

// newExecCmd builds the exec.Cmd, exporting KUBECONFIG when configured.
func (cmd *CLICommand) newExecCmd(args []string) *exec.Cmd {
	execCmd := exec.Command(cmd.cli.execPath, args...)
	if cmd.cli.kubeconfig != "" {
		execCmd.Env = append(os.Environ(), "KUBECONFIG="+cmd.cli.kubeconfig)
	}
	return execCmd
}

// Output runs the CLI command and returns its combined stdout+stderr.
func (cmd *CLICommand) Output() (string, error) {
	args := cmd.finalArgs()
	cmd.logInvocation(args, "")
	output, err := cmd.newExecCmd(args).CombinedOutput()
	if err != nil {
		// Log the error output to help debug failures
		e2e.Errorf("Command failed with error: %v\nOutput: %s", err, string(output))
	}
	return string(output), err
}

// Outputs runs the CLI command and returns stdout and stderr separately.
func (cmd *CLICommand) Outputs() (string, string, error) {
	args := cmd.finalArgs()
	cmd.logInvocation(args, "")
	execCmd := cmd.newExecCmd(args)

	var stdout, stderr strings.Builder
	execCmd.Stdout = &stdout
	execCmd.Stderr = &stderr

	err := execCmd.Run()
	return stdout.String(), stderr.String(), err
}

// OutputToFile runs the CLI command and writes its output to /tmp/<filename>,
// returning the full path.
func (cmd *CLICommand) OutputToFile(filename string) (string, error) {
	output, err := cmd.Output()
	if err != nil {
		return "", err
	}

	filePath := filepath.Join("/tmp", filename)
	err = os.WriteFile(filePath, []byte(output), 0644)
	return filePath, err
}

// Background starts the CLI command without waiting for it; callers receive
// the running exec.Cmd plus builders that accumulate stdout and stderr.
func (cmd *CLICommand) Background() (*exec.Cmd, *strings.Builder, *strings.Builder, error) {
	args := cmd.finalArgs()
	cmd.logInvocation(args, " in background")
	execCmd := cmd.newExecCmd(args)

	var stdout, stderr strings.Builder
	execCmd.Stdout = &stdout
	execCmd.Stderr = &stderr

	err := execCmd.Start()
	return execCmd, &stdout, &stderr, err
}

// SetupProject creates a fresh uniquely-named project for the test, sets it as
// the CLI's namespace, and registers cleanup via DeferCleanup.
func (c *CLI) SetupProject() {
	projectName := fmt.Sprintf("e2e-test-%s", GetRandomString())
	e2e.Logf("Creating project %q", projectName)
	err := c.Run("new-project").Args(projectName, "--skip-config-write").Execute()
	o.Expect(err).NotTo(o.HaveOccurred())
	c.namespace = projectName
	e2e.Logf("Project %q has been created", projectName)

	g.DeferCleanup(func() {
		e2e.Logf("Cleaning up project %q", projectName)
		c.AsAdmin().WithoutNamespace().Run("delete").Args("project", projectName, "--wait=false").Execute()
	})
}

// Namespace returns the CLI's current default namespace.
func (c *CLI) Namespace() string {
	return c.namespace
}

// Helper functions from compat_otp

// FixturePath returns the path to test fixture files.
+// This delegates to the testdata package which uses embedded go-bindata fixtures. +// Testdata files are embedded in the test binary at build time and extracted to +// a temporary directory at runtime, so they work regardless of where the binary executes. +func FixturePath(elem ...string) string { + return testdata.FixturePath(elem...) +} + +// AssertWaitPollNoErr asserts that a wait.Poll operation completed without error +func AssertWaitPollNoErr(err error, message string) { + if err == wait.ErrWaitTimeout { + e2e.Failf("%s: timed out waiting", message) + } + o.Expect(err).NotTo(o.HaveOccurred(), message) +} + +// GetRandomString generates a random string for unique naming +func GetRandomString() string { + const charset = "abcdefghijklmnopqrstuvwxyz0123456789" + seededRand := rand.New(rand.NewSource(time.Now().UnixNano())) + b := make([]byte, 8) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +// By prints a test step message using Ginkgo +func By(message string) { + e2e.Logf("STEP: %s", message) + g.By(message) +} + +// IsExternalOIDCCluster checks if the cluster is using external OIDC +func IsExternalOIDCCluster(c *CLI) (bool, error) { + output, err := c.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config.openshift.io/cluster", "-o=jsonpath={.spec.type}").Output() + if err != nil { + return false, err + } + return strings.Contains(output, "OIDC"), nil +} + +// SkipIfPlatformTypeNot skips the test if platform type doesn't match +func SkipIfPlatformTypeNot(c *CLI, platformType string) { + platform := CheckPlatform(c) + if !strings.EqualFold(platform, platformType) { + skipMsg := fmt.Sprintf("Test requires platform type %s, but cluster is %s", platformType, platform) + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } +} + +// CheckPlatform returns the infrastructure platform type +func CheckPlatform(c *CLI) string { + output, err := 
c.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output() + if err != nil { + e2e.Warningf("Failed to get platform type: %v", err) + return "Unknown" + } + platform := strings.TrimSpace(output) + e2e.Logf("Cluster platform type: %s", platform) + return platform +} + +// SetNamespacePrivileged sets a namespace as privileged +func SetNamespacePrivileged(c *CLI, namespace string) { + e2e.Logf("Setting namespace %s as privileged", namespace) + err := c.AsAdmin().WithoutNamespace().Run("label").Args("namespace", namespace, "pod-security.kubernetes.io/enforce=privileged", "pod-security.kubernetes.io/audit=privileged", "pod-security.kubernetes.io/warn=privileged", "--overwrite").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) +} + +// GetClusterNodesBy returns nodes filtered by role (master, worker, etc.) +func GetClusterNodesBy(c *CLI, role string) ([]string, error) { + e2e.Logf("Getting cluster nodes with role: %s", role) + labelSelector := fmt.Sprintf("node-role.kubernetes.io/%s=", role) + output, err := c.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", labelSelector, "-o=jsonpath={.items[*].metadata.name}").Output() + if err != nil { + return nil, err + } + + nodes := strings.Fields(strings.TrimSpace(output)) + if len(nodes) == 0 { + return nil, fmt.Errorf("no %s nodes found", role) + } + e2e.Logf("Found %d %s node(s): %v", len(nodes), role, nodes) + return nodes, nil +} + +// GetMasterNodes returns master/control-plane nodes, trying both labels for compatibility +func GetMasterNodes(c *CLI) ([]string, error) { + e2e.Logf("Getting master/control-plane nodes") + // Try "master" label first (older clusters) + nodes, err := GetClusterNodesBy(c, "master") + if err == nil && len(nodes) > 0 { + e2e.Logf("Found master nodes using 'master' label") + return nodes, nil + } + + // Fallback to "control-plane" label (newer Kubernetes versions) + e2e.Logf("No master nodes found, trying 
'control-plane' label") + nodes, err = GetClusterNodesBy(c, "control-plane") + if err == nil && len(nodes) > 0 { + e2e.Logf("Found master nodes using 'control-plane' label") + return nodes, nil + } + + return nil, fmt.Errorf("no master or control-plane nodes found") +} + +// DebugNodeWithOptionsAndChroot runs a debug session on a node with chroot +func DebugNodeWithOptionsAndChroot(c *CLI, nodeName string, options []string, command string, args ...string) (string, error) { + e2e.Logf("Running debug command on node %s", nodeName) + debugArgs := []string{"node/" + nodeName} + debugArgs = append(debugArgs, options...) + + fullCommand := command + if len(args) > 0 { + fullCommand = fmt.Sprintf("%s %s", command, strings.Join(args, " ")) + } + debugArgs = append(debugArgs, "--", "chroot", "/host", "sh", "-c", fullCommand) + + // Use "default" namespace explicitly to avoid issues with non-existent context namespaces + // The debug pod will be created in the default namespace, but it debugs the node itself + return c.AsAdmin().Run("debug").Args(append([]string{"-n", "default"}, debugArgs...)...).Output() +} + +// DebugNodeWithChroot runs a debug session on a node with chroot (simpler version) + +// AssertPodToBeReady waits for a pod to be ready +func AssertPodToBeReady(c *CLI, podName string, namespace string) { + e2e.Logf("Waiting for pod %s to be ready in namespace %s", podName, namespace) + err := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { + output, err := c.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace, "-o=jsonpath={.status.conditions[?(@.type=='Ready')].status}").Output() + if err != nil { + return false, nil + } + if strings.TrimSpace(output) == "True" { + e2e.Logf("Pod %s in namespace %s is ready", podName, namespace) + return true, nil + } + return false, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("Pod %s in namespace %s failed to become ready", podName, namespace)) +} + +// RemoteShPod runs a shell command 
in a pod + +// Architecture types and functions + +// Architecture type constants +type ArchitectureType string + +const ( + MULTI ArchitectureType = "Multi" + X86 = "amd64" + ARM64 = "arm64" + PPC64LE = "ppc64le" + S390X = "s390x" +) + +// SkipArchitectures skips the test if the cluster architecture matches +func SkipArchitectures(c *CLI, skipArch ArchitectureType) { + clusterArch := ClusterArchitecture(c) + if clusterArch == string(skipArch) { + skipMsg := fmt.Sprintf("Test not applicable for architecture: %s", skipArch) + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } +} + +// ClusterArchitecture returns the cluster architecture +func ClusterArchitecture(c *CLI) string { + e2e.Logf("Detecting cluster architecture") + output, err := c.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.cpuArchitecture}").Output() + if err != nil { + e2e.Warningf("Failed to get cluster architecture: %v", err) + return "unknown" + } + arch := strings.TrimSpace(output) + + // Check if multi-arch + nodesOutput, err := c.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-o=jsonpath={.items[*].status.nodeInfo.architecture}").Output() + if err == nil { + archs := strings.Fields(nodesOutput) + archMap := make(map[string]bool) + for _, a := range archs { + archMap[a] = true + } + if len(archMap) > 1 { + e2e.Logf("Cluster architecture: Multi-arch") + return string(MULTI) + } + } + + e2e.Logf("Cluster architecture: %s", arch) + return arch +} + +// ClusterInfra platform types and functions + +// Platform type constants +type PlatformType string + +const ( + AWS PlatformType = "AWS" + Azure = "Azure" + GCP = "GCP" + VSphere = "VSphere" + Nutanix = "Nutanix" + IBMCloud = "IBMCloud" + AlibabaCloud = "AlibabaCloud" +) + +// SkipTestIfSupportedPlatformNotMatched skips test if platform doesn't match +func SkipTestIfSupportedPlatformNotMatched(c *CLI, supportedPlatforms ...PlatformType) { + currentPlatform := CheckPlatform(c) + + 
for _, platform := range supportedPlatforms { + if strings.EqualFold(currentPlatform, string(platform)) { + return // Platform matches, don't skip + } + } + + // No match found, skip the test + skipMsg := fmt.Sprintf("Test not applicable for platform: %s", currentPlatform) + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) +} + +// ControlplaneInfo ... +type ControlplaneInfo struct { + HolderIdentity string `json:"holderIdentity"` + LeaseDurationSeconds int `json:"leaseDurationSeconds"` + AcquireTime string `json:"acquireTime"` + RenewTime string `json:"renewTime"` + LeaderTransitions int `json:"leaderTransitions"` +} + +type serviceInfo struct { + serviceIP string + namespace string + servicePort string + serviceURL string + serviceName string +} + +type registry struct { + dockerImage string + namespace string +} + +type podMirror struct { + name string + namespace string + cliImageID string + imagePullSecret string + imageSource string + imageTo string + imageToRelease string + template string +} + +type debugPodUsingDefinition struct { + name string + namespace string + cliImageID string + template string +} + +type priorityPod struct { + dName string + namespace string + replicaSum int + template string +} + +func getRandomString() string { + chars := "abcdefghijklmnopqrstuvwxyz0123456789" + seed := rand.New(rand.NewSource(time.Now().UnixNano())) + buffer := make([]byte, 8) + for index := range buffer { + buffer[index] = chars[seed.Intn(len(chars))] + } + return string(buffer) +} + +func (registry *registry) createregistry(oc *CLI) serviceInfo { + e2e.Logf("Creating registry server from image %s in namespace %s", registry.dockerImage, registry.namespace) + err := oc.AsAdmin().WithoutNamespace().Run("new-app").Args("--image", registry.dockerImage, "REGISTRY_STORAGE_DELETE_ENABLED=true", "--import-mode=PreserveOriginal", "-n", registry.namespace).Execute() + if err != nil { + e2e.Failf("Failed to create the registry server: %v", err) + } + err = 
oc.AsAdmin().WithoutNamespace().Run("set").Args("probe", "deploy/registry", "--readiness", "--liveness", "--get-url="+"http://:5000/v2", "-n", registry.namespace).Execute() + if err != nil { + e2e.Failf("Failed to config the registry: %v", err) + } + e2e.Logf("Waiting for registry pods to be running in namespace %s", registry.namespace) + if ok := waitForAvailableRsRunning(oc, "deployment", "registry", registry.namespace, "1"); ok { + e2e.Logf("Registry pods are running in namespace %s", registry.namespace) + } else { + e2e.Failf("private registry pod is not running even afer waiting for about 3 minutes") + } + + e2e.Logf("Getting service info for the registry in namespace %s", registry.namespace) + regSvcIP, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "registry", "-n", registry.namespace, "-o=jsonpath={.spec.clusterIP}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + _, err = oc.AsAdmin().WithoutNamespace().Run("create").Args("route", "edge", "my-route", "--service=registry", "-n", registry.namespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + regSvcPort, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("svc", "registry", "-n", registry.namespace, "-o=jsonpath={.spec.ports[0].port}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + regRoute, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("route", "my-route", "-n", registry.namespace, "-o=jsonpath={.spec.host}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + regSvcURL := regSvcIP + ":" + regSvcPort + svc := serviceInfo{ + serviceIP: regSvcIP, + namespace: registry.namespace, + servicePort: regSvcPort, + serviceURL: regSvcURL, + serviceName: regRoute, + } + return svc + +} + +func (registry *registry) deleteregistry(oc *CLI) { + e2e.Logf("Deleting registry resources in namespace %s", registry.namespace) + _ = oc.WithoutNamespace().Run("delete").Args("svc", "registry", "-n", registry.namespace).Execute() + _ = oc.WithoutNamespace().Run("delete").Args("deploy", 
"registry", "-n", registry.namespace).Execute() + _ = oc.WithoutNamespace().Run("delete").Args("is", "registry", "-n", registry.namespace).Execute() +} + +func (pod *podMirror) createPodMirror(oc *CLI) { + err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { + err1 := nonAdminApplyResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "CLIIMAGEID="+pod.cliImageID, "IMAGEPULLSECRET="+pod.imagePullSecret, "IMAGESOURCE="+pod.imageSource, "IMAGETO="+pod.imageTo, "IMAGETORELEASE="+pod.imageToRelease) + if err1 != nil { + e2e.Logf("the err:%v, and try next round", err1) + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("pod %s with %s is not created successfully", pod.name, pod.cliImageID)) +} + +func getCliImage(oc *CLI) string { + e2e.Logf("Getting CLI image from openshift namespace") + cliImage, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("imagestreams", "cli", "-n", "openshift", "-o=jsonpath={.spec.tags[0].from.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("CLI image: %s", cliImage) + return cliImage +} + +func checkMustgatherPodNode(oc *CLI) { + var nodeNameList []string + e2e.Logf("Get the node list of the must-gather pods running on") + err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { + output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-l", "app=must-gather", "-A", "-o=jsonpath={.items[*].spec.nodeName}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + nodeNameList = strings.Fields(output) + if nodeNameList == nil { + e2e.Logf("Can't find must-gather pod now, and try next round") + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("must-gather pod is not created successfully")) + e2e.Logf("must-gather scheduled on: %v", nodeNameList) + + e2e.Logf("make sure all the nodes in nodeNameList are not windows node") + 
expectedNodeLabels := getScanNodesLabels(oc, nodeNameList, "windows") + if expectedNodeLabels == nil { + e2e.Logf("must-gather scheduled as expected, no windows node found in the cluster") + } else { + e2e.Failf("Scheduled the must-gather pod to windows node: %v", expectedNodeLabels) + } +} + +func (pod *debugPodUsingDefinition) createDebugPodUsingDefinition(oc *CLI) { + err := wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { + outputFile, err1 := applyResourceFromTemplate48681(oc, "--ignore-unknown-parameters=true", "-f", pod.template, "-p", "NAME="+pod.name, "NAMESPACE="+pod.namespace, "CLIIMAGEID="+pod.cliImageID) + if err1 != nil { + e2e.Logf("the err:%v, and try next round", err1) + return false, nil + } + e2e.Logf("Waiting for pod running") + err := wait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) { + phase, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", pod.name, "--template", "{{.status.phase}}", "-n", pod.namespace).Output() + if err != nil { + return false, nil + } + if phase != "Running" { + return false, nil + } + return true, nil + }) + if err != nil { + e2e.Logf("Error waiting for pod to be in 'Running' phase: %v", err) + return false, nil + } + + debugPod, err := oc.Run("debug").Args("-f", outputFile).Output() + if err != nil { + e2e.Logf("Error running 'debug' command: %v", err) + return false, nil + } + if match, _ := regexp.MatchString("Starting pod/pod48681-debug", debugPod); !match { + e2e.Failf("Image debug container is being started instead of debug pod using the pod definition yaml file") + } + return true, nil + }) + if err != nil { + e2e.Failf("Error creating debug pod: %v", err) + } +} + +func createDeployment(oc *CLI, namespace string, deployname string) { + err := oc.WithoutNamespace().Run("create").Args("-n", namespace, "deployment", deployname, "--image=quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83", 
"--replicas=20").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) +} + +func triggerSucceedDeployment(oc *CLI, namespace string, deployname string, num int, expectedPods int) { + var generation string + var getGenerationerr error + err := wait.Poll(3*time.Second, 60*time.Second, func() (bool, error) { + generation, getGenerationerr = oc.AsAdmin().WithoutNamespace().Run("get").Args("deploy", deployname, "-n", namespace, "-o=jsonpath={.status.observedGeneration}").Output() + if getGenerationerr != nil { + e2e.Logf("Err Occurred, try again: %v", getGenerationerr) + return false, nil + } + if generation == "" { + e2e.Logf("Can't get generation, try again: %v", generation) + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("Failed to get generation ")) + + generationNum, err := strconv.Atoi(generation) + o.Expect(err).NotTo(o.HaveOccurred()) + for i := 0; i < num; i++ { + generationNum++ + err := oc.WithoutNamespace().Run("set").Args("-n", namespace, "env", "deployment", deployname, "paramtest=test"+strconv.Itoa(i)).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + _, currentRsName := getCurrentRs(oc, namespace, "app="+deployname, generationNum) + err = wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) { + availablePodNum, errGet := oc.WithoutNamespace().Run("get").Args("-n", namespace, "rs", currentRsName, "-o=jsonpath='{.status.availableReplicas}'").Output() + if errGet != nil { + e2e.Logf("Err Occurred: %v", errGet) + return false, errGet + } + availableNum, _ := strconv.Atoi(strings.ReplaceAll(availablePodNum, "'", "")) + if availableNum != expectedPods { + e2e.Logf("new triggered apps not deploy successfully, wait more times") + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("failed to deploy %v", deployname)) + + } +} +func triggerFailedDeployment(oc *CLI, namespace string, deployname string) { + patchYaml := `[{"op": "replace", "path": 
"/spec/template/spec/containers/0/image", "value": "quay.io/openshifttest/hello-openshift:nonexist"}]` + err := oc.WithoutNamespace().Run("patch").Args("-n", namespace, "deployment", deployname, "--type=json", "-p", patchYaml).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) +} + +func getShouldPruneRSFromPrune(oc *CLI, pruneRsNumCMD string, pruneRsCMD string, prunedNum int) []string { + e2e.Logf("Get pruned rs name by dry-run") + e2e.Logf("pruneRsNumCMD %v:", pruneRsNumCMD) + err := wait.Poll(5*time.Second, 300*time.Second, func() (bool, error) { + pruneRsNum, err := exec.Command("bash", "-c", pruneRsNumCMD).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + pruneNum, err := strconv.Atoi(strings.ReplaceAll(string(pruneRsNum), "\n", "")) + o.Expect(err).NotTo(o.HaveOccurred()) + if pruneNum != prunedNum { + e2e.Logf("pruneNum is not equal %v: ", prunedNum) + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(err, fmt.Sprintf("Check pruned RS failed")) + + e2e.Logf("pruneRsCMD %v:", pruneRsCMD) + pruneRsName, err := exec.Command("bash", "-c", pruneRsCMD).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + pruneRsList := strings.Fields(strings.ReplaceAll(string(pruneRsName), "\n", " ")) + sort.Strings(pruneRsList) + e2e.Logf("pruneRsList %v:", pruneRsList) + return pruneRsList +} + +func getCompeletedRsInfo(oc *CLI, namespace string, deployname string) (completedRsList []string, completedRsNum int) { + out, err := oc.WithoutNamespace().Run("get").Args("-n", namespace, "rs", "--sort-by={.metadata.creationTimestamp}", "-o=jsonpath='{.items[?(@.spec.replicas == 0)].metadata.name}'").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("string out %v:", out) + totalCompletedRsList := strings.Fields(strings.ReplaceAll(out, "'", "")) + totalCompletedRsListNum := len(totalCompletedRsList) + return totalCompletedRsList, totalCompletedRsListNum +} + +func getShouldPruneRSFromCreateTime(totalCompletedRsList []string, totalCompletedRsListNum int, keepNum 
int) []string { + rsList := totalCompletedRsList[0:(totalCompletedRsListNum - keepNum)] + sort.Strings(rsList) + e2e.Logf("rsList %v:", rsList) + return rsList + +} + +func comparePrunedRS(rsList []string, pruneRsList []string) bool { + e2e.Logf("Check pruned rs whether right") + if !reflect.DeepEqual(rsList, pruneRsList) { + return false + } + return true +} + +func checkRunningRsList(oc *CLI, namespace string, deployname string) []string { + e2e.Logf("Get all the running RSs") + out, err := oc.WithoutNamespace().Run("get").Args("-n", namespace, "rs", "-o=jsonpath='{.items[?(@.spec.replicas > 0)].metadata.name}'").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + runningRsList := strings.Fields(strings.ReplaceAll(out, "'", "")) + sort.Strings(runningRsList) + e2e.Logf("runningRsList %v:", runningRsList) + return runningRsList +} + +func pruneCompletedRs(oc *CLI, parameters ...string) { + e2e.Logf("Delete all the completed RSs") + err := oc.AsAdmin().WithoutNamespace().Run("adm").Args(parameters...).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) +} + +func getRemainingRs(oc *CLI, namespace string, deployname string) []string { + e2e.Logf("Get all the remaining RSs") + remainRs, err := oc.WithoutNamespace().Run("get").Args("rs", "-l", "app="+deployname, "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + remainRsList := strings.Fields(string(remainRs)) + sort.Strings(remainRsList) + e2e.Logf("remainRsList %v:", remainRsList) + return remainRsList +} + +func checkPodStatus(oc *CLI, podLabel string, namespace string, expected string) { + err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) { + output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", podLabel, "-o=jsonpath={.items[*].status.phase}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("the result of pod:%v", output) + if strings.Contains(output, expected) && 
(!(strings.Contains(strings.ToLower(output), "error"))) && (!(strings.Contains(strings.ToLower(output), "crashLoopbackOff"))) { + return true, nil + } + return false, nil + }) + if err != nil { + oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", podLabel, "-o", "yaml").Execute() + } + AssertWaitPollNoErr(err, fmt.Sprintf("the state of pod with %s is not expected %s", podLabel, expected)) +} + +func checkNetworkType(oc *CLI) string { + e2e.Logf("Checking cluster network type") + output, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.type}").Output() + networkType := strings.ToLower(output) + e2e.Logf("Cluster network type: %s", networkType) + return networkType +} + +func getLatestPayload(url string) string { + res, err := http.Get(url) + if err != nil { + e2e.Failf("unable to get http with error: %v", err) + } + body, err := ioutil.ReadAll(res.Body) + defer res.Body.Close() + if err != nil { + e2e.Failf("unable to parse the http result with error: %v", err) + } + + var data map[string]interface{} + if err := json.Unmarshal(body, &data); err != nil { + e2e.Failf("unable to parse JSON with error: %v", err) + } + pullSpec, _ := data["pullSpec"].(string) + return pullSpec +} + +func assertPodOutput(oc *CLI, podLabel string, namespace string, expected string) { + err := wait.PollUntilContextTimeout(context.Background(), 1*time.Minute, 10*time.Minute, true, func(ctx context.Context) (bool, error) { + podStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", "-n", namespace, "-l", podLabel).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("the result of pod:%v", podStatus) + if strings.Contains(podStatus, expected) { + return true, nil + } else { + podDesp, err := oc.AsAdmin().WithoutNamespace().Run("describe").Args("pods", "-n", namespace, "-l", podLabel).Output() + e2e.Logf("the details of pod: %v", podDesp) + 
o.Expect(err).NotTo(o.HaveOccurred()) + return false, nil + } + }) + AssertWaitPollNoErr(err, fmt.Sprintf("the state of pod with %s is not expected %s", podLabel, expected)) +} + +// this function is used to check whether proxy is configured or not +// As restart the microshift service, the debug node pod will quit with error + +// get cluster resource name list +// Check if BaselineCapabilities have been set to None +func isBaselineCapsSet(oc *CLI, component string) bool { + baselineCapabilitySet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.spec.capabilities.baselineCapabilitySet}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("baselineCapabilitySet parameters: %v\n", baselineCapabilitySet) + return strings.Contains(baselineCapabilitySet, component) +} + +// Check if component is listed in clusterversion.status.capabilities.enabledCapabilities +func isEnabledCapability(oc *CLI, component string) bool { + enabledCapabilities, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o=jsonpath={.items[*].status.capabilities.enabledCapabilities}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Cluster enabled capability parameters: %v\n", enabledCapabilities) + return strings.Contains(enabledCapabilities, component) +} + +// this function is used to check whether openshift-samples installed or not +// WaitForDeploymentPodsToBeReady waits for the specific deployment to be ready +// make sure the PVC is Bound to the PV +// wait for DC to be ready +func getClusterRegion(oc *CLI) string { + e2e.Logf("Getting cluster region") + node := getWorkersList(oc)[0] + region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node, "-o=jsonpath={.metadata.labels.failure-domain\\.beta\\.kubernetes\\.io\\/region}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Cluster region: %s", region) + return region +} + +// skipIfDisconnected skips the test if the 
cluster is disconnected/airgapped. +// This is useful for ConnectedOnly tests that require external network access. +// Works across all platforms (AWS, Azure, GCP, bare metal, etc.) +// Uses multiple detection methods: +// 1. Quick check: AWS C2S/SC2S regions (us-iso prefix) +// 2. Actual connectivity test: curl to quay.io from worker node +func skipIfDisconnected(oc *CLI) { + e2e.Logf("Checking if cluster is disconnected") + + // Fast path: Check for AWS C2S/SC2S disconnected regions + region := getClusterRegion(oc) + if strings.HasPrefix(region, "us-iso") { + skipMsg := fmt.Sprintf("Skipping ConnectedOnly test: AWS C2S/SC2S disconnected region (%s)", region) + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + // Actual connectivity test: Try to reach public internet from worker node + e2e.Logf("Testing actual connectivity to public internet") + workerNodes := getWorkersList(oc) + if len(workerNodes) == 0 { + e2e.Logf("Warning: No worker nodes found, assuming cluster is connected") + return + } + workNode := workerNodes[0] + + curlCMD := "curl -I https://quay.io --connect-timeout 10" + output, err := DebugNodeWithOptionsAndChroot(oc, workNode, []string{}, curlCMD) + + if !strings.Contains(output, "HTTP") || err != nil { + skipMsg := "Skipping ConnectedOnly test: cluster cannot access public internet (disconnected/airgapped)" + e2e.Logf("Unable to access quay.io from worker node %s. 
Output: %s, Error: %v", workNode, output, err) + e2e.Warningf("SKIPPING TEST: %s", skipMsg) + g.Skip(skipMsg) + } + + e2e.Logf("Successfully verified cluster has public internet connectivity (quay.io accessible) - test will proceed") +} + +func assertPullSecret(oc *CLI) bool { + dirName := "/tmp/" + GetRandomString() + err := os.MkdirAll(dirName, 0o755) + o.Expect(err).NotTo(o.HaveOccurred()) + defer os.RemoveAll(dirName) + err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--to", dirName, "--confirm").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + oauthFilePath := dirName + "/.dockerconfigjson" + secretContent, err := exec.Command("bash", "-c", fmt.Sprintf("cat %v", oauthFilePath)).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if matched, _ := regexp.MatchString("registry.ci.openshift.org", string(secretContent)); !matched { + return false + } else { + return true + } +} + +func getSpecificFileName(fileDir string, pattern string) []string { + files, err := ioutil.ReadDir(fileDir) + o.Expect(err).NotTo(o.HaveOccurred()) + + var matchingFiles []string + e2e.Logf("the origin files %v", files) + for _, file := range files { + match, err := regexp.MatchString(pattern, string(file.Name())) + o.Expect(err).NotTo(o.HaveOccurred()) + if match { + matchingFiles = append(matchingFiles, string(file.Name())) + } + } + e2e.Logf("the result files %v", matchingFiles) + o.Expect(len(matchingFiles) > 0).To(o.BeTrue()) + return matchingFiles +} + +func sha256File(fileName string) (string, error) { + file, err := os.Open(fileName) + o.Expect(err).NotTo(o.HaveOccurred()) + defer file.Close() + hash := sha256.New() + _, err = io.Copy(hash, file) + o.Expect(err).NotTo(o.HaveOccurred()) + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getSha256SumFromFile(fileName string) string { + var fileSum string + content, err := ioutil.ReadFile(fileName) + o.Expect(err).NotTo(o.HaveOccurred()) + lines := 
strings.Split(string(content), "\n") + for _, v := range lines { + trimline := strings.TrimSpace(v) + if strings.Contains(trimline, "openshift-install") { + fileSum = strings.Fields(trimline)[0] + o.Expect(fileSum).NotTo(o.BeEmpty()) + } + } + return fileSum +} + +func waitCRDAvailable(oc *CLI, crdName string) error { + e2e.Logf("Waiting for CRD %s to be available", crdName) + err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { + err := oc.AsAdmin().WithoutNamespace().Run("get").Args("crd", crdName).Execute() + if err != nil { + e2e.Logf("The crd with name %v still not ready, please try again", crdName) + return false, nil + } + e2e.Logf("CRD %s is now available", crdName) + return true, nil + }) + return err +} + +func waitCreateCr(oc *CLI, crFileName string, namespace string) error { + e2e.Logf("Waiting to create CR from file %s in namespace %s", crFileName, namespace) + err := wait.Poll(20*time.Second, 300*time.Second, func() (bool, error) { + err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", crFileName, "-n", namespace).Execute() + if err != nil { + e2e.Logf("The cr with file %v created failed, please try again", crFileName) + return false, nil + } + e2e.Logf("CR from file %s created successfully in namespace %s", crFileName, namespace) + return true, nil + }) + return err +} + +// CatalogResponse matches the JSON structure of the /v2/_catalog endpoint. +type CatalogResponse struct { + Repositories []string `json:"repositories"` +} + +// TagsResponse matches the JSON structure of the /v2//tags/list endpoint. 
+type TagsResponse struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +func createEmptyAuth(authfilepath string) { + authF, err := os.Create(authfilepath) + o.Expect(err).NotTo(o.HaveOccurred()) + defer authF.Close() + authContent := fmt.Sprintf(`{}`) + authW := bufio.NewWriter(authF) + _, werr := authW.WriteString(authContent) + authW.Flush() + o.Expect(werr).NotTo(o.HaveOccurred()) +} + +func checkFileContent(filename string, expectedStr string) bool { + b, err := ioutil.ReadFile(filename) + if err != nil { + e2e.Failf("failed to read the file ") + } + s := string(b) + if strings.Contains(s, expectedStr) { + return true + } else { + return false + } +} + +func checkOcPlatform(oc *CLI) string { + ocVersion, err := oc.Run("version").Args("--client", "-o", "yaml").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if strings.Contains(ocVersion, "amd64") { + return "amd64" + } else if strings.Contains(ocVersion, "arm64") { + return "arm64" + } else if strings.Contains(ocVersion, "s390x") { + return "s390x" + } else if strings.Contains(ocVersion, "ppc64le") { + return "ppc64le" + } else { + return "Unknown platform" + } + +} + +type AuthEntry struct { + Auth string `json:"auth"` +} +type AuthsData struct { + Auths map[string]AuthEntry `json:"auths"` +} + +func waitForAvailableRsRunning(oc *CLI, resourceType string, resourceName string, namespace string, expectedReplicas string) bool { + e2e.Logf("Waiting for %s %s in namespace %s to have %s available replicas", resourceType, resourceName, namespace, expectedReplicas) + err := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { + output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(resourceType, resourceName, "-n", namespace, "-o=jsonpath={.status.availableReplicas}").Output() + if err != nil { + return false, nil + } + if strings.TrimSpace(output) == expectedReplicas { + e2e.Logf("%s %s in namespace %s has reached %s available replicas", resourceType, resourceName, namespace, 
expectedReplicas) + return true, nil + } + return false, nil + }) + return err == nil +} + +// Dummy client types for stub methods +// Expanded dummy types with required methods +// Update dummy client methods to return proper types +// Dummy resource types with Get method +type dummyCoreV1 struct{} + +func (d *dummyCoreV1) Pods(string) interface{} { return nil } + +// Update dummy types to return proper resource types +type dummyAppsV1Updated struct{} + +// Dummy deployment and statefulset types with Spec and Status +type dummyDeploymentSpec struct { + Replicas *int32 +} +type dummyDeploymentStatus struct { + Replicas int32 + UpdatedReplicas int32 + AvailableReplicas int32 +} +type dummyDeployment struct { + Spec dummyDeploymentSpec + Status dummyDeploymentStatus +} + +type dummyStatefulSetStatus struct { + Replicas int32 +} +type dummyStatefulSet struct { + Status dummyStatefulSetStatus +} + +// Update resource getter to return proper types +type dummyDeploymentsFinal struct{} + +type dummyStatefulSetsFinal struct{} + +// Add missing fields and methods +type dummyStatefulSetSpec struct { + Replicas *int32 +} +type dummyStatefulSetStatusFinal struct { + Replicas int32 + ReadyReplicas int32 +} +type dummyStatefulSetFinal struct { + Spec dummyStatefulSetSpec + Status dummyStatefulSetStatusFinal +} + +type dummyPods struct{} + +func (d *dummyPods) List(context.Context, metav1.ListOptions) (interface{}, error) { return nil, nil } + +type dummyCoreV1Final struct{} + +func (d *dummyCoreV1Final) Pods(string) *dummyPods { return &dummyPods{} } + +type dummyAppsV1Ultimate struct{} + +// Final missing fields +type dummyPodList struct { + Items []interface{} +} + +type dummyKubeClientComplete struct{} + +func (d *dummyKubeClientComplete) AppsV1() *dummyAppsV1Ultimate { return &dummyAppsV1Ultimate{} } + +func (d *dummyAppsV1Ultimate) Deployments(string) *dummyDeploymentsFinal { + return &dummyDeploymentsFinal{} +} + +func (d *dummyDeploymentsFinal) Get(ctx context.Context, 
name string, opts metav1.GetOptions) (*dummyDeployment, error) { + return &dummyDeployment{ + Spec: dummyDeploymentSpec{ + Replicas: new(int32), + }, + Status: dummyDeploymentStatus{ + AvailableReplicas: 0, + }, + }, nil +} + +// Helper functions for internal use +func applyResourceFromTemplate(oc *CLI, args ...string) error { + e2e.Logf("Processing and applying template with args: %v", args) + output, err := oc.Run("process").Args(args...).Output() + if err != nil { + e2e.Errorf("Failed to process template: %v", err) + return err + } + + // Apply the processed template + e2e.Logf("Applying processed template") + e2e.Logf("Processed template content:\n%s", output) + cmd := exec.Command(oc.execPath, "apply", "-f", "-") + cmd.Stdin = strings.NewReader(output) + if oc.namespace != "" { + cmd.Args = append(cmd.Args, "-n", oc.namespace) + } + if oc.kubeconfig != "" { + cmd.Env = append(os.Environ(), "KUBECONFIG="+oc.kubeconfig) + } + applyOutput, err := cmd.CombinedOutput() + if err != nil { + e2e.Errorf("Failed to apply template: %v\nCommand output: %s\nTemplate was:\n%s", err, string(applyOutput), output) + } + return err +} + +func applyResourceFromTemplate48681(oc *CLI, args ...string) (string, error) { + e2e.Logf("Processing template with args: %v", args) + output, err := oc.Run("process").Args(args...).Output() + if err != nil { + e2e.Errorf("Failed to process template: %v", err) + return "", err + } + + // Create temp file with output + tmpFile := filepath.Join(os.TempDir(), fmt.Sprintf("resource-%d.yaml", time.Now().UnixNano())) + if err := ioutil.WriteFile(tmpFile, []byte(output), 0644); err != nil { + return "", err + } + + // Apply the processed template + e2e.Logf("Applying processed template from file: %s", tmpFile) + cmd := exec.Command(oc.execPath, "apply", "-f", tmpFile) + if oc.namespace != "" { + cmd.Args = append(cmd.Args, "-n", oc.namespace) + } + if oc.kubeconfig != "" { + cmd.Env = append(os.Environ(), "KUBECONFIG="+oc.kubeconfig) + } + output2, 
err := cmd.CombinedOutput() + if err != nil { + e2e.Errorf("Failed to apply template: %v, output: %s", err, string(output2)) + return "", err + } + return tmpFile, nil +} + +func nonAdminApplyResourceFromTemplate(oc *CLI, args ...string) error { + return applyResourceFromTemplate(oc, args...) +} + +func getScanNodesLabels(oc *CLI, nodeList []string, expected string) []string { + var matchedLabelsNodeNames []string + for _, nodeName := range nodeList { + nodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.metadata.labels}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if matched, _ := regexp.MatchString(expected, nodeLabels); matched { + matchedLabelsNodeNames = append(matchedLabelsNodeNames, nodeName) + } + } + return matchedLabelsNodeNames +} + +func getCurrentRs(oc *CLI, namespace string, selector string, generation int) (int, string) { + // Get the current ReplicaSet matching the selector + output, err := oc.WithoutNamespace().Run("get").Args("-n", namespace, "rs", "-l", selector, "-o", "jsonpath={.items[?(@.metadata.annotations.deployment\\.kubernetes\\.io/revision==\""+strconv.Itoa(generation)+"\")].metadata.name}").Output() + if err != nil { + return 0, "" + } + rsName := strings.TrimSpace(output) + if rsName == "" { + // Fallback to getting latest rs + output, err = oc.WithoutNamespace().Run("get").Args("-n", namespace, "rs", "-l", selector, "--sort-by=.metadata.creationTimestamp", "-o", "jsonpath={.items[-1].metadata.name}").Output() + if err != nil { + return 0, "" + } + rsName = strings.TrimSpace(output) + } + return generation, rsName +} + +func copyFile(src, dst string) error { + sourceFile, err := os.Open(src) + if err != nil { + return err + } + defer sourceFile.Close() + + destFile, err := os.Create(dst) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, sourceFile) + return err +} + +func getWorkersList(oc *CLI) []string { + e2e.Logf("Getting list of worker 
nodes") + nodes, err := GetClusterNodesBy(oc, "worker") + if err != nil { + e2e.Warningf("Failed to get worker nodes: %v", err) + return []string{} + } + e2e.Logf("Found %d worker node(s)", len(nodes)) + return nodes +} diff --git a/test/testdata/fixtures.go b/test/testdata/fixtures.go new file mode 100644 index 0000000000..397dad0abd --- /dev/null +++ b/test/testdata/fixtures.go @@ -0,0 +1,129 @@ +package testdata + +import ( + "embed" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" +) + +//go:embed oc_cli +var embeddedFixtures embed.FS + +var fixtureDir string + +func init() { + var err error + // Create a temporary directory for extracted fixtures + fixtureDir, err = os.MkdirTemp("", "oc-testdata-fixtures-") + if err != nil { + panic(fmt.Sprintf("failed to create fixture directory: %v", err)) + } +} + +// FixturePath returns the filesystem path to a fixture file or directory, extracting it from +// embedded files if necessary. The relativePath should be like "testdata/oc_cli/file.yaml" or "oc_cli/file.yaml" +func FixturePath(elem ...string) string { + relativePath := filepath.Join(elem...) 
+ + // Normalize the path for embed.FS (always use forward slashes, remove testdata/ prefix) + embedPath := strings.ReplaceAll(relativePath, string(filepath.Separator), "/") + embedPath = strings.TrimPrefix(embedPath, "testdata/") + + // Target path in temp directory + targetPath := filepath.Join(fixtureDir, relativePath) + + // Check if already extracted + if _, err := os.Stat(targetPath); err == nil { + return targetPath + } + + // Check if this is a directory or file in embed.FS + info, err := fs.Stat(embeddedFixtures, embedPath) + if err != nil { + panic(fmt.Sprintf("failed to stat embedded path %s: %v", embedPath, err)) + } + + if info.IsDir() { + // It's a directory - extract all files recursively + err := fs.WalkDir(embeddedFixtures, embedPath, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + // Calculate target path + relPath := strings.TrimPrefix(path, embedPath) + relPath = strings.TrimPrefix(relPath, "/") + target := filepath.Join(targetPath, relPath) + + if d.IsDir() { + // Create directory + return os.MkdirAll(target, 0700) + } + + // Create parent directory + if err := os.MkdirAll(filepath.Dir(target), 0700); err != nil { + return err + } + + // Read and write file + data, err := embeddedFixtures.ReadFile(path) + if err != nil { + return err + } + return os.WriteFile(target, data, 0644) + }) + if err != nil { + panic(fmt.Sprintf("failed to extract directory %s: %v", embedPath, err)) + } + } else { + // It's a file + // Create parent directory + if err := os.MkdirAll(filepath.Dir(targetPath), 0700); err != nil { + panic(fmt.Sprintf("failed to create directory for %s: %v", relativePath, err)) + } + + // Read from embedded FS + data, err := embeddedFixtures.ReadFile(embedPath) + if err != nil { + panic(fmt.Sprintf("failed to read embedded file %s: %v", embedPath, err)) + } + + // Write to temp directory + if err := os.WriteFile(targetPath, data, 0644); err != nil { + panic(fmt.Sprintf("failed to write fixture 
file %s: %v", targetPath, err)) + } + } + + return targetPath +} + +// GetFixtureDir returns the temporary directory where fixtures are extracted +func GetFixtureDir() string { + return fixtureDir +} + +// CleanupFixtures removes the temporary fixture directory +func CleanupFixtures() error { + if fixtureDir != "" { + return os.RemoveAll(fixtureDir) + } + return nil +} + +// ListFixtures returns all available fixture paths +func ListFixtures() []string { + var fixtures []string + fs.WalkDir(embeddedFixtures, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + fixtures = append(fixtures, path) + } + return nil + }) + return fixtures +} diff --git a/test/testdata/oc_cli/case72217/cr-cat-72217.yaml b/test/testdata/oc_cli/case72217/cr-cat-72217.yaml new file mode 100644 index 0000000000..45efd73dbf --- /dev/null +++ b/test/testdata/oc_cli/case72217/cr-cat-72217.yaml @@ -0,0 +1,6 @@ +apiVersion: "bate.example.com/v1" +kind: Cattoy72217 +metadata: + name: my-cattoy-object +spec: + test: test diff --git a/test/testdata/oc_cli/case72217/cr-cron-72217.yaml b/test/testdata/oc_cli/case72217/cr-cron-72217.yaml new file mode 100644 index 0000000000..7a25e21ad6 --- /dev/null +++ b/test/testdata/oc_cli/case72217/cr-cron-72217.yaml @@ -0,0 +1,6 @@ +apiVersion: "stable.example.com/v1" +kind: CronTab72217 +metadata: + name: my-new-cron-object +spec: + test: test diff --git a/test/testdata/oc_cli/case72217/cr-custom-72217.yaml b/test/testdata/oc_cli/case72217/cr-custom-72217.yaml new file mode 100644 index 0000000000..f0355b6105 --- /dev/null +++ b/test/testdata/oc_cli/case72217/cr-custom-72217.yaml @@ -0,0 +1,6 @@ +apiVersion: "example.com/v1" +kind: Customtask72217 +metadata: + name: my-new-customtask-object +spec: + test: test diff --git a/test/testdata/oc_cli/case72217/crd-cattoy-72217.yaml b/test/testdata/oc_cli/case72217/crd-cattoy-72217.yaml new file mode 100644 index 0000000000..220aba2e6e --- /dev/null +++ 
b/test/testdata/oc_cli/case72217/crd-cattoy-72217.yaml @@ -0,0 +1,30 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cattoys72217.bate.example.com +spec: + group: bate.example.com + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + image: + type: string + replicas: + type: integer + scope: Namespaced + names: + plural: cattoys72217 + singular: cattoy72217 + kind: Cattoy72217 + shortNames: + - ct72217 diff --git a/test/testdata/oc_cli/case72217/crd-crontab-72217.yaml b/test/testdata/oc_cli/case72217/crd-crontab-72217.yaml new file mode 100644 index 0000000000..d2eb83ece8 --- /dev/null +++ b/test/testdata/oc_cli/case72217/crd-crontab-72217.yaml @@ -0,0 +1,30 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: crontabs72217.stable.example.com +spec: + group: stable.example.com + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + image: + type: string + replicas: + type: integer + scope: Namespaced + names: + plural: crontabs72217 + singular: crontab72217 + kind: CronTab72217 + shortNames: + - ct72217 diff --git a/test/testdata/oc_cli/case72217/crd-customtask-72217.yaml b/test/testdata/oc_cli/case72217/crd-customtask-72217.yaml new file mode 100644 index 0000000000..653332c73a --- /dev/null +++ b/test/testdata/oc_cli/case72217/crd-customtask-72217.yaml @@ -0,0 +1,30 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: customtasks72217.example.com +spec: + group: example.com + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + cronSpec: + type: string + image: + type: string + replicas: + type: integer + 
scope: Namespaced + names: + plural: customtasks72217 + singular: customtask72217 + kind: Customtask72217 + shortNames: + - ct72217 diff --git a/test/testdata/oc_cli/clusterresource_for_user.yaml b/test/testdata/oc_cli/clusterresource_for_user.yaml new file mode 100644 index 0000000000..103248ab45 --- /dev/null +++ b/test/testdata/oc_cli/clusterresource_for_user.yaml @@ -0,0 +1,15 @@ +apiVersion: quota.openshift.io/v1 +kind: ClusterResourceQuota +metadata: + name: for-user42982 +spec: + quota: + hard: + limits.cpu: "4" + limits.memory: 8Gi + pods: "4" + requests.cpu: "4" + requests.memory: 8Gi + selector: + annotations: + openshift.io/requester: username diff --git a/test/testdata/oc_cli/debugpod_48681.yaml b/test/testdata/oc_cli/debugpod_48681.yaml new file mode 100644 index 0000000000..6a8d2ceabd --- /dev/null +++ b/test/testdata/oc_cli/debugpod_48681.yaml @@ -0,0 +1,39 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: debug-testpod +objects: +- kind: Pod + apiVersion: v1 + metadata: + name: "${NAME}" + namespace: "${NAMESPACE}" + labels: + app: pod48681 + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - command: ["/bin/sh", "-ec", "sleep 300"] + image: "${CLIIMAGEID}" + imagePullPolicy: IfNotPresent + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + name: test-container + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 250m + memory: 64Mi + +parameters: +- name: NAME +- name: NAMESPACE +- name: CLIIMAGEID diff --git a/test/testdata/oc_cli/deploymentconfig_with_quota.yaml b/test/testdata/oc_cli/deploymentconfig_with_quota.yaml new file mode 100644 index 0000000000..025eace293 --- /dev/null +++ b/test/testdata/oc_cli/deploymentconfig_with_quota.yaml @@ -0,0 +1,62 @@ +apiVersion: apps.openshift.io/v1 +kind: DeploymentConfig +metadata: + name: hello-openshift +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + 
deploymentconfig: hello-openshift + strategy: + activeDeadlineSeconds: 21600 + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + rollingParams: + intervalSeconds: 1 + maxSurge: 25% + maxUnavailable: 25% + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + creationTimestamp: null + labels: + deploymentconfig: hello-openshift + spec: + containers: + - image: quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83 + imagePullPolicy: Always + name: hello-openshift + resources: + limits: + cpu: 60m + memory: 60Mi + requests: + cpu: 50m + memory: 40M + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + - image: quay.io/openshifttest/registry@sha256:1106aedc1b2e386520bc2fb797d9a7af47d651db31d8e7ab472f2352da37d1b3 + imagePullPolicy: Always + name: hello-openshift2 + resources: + limits: + cpu: 60m + memory: 60Mi + requests: + cpu: 50m + memory: 40Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + test: false diff --git a/test/testdata/oc_cli/idmsFile64921.yaml b/test/testdata/oc_cli/idmsFile64921.yaml new file mode 100644 index 0000000000..9f19702d39 --- /dev/null +++ b/test/testdata/oc_cli/idmsFile64921.yaml @@ -0,0 +1,18 @@ +apiVersion: config.openshift.io/v1 +kind: ImageDigestMirrorSet +metadata: + name: example +spec: + imageDigestMirrors: + - mirrors: + - localhost:5000/openshift-release-dev/ocp-release + source: quay.io/openshift-release-dev/ocp-v4.0-art-dev + mirrorSourcePolicy: AllowContactingSource + - mirrors: + - localhost:5000/ocp/release + source: registry.ci.openshift.org/ocp/release + mirrorSourcePolicy: AllowContactingSource + - mirrors: + - localhost:5000/target/release + source: quay.io/openshift-release-dev/ocp-v4.0-art-dev 
+ mirrorSourcePolicy: AllowContactingSource diff --git a/test/testdata/oc_cli/initContainer.yaml b/test/testdata/oc_cli/initContainer.yaml new file mode 100644 index 0000000000..8c5b37d9fd --- /dev/null +++ b/test/testdata/oc_cli/initContainer.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + name: hello-pod + name: hello-pod +spec: + initContainers: + - name: wait + image: quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c + command: ["/bin/sh", "-c", "sleep 10"] + containers: + - name: hello-pod + image: quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83 + ports: + - containerPort: 8080 + volumeMounts: + - mountPath: /tmp + name: tmp + volumes: + - name: tmp + emptyDir: {} diff --git a/test/testdata/oc_cli/initContainer66989.yaml b/test/testdata/oc_cli/initContainer66989.yaml new file mode 100644 index 0000000000..b77dfab20c --- /dev/null +++ b/test/testdata/oc_cli/initContainer66989.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + name: hello-pod + name: hello-pod +spec: + initContainers: + - name: wait + image: quay.io/openshifttest/base-alpine@sha256:3126e4eed4a3ebd8bf972b2453fa838200988ee07c01b2251e3ea47e4b1f245c + command: ["/bin/sh", "-c", "sleep 30"] + containers: + - name: hello-pod + image: quay.io/openshifttest/hello-openshift@sha256:4200f438cf2e9446f6bcff9d67ceea1f69ed07a2f83363b7fb52529f7ddd8a83 + ports: + - containerPort: 8080 + volumeMounts: + - mountPath: /tmp + name: tmp + volumes: + - name: tmp + emptyDir: {}