Fix OCM label check #140

Merged
44 changes: 27 additions & 17 deletions Makefile
@@ -161,8 +161,10 @@ $(KIND_KUBECONFIG):
kind create cluster --name $(KIND_NAME) $(KIND_ARGS)
kind get kubeconfig --name $(KIND_NAME) > $(KIND_KUBECONFIG)
kind get kubeconfig --name $(KIND_NAME) --internal > $(KIND_KUBECONFIG_INTERNAL)
KUBECONFIG=$(KIND_KUBECONFIG) kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd-full/monitoring.coreos.com_servicemonitors.yaml
KUBECONFIG=$(KIND_KUBECONFIG) kubectl create -f https://raw.githubusercontent.com/openshift/api/release-4.12/route/v1/route.crd.yaml
KUBECONFIG=$(KIND_KUBECONFIG) kubectl apply -f \
https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd-full/monitoring.coreos.com_servicemonitors.yaml
KUBECONFIG=$(KIND_KUBECONFIG) kubectl create -f \
https://raw.githubusercontent.com/openshift/api/release-4.12/route/v1/route.crd.yaml

.PHONY: kind-delete-cluster
kind-delete-cluster: ## Delete a kind cluster.
@@ -183,16 +185,24 @@ kind-deploy-registration-operator-hub: $(OCM_REPO) $(KIND_KUBECONFIG) ## Deploy
KUBECONFIG=$(KIND_KUBECONFIG) $(KUBEWAIT) -r deploy/cluster-manager -n open-cluster-management -c condition=Available -m 90
KUBECONFIG=$(KIND_KUBECONFIG) $(KUBEWAIT) -r deploy/cluster-manager-placement-controller -n open-cluster-management-hub -c condition=Available -m 90
@echo installing Policy CRD on hub
KUBECONFIG=$(KIND_KUBECONFIG) kubectl apply -f https://raw.githubusercontent.com/open-cluster-management-io/governance-policy-propagator/main/deploy/crds/policy.open-cluster-management.io_policies.yaml
KUBECONFIG=$(KIND_KUBECONFIG) kubectl apply -f \
https://raw.githubusercontent.com/open-cluster-management-io/governance-policy-propagator/main/deploy/crds/policy.open-cluster-management.io_policies.yaml

.PHONY: kind-deploy-registration-operator-managed
kind-deploy-registration-operator-managed: $(OCM_REPO) $(KIND_KUBECONFIG) ## Deploy the ocm registration operator to the kind cluster.
cd $(OCM_REPO) && KUBECONFIG=$(KIND_KUBECONFIG) MANAGED_CLUSTER_NAME=$(CLUSTER_NAME) HUB_KUBECONFIG=$(HUB_KUBECONFIG_INTERNAL) KUSTOMIZE_VERSION=$(KUSTOMIZE_VERSION_CLEAN) make deploy-spoke-operator
cd $(OCM_REPO) && KUBECONFIG=$(KIND_KUBECONFIG) MANAGED_CLUSTER_NAME=$(CLUSTER_NAME) HUB_KUBECONFIG=$(HUB_KUBECONFIG_INTERNAL) KUSTOMIZE_VERSION=$(KUSTOMIZE_VERSION_CLEAN) make apply-spoke-cr
cd $(OCM_REPO) && \
KUBECONFIG=$(KIND_KUBECONFIG) MANAGED_CLUSTER_NAME=$(CLUSTER_NAME) HUB_KUBECONFIG=$(HUB_KUBECONFIG_INTERNAL) \
KUSTOMIZE_VERSION=$(KUSTOMIZE_VERSION_CLEAN) make deploy-spoke-operator
cd $(OCM_REPO) && \
KUBECONFIG=$(KIND_KUBECONFIG) MANAGED_CLUSTER_NAME=$(CLUSTER_NAME) HUB_KUBECONFIG=$(HUB_KUBECONFIG_INTERNAL) \
KUSTOMIZE_VERSION=$(KUSTOMIZE_VERSION_CLEAN) make apply-spoke-cr

.PHONY: kind-deploy-registration-operator-managed-hosted
kind-deploy-registration-operator-managed-hosted: $(OCM_REPO) $(KIND_KUBECONFIG) ## Deploy the ocm registration operator to the kind cluster in hosted mode.
cd $(OCM_REPO) && KUBECONFIG=$(HUB_KUBECONFIG) MANAGED_CLUSTER_NAME=$(CLUSTER_NAME) HUB_KUBECONFIG=$(HUB_KUBECONFIG_INTERNAL) HOSTED_CLUSTER_MANAGER_NAME=$(HUB_CLUSTER_NAME) EXTERNAL_MANAGED_KUBECONFIG=$(KIND_KUBECONFIG_INTERNAL) KUSTOMIZE_VERSION=$(KUSTOMIZE_VERSION_CLEAN) make deploy-spoke-hosted
cd $(OCM_REPO) && \
KUBECONFIG=$(HUB_KUBECONFIG) MANAGED_CLUSTER_NAME=$(CLUSTER_NAME) HUB_KUBECONFIG=$(HUB_KUBECONFIG_INTERNAL) \
HOSTED_CLUSTER_MANAGER_NAME=$(HUB_CLUSTER_NAME) EXTERNAL_MANAGED_KUBECONFIG=$(KIND_KUBECONFIG_INTERNAL) \
KUSTOMIZE_VERSION=$(KUSTOMIZE_VERSION_CLEAN) make deploy-spoke-hosted

.PHONY: kind-approve-cluster
kind-approve-cluster: $(KIND_KUBECONFIG) ## Approve managed cluster in the kind cluster.
@@ -203,7 +213,7 @@ kind-approve-cluster: $(KIND_KUBECONFIG) ## Approve managed cluster in the kind

.PHONY: wait-for-work-agent
wait-for-work-agent: $(KIND_KUBECONFIG) ## Wait for the klusterlet work agent to start.
KUBECONFIG=$(KIND_KUBECONFIG) $(KUBEWAIT) -r "pod -l=app=klusterlet-manifestwork-agent" -n open-cluster-management-agent -c condition=Ready -m 360
KUBECONFIG=$(KIND_KUBECONFIG) $(KUBEWAIT) -r "pod -l=app=klusterlet-agent" -n open-cluster-management-agent -c condition=Ready -m 360

.PHONY: kind-run-local
kind-run-local: manifests generate fmt vet $(KIND_KUBECONFIG) ## Run the policy-addon-controller locally against the kind cluster.
@@ -268,27 +278,27 @@ e2e-stop-instrumented:

.PHONY: e2e-debug
e2e-debug: ## Collect debug logs from deployed clusters.
@echo "##### Gathering information from $(KIND_NAME) #####"
##### Gathering information from $(KIND_NAME) #####
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl get managedclusters
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl get managedclusteraddons --all-namespaces
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n $(CONTROLLER_NAMESPACE) get deployments
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n $(CONTROLLER_NAMESPACE) get pods
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent-addon get deployments
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent-addon get pods
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n $(CONTROLLER_NAMESPACE) get all
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent get all
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent-addon get all
-KUBECONFIG=$(KIND_KUBECONFIG) kubectl get manifestwork --all-namespaces -o yaml

@echo "* Local controller log:"
## Local controller log:
-cat build/_output/controller.log
@echo "* Container logs in namespace $(CONTROLLER_NAMESPACE):"
## Container logs in namespace $(CONTROLLER_NAMESPACE):
-@for POD in $(shell KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n $(CONTROLLER_NAMESPACE) get pods -o name); do \
for CONTAINER in $$(KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n $(CONTROLLER_NAMESPACE) get $${POD} -o jsonpath={.spec.containers[*].name}); do \
echo "* Logs for pod $${POD} from container $${CONTAINER} in namespace $(CONTROLLER_NAMESPACE)"; \
echo "## Logs for pod $${POD} from container $${CONTAINER} in namespace $(CONTROLLER_NAMESPACE)"; \
KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n $(CONTROLLER_NAMESPACE) logs $${POD}; \
done; \
done
@echo "* Container logs in namespace open-cluster-management-agent-addon:"
## Container logs in namespace open-cluster-management-agent-addon:
-@for POD in $(shell KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent-addon get pods -o name); do \
for CONTAINER in $$(KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent-addon get $${POD} -o jsonpath={.spec.containers[*].name}); do \
echo "* Logs for pod $${POD} from container $${CONTAINER} in namespace open-cluster-management-agent-addon"; \
echo "## Logs for pod $${POD} from container $${CONTAINER} in namespace open-cluster-management-agent-addon"; \
KUBECONFIG=$(KIND_KUBECONFIG) kubectl -n open-cluster-management-agent-addon logs $${POD}; \
done; \
done
20 changes: 11 additions & 9 deletions test/e2e/case1_framework_deployment_test.go
@@ -35,6 +35,10 @@ var _ = Describe("Test framework deployment", Ordered, func() {
})

AfterAll(func() {
if CurrentSpecReport().Failed() {
debugCollection(case1PodSelector)
}

By("Deleting the default governance-policy-framework ClusterManagementAddon from the hub cluster")
Kubectl("delete", "-f", case1ClusterManagementAddOnCRDefault)
})
@@ -704,23 +708,21 @@ func checkContainersAndAvailabilityInNamespace(cluster managedClusterConfig, clu

if startupProbeInCluster(clusterIdx) {
By(logPrefix + "verifying all replicas in framework deployment are available")
Eventually(func() bool {
Eventually(func(g Gomega) {
deploy := GetWithTimeout(
client, gvrDeployment, case1DeploymentName, namespace, true, 60,
)

replicas, found, err := unstructured.NestedInt64(deploy.Object, "status", "replicas")
if !found || err != nil {
return false
}
g.Expect(found).To(BeTrue(), "status.replicas should exist in the deployment")
g.Expect(err).ToNot(HaveOccurred())

available, found, err := unstructured.NestedInt64(deploy.Object, "status", "availableReplicas")
if !found || err != nil {
return false
}
g.Expect(found).To(BeTrue(), "status.availableReplicas should exist in the deployment")
g.Expect(err).ToNot(HaveOccurred())

return available == replicas
}, 240, 1).Should(Equal(true))
g.Expect(available).To(Equal(replicas), "available replicas should equal expected replicas")
}, 240, 1).Should(Succeed())
}

By(logPrefix + "verifying one framework pod is running")
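
The Eventually blocks in this file move from a func() bool poller to a poll body that accepts a Gomega instance and finishes with Should(Succeed()), so a timeout reports the last failed expectation instead of a bare "expected true". A minimal standalone sketch of that polling style follows; the package name and the fetchReplicaCounts helper are hypothetical placeholders for GetWithTimeout and the unstructured.NestedInt64 lookups used in the real tests.

// Package e2e_sketch is a hypothetical package name for this standalone example.
package e2e_sketch

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// fetchReplicaCounts is a hypothetical stand-in for GetWithTimeout plus the
// unstructured.NestedInt64 lookups in the real tests: it returns the desired
// and available replica counts for the deployment under test.
func fetchReplicaCounts() (replicas, available int64, err error) {
	return 0, 0, fmt.Errorf("not implemented in this sketch")
}

var _ = Describe("framework deployment availability (sketch)", func() {
	It("waits until all replicas are available", func() {
		// Instead of a func() bool that collapses every failure mode into
		// "false", the poll body receives a Gomega instance and asserts
		// directly; each expectation is retried until the timeout.
		Eventually(func(g Gomega) {
			replicas, available, err := fetchReplicaCounts()
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(available).To(Equal(replicas),
				"available replicas should equal expected replicas")
		}, 240, 1).Should(Succeed())
	})
})
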
20 changes: 11 additions & 9 deletions test/e2e/case2_config_deployment_test.go
@@ -48,23 +48,21 @@ func verifyConfigPolicyDeployment(

if startupProbeInCluster(clusterNum) {
By(logPrefix + "verifying all replicas in config-policy-controller deployment are available")
Eventually(func() bool {
Eventually(func(g Gomega) {
deploy = GetWithTimeout(
client, gvrDeployment, case2DeploymentName, namespace, true, 30,
)

replicas, found, err := unstructured.NestedInt64(deploy.Object, "status", "replicas")
if !found || err != nil {
return false
}
g.Expect(found).To(BeTrue(), "status.replicas should exist in the deployment")
g.Expect(err).ToNot(HaveOccurred())

available, found, err := unstructured.NestedInt64(deploy.Object, "status", "availableReplicas")
if !found || err != nil {
return false
}
g.Expect(found).To(BeTrue(), "status.availableReplicas should exist in the deployment")
g.Expect(err).ToNot(HaveOccurred())

return available == replicas
}, 240, 1).Should(Equal(true))
g.Expect(available).To(Equal(replicas), "available replicas should equal expected replicas")
}, 240, 1).Should(Succeed())
}

By(logPrefix + "verifying a running config-policy-controller pod")
@@ -96,6 +94,10 @@ var _ = Describe("Test config-policy-controller deployment", Ordered, func() {
})

AfterAll(func() {
if CurrentSpecReport().Failed() {
debugCollection(case2PodSelector)
}

By("Deleting the default config-policy-controller ClusterManagementAddon from the hub cluster")
Kubectl("delete", "-f", case2ClusterManagementAddOnCRDefault)
})
70 changes: 59 additions & 11 deletions test/e2e/utils_test.go
@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"os/exec"
"slices"
"strings"

. "github.com/onsi/ginkgo/v2"
@@ -145,19 +146,12 @@ func ListWithTimeoutByNamespace(

var list *unstructured.UnstructuredList

EventuallyWithOffset(1, func() error {
EventuallyWithOffset(1, func(g Gomega) {
var err error
list, err = clientHubDynamic.Resource(gvr).Namespace(ns).List(context.TODO(), opts)
if err != nil {
return err
}

if len(list.Items) != size {
return fmt.Errorf("list size doesn't match, expected %d actual %d", size, len(list.Items))
}

return nil
}, timeout, 1).ShouldNot(HaveOccurred())
g.Expect(err).ToNot(HaveOccurred())
g.Expect(list.Items).To(HaveLen(size))
}, timeout, 1).Should(Succeed())

if wantFound {
return list
@@ -186,3 +180,57 @@ func getAddonStatus(addon *unstructured.Unstructured) bool {

return false
}

func debugCollection(podSelector string) {
namespaceSuffix := []string{""}

if slices.Contains(CurrentSpecReport().Labels(), "hosted-mode") {
namespaceSuffix = append(namespaceSuffix, "-hosted")
}

By("Recording debug logs")

output := "===\n"

for i, cluster := range managedClusterList {
targetKubeconfig := fmt.Sprintf("--kubeconfig=%s%d_e2e", kubeconfigFilename, i+1)
targetCluster := cluster.clusterName
clusterNs := []string{cluster.clusterName}

if cluster.clusterName == "cluster1" {
for _, cluster := range managedClusterList[1:] {
clusterNs = append(clusterNs, cluster.clusterName)
}
}

for _, namespace := range clusterNs {
for _, suffix := range namespaceSuffix {
namespace += suffix
output += fmt.Sprintf("Cluster %s: All objects in namespace %s:\n", targetCluster, namespace)
output += Kubectl("get", "all", "-n", namespace, targetKubeconfig)
output += "===\n"
output += fmt.Sprintf(
"Cluster %s: Pod logs for label %s in namespace %s:\n",
targetCluster, podSelector, namespace,
)
output += Kubectl("describe", "pod", "-n", namespace, "-l", podSelector, targetKubeconfig)
output += Kubectl("logs", "-n", namespace, "-l", podSelector, "--ignore-errors", targetKubeconfig)
output += "===\n"
}
}

output += fmt.Sprintf("Cluster %s: All objects in namespace %s:\n", targetCluster, addonNamespace)
output += Kubectl("get", "all", "-n", addonNamespace, targetKubeconfig)
output += "===\n"
output += fmt.Sprintf("Cluster %s: Pod logs for label %s in namespace %s for cluster %s:\n",
targetCluster, podSelector, addonNamespace, cluster.clusterName)
output += Kubectl(
"describe", "pod", "-n", addonNamespace, "-l", podSelector, targetKubeconfig,
)
output += Kubectl(
"logs", "-n", addonNamespace, "-l", podSelector, "--ignore-errors", targetKubeconfig,
)
}

GinkgoWriter.Print(output)
}
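
The new debugCollection helper is invoked from the AfterAll blocks added in the case1 and case2 tests whenever a spec in the Ordered container failed. A minimal sketch of that wiring is shown below; the package name, the stubbed debugCollection body, and the app=some-addon label selector are hypothetical placeholders, not the repository's actual values.

// Package e2e_sketch is a hypothetical package name for this standalone example.
package e2e_sketch

import (
	. "github.com/onsi/ginkgo/v2"
)

// debugCollection stands in for the helper added above in utils_test.go; it is
// stubbed here so the sketch is self-contained.
func debugCollection(podSelector string) {
	GinkgoWriter.Printf("would collect kubectl output and logs for selector %s\n", podSelector)
}

var _ = Describe("some addon deployment (sketch)", Ordered, func() {
	It("deploys the addon", func() {
		// Real specs would assert on the addon deployment here.
	})

	AfterAll(func() {
		// Gather debug output only when a spec in this Ordered container
		// failed, so passing runs stay quiet.
		if CurrentSpecReport().Failed() {
			debugCollection("app=some-addon") // hypothetical label selector
		}
	})
})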