diff --git a/test/e2e/accounts_test.go b/test/e2e/accounts_test.go
index f794dce7a56e9..54eba790af2c5 100644
--- a/test/e2e/accounts_test.go
+++ b/test/e2e/accounts_test.go
@@ -14,7 +14,6 @@ import (
     "github.com/argoproj/argo-cd/v2/cmd/argocd/commands/headless"
     "github.com/argoproj/argo-cd/v2/pkg/apiclient/account"
     "github.com/argoproj/argo-cd/v2/pkg/apiclient/session"
-    "github.com/argoproj/argo-cd/v2/test/e2e/fixture"
     . "github.com/argoproj/argo-cd/v2/test/e2e/fixture"
     accountFixture "github.com/argoproj/argo-cd/v2/test/e2e/fixture/account"
     "github.com/argoproj/argo-cd/v2/util/io"
@@ -77,7 +76,7 @@ func TestCanIGetLogsAllowSwitchOn(t *testing.T) {
         When().
         Create().
         Login().
-        SetPermissions([]fixture.ACL{
+        SetPermissions([]ACL{
             {
                 Resource: "logs",
                 Action:   "get",
diff --git a/test/e2e/deployment_test.go b/test/e2e/deployment_test.go
index 085ecee244ba2..20e79c2aff56c 100644
--- a/test/e2e/deployment_test.go
+++ b/test/e2e/deployment_test.go
@@ -1,12 +1,22 @@
 package e2e
 
 import (
+    "context"
+    "encoding/json"
     "fmt"
+    "os"
     "testing"
+    "time"
 
     "github.com/stretchr/testify/assert"
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/client-go/tools/clientcmd"
 
+    "github.com/argoproj/argo-cd/v2/common"
     "github.com/argoproj/argo-cd/v2/util/argo"
+    "github.com/argoproj/argo-cd/v2/util/clusterauth"
     "github.com/argoproj/gitops-engine/pkg/health"
     . "github.com/argoproj/gitops-engine/pkg/sync/common"
@@ -108,3 +118,304 @@ func TestDeploymentWithoutTrackingMode(t *testing.T) {
 `, ctx.AppName()))
     })
 }
+
+// This test verifies that Argo CD can:
+// A) deploy to a cluster whose URL contains a query parameter, e.g. https://(kubernetes-url):443/?context=some-val
+// and
+// B) let multiple users deploy to the same K8s cluster using the above mechanism (but with different Argo CD Cluster Secrets and different ServiceAccounts).
+func TestDeployToKubernetesAPIURLWithQueryParameter(t *testing.T) {
+
+    // We test with both a cluster-scoped and a non-cluster-scoped Argo CD Cluster Secret.
+    clusterScopedParam := []bool{false, true}
+    for _, clusterScoped := range clusterScopedParam {
+
+        EnsureCleanState(t)
+
+        // Simulate two users, each with their own Argo CD cluster secret that can only deploy to their Namespace
+        users := []string{E2ETestPrefix + "user1", E2ETestPrefix + "user2"}
+
+        for _, username := range users {
+            createNamespaceScopedUser(t, username, clusterScoped)
+
+            GivenWithSameState(t).
+                Name("e2e-test-app-"+username).
+                Path("deployment").
+                When().
+                CreateWithNoNameSpace("--dest-namespace", username).
+                Sync().
+                Then().
+                Expect(OperationPhaseIs(OperationSucceeded)).
+                Expect(SyncStatusIs(SyncStatusCodeSynced)).
+                Expect(HealthIs(health.HealthStatusHealthy))
+        }
+
+    }
+
+}
+
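The test above relies on Argo CD treating two cluster server URLs that differ only in their query string as distinct clusters. A minimal standalone sketch of that URL property follows; the host and user names are purely illustrative and not taken from the test:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Two hypothetical cluster secrets can point at the same API server while
	// remaining distinct Argo CD cluster entries, because the query parameter
	// is part of the stored server URL.
	u1, _ := url.Parse("https://127.0.0.1:6443/?user=e2e-test-user1")
	u2, _ := url.Parse("https://127.0.0.1:6443/?user=e2e-test-user2")

	fmt.Println(u1.Host == u2.Host)         // true:  same Kubernetes API server
	fmt.Println(u1.String() == u2.String()) // false: two separate cluster URLs
	fmt.Println(u1.Query().Get("user"))     // "e2e-test-user1"
}
```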
+// This test verifies that, when multiple Argo CD cluster secrets are used to deploy to the same cluster (using query parameters),
+// the ServiceAccount RBAC fully enforces the user boundary.
+// Our simulated users' ServiceAccounts should not be able to deploy into a namespace that is outside that ServiceAccount's RBAC.
+func TestArgoCDSupportsMultipleServiceAccountsWithDifferingRBACOnSameCluster(t *testing.T) {
+
+    // We test with both a cluster-scoped and a non-cluster-scoped Argo CD Cluster Secret.
+    clusterScopedParam := []bool{ /*false,*/ true}
+
+    for _, clusterScoped := range clusterScopedParam {
+
+        EnsureCleanState(t)
+
+        // Simulate two users, each with their own Argo CD cluster secret that can only deploy to their Namespace
+        users := []string{E2ETestPrefix + "user1", E2ETestPrefix + "user2"}
+
+        for _, username := range users {
+            createNamespaceScopedUser(t, username, clusterScoped)
+        }
+
+        for idx, username := range users {
+
+            // Use one user's ServiceAccount to deploy to the other user's namespace, and vice versa.
+            // - If everything is working as expected, this should fail.
+            otherUser := users[(idx+1)%len(users)]
+
+            // e.g. Attempt to deploy to user1's namespace with user2's cluster Secret. This should fail, as user2's cluster Secret does not have the requisite permissions.
+            consequences := GivenWithSameState(t).
+                Name("e2e-test-app-"+username).
+                DestName(E2ETestPrefix+"cluster-"+otherUser).
+                Path("deployment").
+                When().
+                CreateWithNoNameSpace("--dest-namespace", username).IgnoreErrors().
+                Sync().Then()
+
+            // The error message differs based on whether the Argo CD Cluster Secret is namespace-scoped or cluster-scoped, but the idea is the same:
+            // - Even when deploying to the same cluster using two separate ServiceAccounts, the RBAC of those ServiceAccounts should continue to fully enforce RBAC boundaries.
+
+            if !clusterScoped {
+                consequences.Expect(Condition(ApplicationConditionComparisonError, "Namespace \""+username+"\" for Deployment \"nginx-deployment\" is not managed"))
+            } else {
+                consequences.Expect(OperationMessageContains("User \"system:serviceaccount:" + otherUser + ":" + otherUser + "-serviceaccount\" cannot create resource \"deployments\" in API group \"apps\" in the namespace \"" + username + "\""))
+            }
+        }
+
+    }
+}
+
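The RBAC boundary exercised by this test can also be checked directly against the API server, without going through an Argo CD sync. Below is a minimal sketch using a SelfSubjectAccessReview; the `apiURL` and `token` arguments are stand-ins for the values that `createNamespaceScopedUser` and `extractKubeConfigValues` (defined further down) produce:

```go
package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// canCreateDeployments reports whether the ServiceAccount behind `token` may
// create Deployments in `namespace`, using a SelfSubjectAccessReview.
func canCreateDeployments(apiURL, token, namespace string) (bool, error) {
	cfg := &rest.Config{
		Host:            apiURL,
		BearerToken:     token,
		TLSClientConfig: rest.TLSClientConfig{Insecure: true}, // e2e-style, not for production
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return false, err
	}
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: namespace,
				Verb:      "create",
				Group:     "apps",
				Resource:  "deployments",
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), review, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}

func main() {
	// Hypothetical values: user1's token should only be allowed in user1's namespace.
	allowed, err := canCreateDeployments("https://127.0.0.1:6443", "<user1-token>", "e2e-test-user2")
	fmt.Println(allowed, err) // expected: false <nil>
}
```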
+// generateReadOnlyClusterRoleandBindingForServiceAccount creates a ClusterRole/Binding that allows a ServiceAccount in a given namespace to read all resources on a cluster.
+// - This allows the ServiceAccount to be used within a cluster-scoped Argo CD Cluster Secret.
+func generateReadOnlyClusterRoleandBindingForServiceAccount(roleSuffix string, serviceAccountNS string) (rbacv1.ClusterRole, rbacv1.ClusterRoleBinding) {
+
+    clusterRole := rbacv1.ClusterRole{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: E2ETestPrefix + "read-all-" + roleSuffix,
+        },
+        Rules: []rbacv1.PolicyRule{{
+            Verbs:     []string{"get", "list", "watch"},
+            Resources: []string{"*"},
+            APIGroups: []string{"*"},
+        }},
+    }
+
+    clusterRoleBinding := rbacv1.ClusterRoleBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: E2ETestPrefix + "read-all-" + roleSuffix,
+        },
+        Subjects: []rbacv1.Subject{{
+            Kind:      rbacv1.ServiceAccountKind,
+            Namespace: serviceAccountNS,
+            Name:      roleSuffix + "-serviceaccount",
+        }},
+        RoleRef: rbacv1.RoleRef{
+            APIGroup: "rbac.authorization.k8s.io",
+            Kind:     "ClusterRole",
+            Name:     clusterRole.Name,
+        },
+    }
+
+    return clusterRole, clusterRoleBinding
+}
+
+// buildArgoCDClusterSecret builds (but does not create) an Argo CD Cluster Secret object with the given values.
+func buildArgoCDClusterSecret(secretName, secretNamespace, clusterName, clusterServer, clusterConfigJSON, clusterResources, clusterNamespaces string) corev1.Secret {
+    res := corev1.Secret{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      secretName,
+            Namespace: secretNamespace,
+            Labels: map[string]string{
+                common.LabelKeySecretType: common.LabelValueSecretTypeCluster,
+            },
+        },
+        Data: map[string][]byte{
+            "name":   ([]byte)(clusterName),
+            "server": ([]byte)(clusterServer),
+            "config": ([]byte)(clusterConfigJSON),
+        },
+    }
+
+    if clusterResources != "" {
+        res.Data["clusterResources"] = ([]byte)(clusterResources)
+    }
+
+    if clusterNamespaces != "" {
+        res.Data["namespaces"] = ([]byte)(clusterNamespaces)
+    }
+
+    return res
+}
+
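A hypothetical usage of the two helpers above, assumed to sit alongside them in the e2e package. The URL, token, and names are placeholders only; `createNamespaceScopedUser` below shows the values the tests actually use:

```go
// Sketch only: wire the helpers together for a single simulated user.
func exampleClusterSecretForUser() corev1.Secret {
	clusterRole, clusterRoleBinding := generateReadOnlyClusterRoleandBindingForServiceAccount(E2ETestPrefix+"user1", E2ETestPrefix+"user1")
	_ = clusterRole        // would be created via KubeClientset.RbacV1().ClusterRoles().Create(...)
	_ = clusterRoleBinding // would be created via KubeClientset.RbacV1().ClusterRoleBindings().Create(...)

	return buildArgoCDClusterSecret(
		"test-user1",                         // secretName
		ArgoCDNamespace,                      // secretNamespace: the Argo CD E2E namespace
		E2ETestPrefix+"cluster-user1",        // clusterName
		"https://127.0.0.1:6443/?user=user1", // clusterServer: placeholder API URL with query parameter
		`{"bearerToken":"<token>","tlsClientConfig":{"insecure":true}}`, // clusterConfigJSON (placeholder token)
		"false",              // clusterResources: namespace-scoped secret
		E2ETestPrefix+"user1", // namespaces the secret may manage
	)
}
```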
+// createNamespaceScopedUser
+// - username = name of the Namespace the simulated user is able to deploy to
+// - clusterScopedSecrets = whether the ServiceAccount (and the Argo CD Cluster Secret that uses it) is namespace-scoped or cluster-scoped
+func createNamespaceScopedUser(t *testing.T, username string, clusterScopedSecrets bool) {
+
+    // Create a new Namespace for our simulated user
+    ns := corev1.Namespace{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: username,
+        },
+    }
+    _, err := KubeClientset.CoreV1().Namespaces().Create(context.Background(), &ns, metav1.CreateOptions{})
+    assert.Nil(t, err)
+
+    // Create a ServiceAccount in that Namespace, which will be used for the Argo CD Cluster Secret
+    serviceAccountName := username + "-serviceaccount"
+    err = clusterauth.CreateServiceAccount(KubeClientset, serviceAccountName, ns.Name)
+    assert.Nil(t, err)
+
+    // Create a Role that allows the ServiceAccount to read/write everything within the Namespace
+    role := rbacv1.Role{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      E2ETestPrefix + "allow-all",
+            Namespace: ns.Name,
+        },
+        Rules: []rbacv1.PolicyRule{{
+            Verbs:     []string{"*"},
+            Resources: []string{"*"},
+            APIGroups: []string{"*"},
+        }},
+    }
+    _, err = KubeClientset.RbacV1().Roles(role.Namespace).Create(context.Background(), &role, metav1.CreateOptions{})
+    assert.Nil(t, err)
+
+    // Bind the Role to the ServiceAccount in the Namespace
+    roleBinding := rbacv1.RoleBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      E2ETestPrefix + "allow-all-binding",
+            Namespace: ns.Name,
+        },
+        Subjects: []rbacv1.Subject{{
+            Kind:      rbacv1.ServiceAccountKind,
+            Name:      serviceAccountName,
+            Namespace: ns.Name,
+        }},
+        RoleRef: rbacv1.RoleRef{
+            APIGroup: "rbac.authorization.k8s.io",
+            Kind:     "Role",
+            Name:     role.Name,
+        },
+    }
+    _, err = KubeClientset.RbacV1().RoleBindings(roleBinding.Namespace).Create(context.Background(), &roleBinding, metav1.CreateOptions{})
+    assert.Nil(t, err)
+
+    // Retrieve the bearer token from the ServiceAccount
+    token, err := clusterauth.GetServiceAccountBearerToken(KubeClientset, ns.Name, serviceAccountName, time.Second*60)
+    assert.Nil(t, err)
+    assert.NotEmpty(t, token)
+
+    // In order to test a cluster-scoped Argo CD Cluster Secret, we may optionally grant the ServiceAccount read-all permissions at cluster scope.
+    if clusterScopedSecrets {
+        clusterRole, clusterRoleBinding := generateReadOnlyClusterRoleandBindingForServiceAccount(username, username)
+
+        _, err := KubeClientset.RbacV1().ClusterRoles().Create(context.Background(), &clusterRole, metav1.CreateOptions{})
+        assert.Nil(t, err)
+
+        _, err = KubeClientset.RbacV1().ClusterRoleBindings().Create(context.Background(), &clusterRoleBinding, metav1.CreateOptions{})
+        assert.Nil(t, err)
+
+    }
+
+    // Build the Argo CD Cluster Secret from the ServiceAccount token, extracting the remaining values from the local kubeconfig
+    clusterSecretConfigJSON := ClusterConfig{
+        BearerToken: token,
+        TLSClientConfig: TLSClientConfig{
+            Insecure: true,
+        },
+    }
+
+    jsonStringBytes, err := json.Marshal(clusterSecretConfigJSON)
+    assert.Nil(t, err)
+
+    _, apiURL, err := extractKubeConfigValues()
+    assert.Nil(t, err)
+
+    clusterResourcesField := ""
+    namespacesField := ""
+
+    if !clusterScopedSecrets {
+        clusterResourcesField = "false"
+        namespacesField = ns.Name
+    }
+
+    // We create the Argo CD Cluster Secret declaratively, using the K8s client rather than the CLI, as the CLI doesn't currently
+    // support Kubernetes API server URLs with query parameters.
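+    // Appending "?user=<username>" to the API server URL makes the server value unique per simulated
+    // user, so Argo CD stores a separate cluster (with that user's ServiceAccount token) even though
+    // every one of these secrets ultimately points at the same Kubernetes API server.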
+
+    secret := buildArgoCDClusterSecret("test-"+username, ArgoCDNamespace, E2ETestPrefix+"cluster-"+username, apiURL+"?user="+username,
+        string(jsonStringBytes), clusterResourcesField, namespacesField)
+
+    // Finally, create the Cluster Secret in the Argo CD E2E namespace
+    _, err = KubeClientset.CoreV1().Secrets(secret.Namespace).Create(context.Background(), &secret, metav1.CreateOptions{})
+    assert.Nil(t, err)
+}
+
+// extractKubeConfigValues returns the contents of the local environment's kubeconfig, using the standard path resolution mechanism.
+// Returns:
+// - the contents of the kubeconfig
+// - the API server URL of the kubeconfig's current context
+// - error
+func extractKubeConfigValues() (string, string, error) {
+
+    loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+
+    config, err := loadingRules.Load()
+    if err != nil {
+        return "", "", err
+    }
+
+    context, ok := config.Contexts[config.CurrentContext]
+    if !ok || context == nil {
+        return "", "", fmt.Errorf("no context")
+    }
+
+    cluster, ok := config.Clusters[context.Cluster]
+    if !ok || cluster == nil {
+        return "", "", fmt.Errorf("no cluster")
+    }
+
+    var kubeConfigDefault string
+
+    paths := loadingRules.Precedence
+    {
+
+        // For all the kubeconfig paths, look for one that exists
+        for _, path := range paths {
+            _, err = os.Stat(path)
+            if err == nil {
+                // Success
+                kubeConfigDefault = path
+                break
+            } // Otherwise, continue.
+
+        }
+
+        if kubeConfigDefault == "" {
+            return "", "", fmt.Errorf("unable to retrieve kube config path")
+        }
+    }
+
+    kubeConfigContents, err := os.ReadFile(kubeConfigDefault)
+    if err != nil {
+        return "", "", err
+    }
+
+    return string(kubeConfigContents), cluster.Server, nil
+}
diff --git a/test/e2e/fixture/cluster/actions.go b/test/e2e/fixture/cluster/actions.go
index 3f047e8f9b03e..0613c9a22cf15 100644
--- a/test/e2e/fixture/cluster/actions.go
+++ b/test/e2e/fixture/cluster/actions.go
@@ -45,10 +45,10 @@ func (a *Actions) Create(args ...string) *Actions {
             Cluster: &v1alpha1.Cluster{
                 Server: a.context.server,
                 Name:   a.context.name,
-                Config: v1alpha1.ClusterConfig{},
+                Config: v1alpha1.ClusterConfig{BearerToken: a.context.bearerToken},
                 ConnectionState:    v1alpha1.ConnectionState{},
                 ServerVersion:      "",
-                Namespaces:         nil,
+                Namespaces:         a.context.namespaces,
                 RefreshRequestedAt: nil,
                 Info:               v1alpha1.ClusterInfo{},
                 Shard:              nil,
diff --git a/test/e2e/fixture/cluster/context.go b/test/e2e/fixture/cluster/context.go
index 236be6a3a3913..bd0102f891d71 100644
--- a/test/e2e/fixture/cluster/context.go
+++ b/test/e2e/fixture/cluster/context.go
@@ -12,12 +12,13 @@ import (
 type Context struct {
     t *testing.T
     // seconds
-    timeout    int
-    name       string
-    project    string
-    server     string
-    upsert     bool
-    namespaces []string
+    timeout     int
+    name        string
+    project     string
+    server      string
+    upsert      bool
+    namespaces  []string
+    bearerToken string
 }
 
 func Given(t *testing.T) *Context {
@@ -67,6 +68,11 @@ func (c *Context) Project(project string) *Context {
     return c
 }
 
+func (c *Context) BearerToken(bearerToken string) *Context {
+    c.bearerToken = bearerToken
+    return c
+}
+
 func (c *Context) Upsert(upsert bool) *Context {
     c.upsert = upsert
     return c
diff --git a/test/e2e/fixture/fixture.go b/test/e2e/fixture/fixture.go
index 095b3e2f116be..d8d91f09436b0 100644
--- a/test/e2e/fixture/fixture.go
+++ b/test/e2e/fixture/fixture.go
@@ -58,6 +58,8 @@ const (
     // cmp plugin sock file path
     PluginSockFilePath = "/app/config/plugin"
+
+    E2ETestPrefix = "e2e-test-"
 )
 
 const (
@@ -666,6 +668,33 @@ func EnsureCleanState(t *testing.T, opts ...TestOption) {
"kubectl", "create", "ns", DeploymentNamespace())) FailOnErr(Run("", "kubectl", "label", "ns", DeploymentNamespace(), TestingLabel+"=true")) + // delete old namespaces used by E2E tests + namespaces, err := KubeClientset.CoreV1().Namespaces().List(context.Background(), v1.ListOptions{}) + CheckError(err) + for _, namespace := range namespaces.Items { + if strings.HasPrefix(namespace.Name, E2ETestPrefix) { + FailOnErr(Run("", "kubectl", "delete", "ns", namespace.Name)) + } + } + + // delete old ClusterRoles that begin with "e2e-test-" prefix (E2ETestPrefix), which were created by tests + clusterRoles, err := KubeClientset.RbacV1().ClusterRoles().List(context.Background(), v1.ListOptions{}) + CheckError(err) + for _, clusterRole := range clusterRoles.Items { + if strings.HasPrefix(clusterRole.Name, E2ETestPrefix) { + FailOnErr(Run("", "kubectl", "delete", "clusterrole", clusterRole.Name)) + } + } + + // delete old ClusterRoleBindings that begin with "e2e-test-prefix", which were created by E2E tests + clusterRoleBindings, err := KubeClientset.RbacV1().ClusterRoleBindings().List(context.Background(), v1.ListOptions{}) + CheckError(err) + for _, clusterRoleBinding := range clusterRoleBindings.Items { + if strings.HasPrefix(clusterRoleBinding.Name, E2ETestPrefix) { + FailOnErr(Run("", "kubectl", "delete", "clusterrolebinding", clusterRoleBinding.Name)) + } + } + log.WithFields(log.Fields{"duration": time.Since(start), "name": t.Name(), "id": id, "username": "admin", "password": "password"}).Info("clean state") }