-
Notifications
You must be signed in to change notification settings - Fork 208
Adding support for server to authenticate agent #51
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -29,14 +29,15 @@ import ( | |
| "os/signal" | ||
| "syscall" | ||
|
|
||
| "k8s.io/klog" | ||
|
|
||
| "github.com/google/uuid" | ||
| "github.com/prometheus/client_golang/prometheus" | ||
| "github.com/spf13/cobra" | ||
| "github.com/spf13/pflag" | ||
| "google.golang.org/grpc" | ||
| "google.golang.org/grpc/credentials" | ||
| "k8s.io/klog" | ||
| "k8s.io/client-go/kubernetes" | ||
| "k8s.io/client-go/tools/clientcmd" | ||
| "sigs.k8s.io/apiserver-network-proxy/pkg/agent/agentserver" | ||
| "sigs.k8s.io/apiserver-network-proxy/pkg/util" | ||
| "sigs.k8s.io/apiserver-network-proxy/proto/agent" | ||
|
|
@@ -86,6 +87,14 @@ type ProxyRunOptions struct { | |
| serverID string | ||
| // Number of proxy server instances, should be 1 unless it is a HA proxy server. | ||
| serverCount uint | ||
| // Agent pod's namespace for token-based agent authentication | ||
| agentNamespace string | ||
| // Agent pod's service account for token-based agent authentication | ||
| agentServiceAccount string | ||
| // Token's audience for token-based agent authentication | ||
| authenticationAudience string | ||
| // Path to kubeconfig (used by kubernetes client) | ||
| kubeconfigPath string | ||
| } | ||
|
|
||
| func (o *ProxyRunOptions) Flags() *pflag.FlagSet { | ||
|
|
@@ -103,6 +112,10 @@ func (o *ProxyRunOptions) Flags() *pflag.FlagSet { | |
| flags.UintVar(&o.adminPort, "admin-port", o.adminPort, "Port we listen for admin connections on.") | ||
| flags.StringVar(&o.serverID, "server-id", o.serverID, "The unique ID of this server.") | ||
| flags.UintVar(&o.serverCount, "server-count", o.serverCount, "The number of proxy server instances, should be 1 unless it is an HA server.") | ||
| flags.StringVar(&o.agentNamespace, "agent-namespace", o.agentNamespace, "Expected agent's namespace during agent authentication (used with agent-service-account, authentication-audience, kubeconfig).") | ||
| flags.StringVar(&o.agentServiceAccount, "agent-service-account", o.agentServiceAccount, "Expected agent's service account during agent authentication (used with agent-namespace, authentication-audience, kubeconfig).") | ||
| flags.StringVar(&o.kubeconfigPath, "kubeconfig", o.kubeconfigPath, "absolute path to the kubeconfig file (used with agent-namespace, agent-service-account, authentication-audience).") | ||
| flags.StringVar(&o.authenticationAudience, "authentication-audience", o.authenticationAudience, "Expected agent's token authentication audience (used with agent-namespace, agent-service-account, kubeconfig).") | ||
| return flags | ||
| } | ||
|
|
||
|
|
@@ -120,6 +133,10 @@ func (o *ProxyRunOptions) Print() { | |
| klog.Warningf("Admin port set to %d.\n", o.adminPort) | ||
| klog.Warningf("ServerID set to %s.\n", o.serverID) | ||
| klog.Warningf("ServerCount set to %d.\n", o.serverCount) | ||
| klog.Warningf("AgentNamespace set to %q.\n", o.agentNamespace) | ||
| klog.Warningf("AgentServiceAccount set to %q.\n", o.agentServiceAccount) | ||
| klog.Warningf("AuthenticationAudience set to %q.\n", o.authenticationAudience) | ||
| klog.Warningf("KubeconfigPath set to %q.\n", o.kubeconfigPath) | ||
| } | ||
|
|
||
| func (o *ProxyRunOptions) Validate() error { | ||
|
|
@@ -202,24 +219,48 @@ func (o *ProxyRunOptions) Validate() error { | |
| if o.adminPort < 1024 { | ||
| return fmt.Errorf("please do not try to use reserved port %d for the admin port", o.adminPort) | ||
| } | ||
|
|
||
| // validate agent authentication params | ||
| // all 4 parameters must be empty or must have a value (except kubeconfigPath, which may be empty) | ||
| if o.agentNamespace != "" || o.agentServiceAccount != "" || o.authenticationAudience != "" || o.kubeconfigPath != "" { | ||
| if o.agentNamespace == "" { | ||
| return fmt.Errorf("agentNamespace cannot be empty when agent authentication is enabled") | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. For the future we should consider accumulating these errors. Sort of annoying to be given an error that I need a service account on run 1 and then be given an error that I need an audience on run 2. |
||
| } | ||
| if o.agentServiceAccount == "" { | ||
| return fmt.Errorf("agentServiceAccount cannot be empty when agent authentication is enabled") | ||
| } | ||
| if o.authenticationAudience == "" { | ||
| return fmt.Errorf("authenticationAudience cannot be empty when agent authentication is enabled") | ||
| } | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Also check if
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It cannot be nil, since we have newProxyRunOptions(). |
||
| if o.kubeconfigPath != "" { | ||
| if _, err := os.Stat(o.kubeconfigPath); os.IsNotExist(err) { | ||
| return fmt.Errorf("error checking kubeconfigPath %q, got %v", o.kubeconfigPath, err) | ||
| } | ||
| } | ||
| } | ||
|
|
||
| return nil | ||
| } | ||
|
|
||
| func newProxyRunOptions() *ProxyRunOptions { | ||
| o := ProxyRunOptions{ | ||
| serverCert: "", | ||
| serverKey: "", | ||
| serverCaCert: "", | ||
| clusterCert: "", | ||
| clusterKey: "", | ||
| clusterCaCert: "", | ||
| mode: "grpc", | ||
| udsName: "", | ||
| serverPort: 8090, | ||
| agentPort: 8091, | ||
| adminPort: 8092, | ||
| serverID: uuid.New().String(), | ||
| serverCount: 1, | ||
| serverCert: "", | ||
| serverKey: "", | ||
| serverCaCert: "", | ||
| clusterCert: "", | ||
| clusterKey: "", | ||
| clusterCaCert: "", | ||
| mode: "grpc", | ||
| udsName: "", | ||
| serverPort: 8090, | ||
| agentPort: 8091, | ||
| adminPort: 8092, | ||
| serverID: uuid.New().String(), | ||
| serverCount: 1, | ||
| agentNamespace: "", | ||
| agentServiceAccount: "", | ||
| kubeconfigPath: "", | ||
| authenticationAudience: "", | ||
| } | ||
| return &o | ||
| } | ||
|
|
@@ -247,7 +288,29 @@ func (p *Proxy) run(o *ProxyRunOptions) error { | |
| return fmt.Errorf("failed to validate server options with %v", err) | ||
| } | ||
| ctx, cancel := context.WithCancel(context.Background()) | ||
| server := agentserver.NewProxyServer(o.serverID, int(o.serverCount)) | ||
| defer cancel() | ||
dberkov marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| var k8sClient *kubernetes.Clientset | ||
| if o.agentNamespace != "" { | ||
| config, err := clientcmd.BuildConfigFromFlags("", o.kubeconfigPath) | ||
dberkov marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| if err != nil { | ||
| return fmt.Errorf("failed to load kubernetes client config: %v", err) | ||
| } | ||
|
|
||
| k8sClient, err = kubernetes.NewForConfig(config) | ||
| if err != nil { | ||
| return fmt.Errorf("failed to create kubernetes clientset: %v", err) | ||
| } | ||
| } | ||
|
|
||
| authOpt := &agentserver.AgentTokenAuthenticationOptions{ | ||
| Enabled: o.agentNamespace != "", | ||
| AgentNamespace: o.agentNamespace, | ||
| AgentServiceAccount: o.agentServiceAccount, | ||
| KubernetesClient: k8sClient, | ||
| AuthenticationAudience: o.authenticationAudience, | ||
| } | ||
| server := agentserver.NewProxyServer(o.serverID, int(o.serverCount), authOpt) | ||
|
|
||
| klog.Info("Starting master server for client connections.") | ||
| masterStop, err := p.runMasterServer(ctx, o, server) | ||
|
|
@@ -274,7 +337,6 @@ func (p *Proxy) run(o *ProxyRunOptions) error { | |
| if masterStop != nil { | ||
| masterStop() | ||
| } | ||
| cancel() | ||
|
|
||
| return nil | ||
| } | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -20,16 +20,42 @@ KUBIA_IP=$(kubectl get svc kubia -o=jsonpath='{.spec.clusterIP}') | |
| PROXY_IMAGE=$(docker images | grep "proxy-server-" -m1 | awk '{print $1}') | ||
| AGENT_IMAGE=$(docker images | grep "proxy-agent-" -m1 | awk '{print $1}') | ||
| TEST_CLIENT_IMAGE=$(docker images | grep "proxy-test-client-" -m1 | awk '{print $1}') | ||
| SERVER_TOKEN=$(./examples/kubernetes/token_generation.sh 32) | ||
| CLUSTER_CERT=</yourdirectory/server.crt> | ||
| CLUSTER_KEY=</yourdirectory/server.key> | ||
| ``` | ||
|
|
||
| #### GKE specific configuration | ||
| #### GCE sample configuration | ||
| ```bash | ||
| CLUSTER_CERT=/etc/srv/kubernetes/pki/apiserver.crt | ||
| CLUSTER_KEY=/etc/srv/kubernetes/pki/apiserver.key | ||
| ``` | ||
|
|
||
| # Register SERVER_TOKEN in [static-token-file](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-token-file) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is "static-token" the standard way to authenticate a process running in the master node? I think we can run the proxy server as a static pod, and then use a service account to authenticate it.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, this pattern is used across all other static pods. Ex: https://github.com/kubernetes/kubernetes/blob/c14106ad1234742da80eb8f12ddcbf19dba61284/cluster/gce/gci/configure-helper.sh#L613-L615 |
||
| Append the output of the following line to the [static-token-file](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-token-file) and restart **kube-apiserver** on the master | ||
| ```bash | ||
| echo "${SERVER_TOKEN},system:konnectivity-server,uid:system:konnectivity-server" | ||
| ``` | ||
|
|
||
| #### GCE sample configuration | ||
| 1. [static-token-file](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-token-file) location is: **/etc/srv/kubernetes/known_tokens.csv** | ||
|
|
||
| 1. Restart kube-apiserver | ||
| ```bash | ||
| K8S_API_PID=$(sudo crictl ps | grep kube-apiserver | awk '{ print $1; }') | ||
| sudo crictl stop ${K8S_API_PID} | ||
| ``` | ||
|
|
||
| # Save following config at /etc/srv/kubernetes/konnectivity-server/kubeconfig on master VM | ||
| ```bash | ||
| SERVER_TOKEN=${SERVER_TOKEN} envsubst < examples/kubernetes/kubeconfig | ||
| ``` | ||
|
|
||
| # Create a clusterrolebinding allowing proxy-server to authenticate proxy-client | ||
| ```bash | ||
| kubectl create clusterrolebinding --user system:konnectivity-server --clusterrole system:auth-delegator system:konnectivity-server | ||
| ``` | ||
|
|
||
| # Start **proxy-server** as a [static pod](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/) with following configuration | ||
| ```bash | ||
| TAG=${TAG} PROXY_IMAGE=${PROXY_IMAGE} CLUSTER_CERT=${CLUSTER_CERT} CLUSTER_KEY=${CLUSTER_KEY} envsubst < examples/kubernetes/konnectivity-server.yaml | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,8 +1,14 @@ | ||
| apiVersion: v1 | ||
| kind: ServiceAccount | ||
| metadata: | ||
| name: konnectivity-agent | ||
| namespace: kube-system | ||
| --- | ||
| apiVersion: v1 | ||
| kind: Pod | ||
| metadata: | ||
| name: konnectivity-agent | ||
| namespace: default | ||
| namespace: kube-system | ||
| annotations: | ||
| scheduler.alpha.kubernetes.io/critical-pod: '' | ||
| seccomp.security.alpha.kubernetes.io/pod: 'docker/default' | ||
|
|
@@ -20,6 +26,7 @@ spec: | |
| "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", | ||
| "--proxy-server-host=${CLUSTER_IP}", | ||
| "--proxy-server-port=8091", | ||
| "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token", | ||
dberkov marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| ] | ||
| livenessProbe: | ||
| httpGet: | ||
|
|
@@ -33,3 +40,14 @@ spec: | |
| limits: | ||
| cpu: 50m | ||
| memory: 30Mi | ||
| volumeMounts: | ||
| - mountPath: /var/run/secrets/tokens | ||
| name: konnectivity-agent-token | ||
| serviceAccountName: konnectivity-agent | ||
| volumes: | ||
| - name: konnectivity-agent-token | ||
| projected: | ||
| sources: | ||
| - serviceAccountToken: | ||
| path: konnectivity-agent-token | ||
| audience: system:konnectivity-server | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I guess this "audience" value gets encoded into the token?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. KubernetesClient.AuthenticationV1().TokenReviews() takes the audience in the parameters, so the k8s API validates that the token was issued with this audience. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. But konnectivity-server calls TokenReviews, and this is the yaml for the konnectivity-agent. My guess is the token data mounted by the agent will contain the "audience", and apiserver will be able to extract the "audience" out from the token. |
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -19,13 +19,17 @@ spec: | |
| "--log-file=/var/log/konnectivity-server.log", | ||
| "--logtostderr=false", | ||
| "--log-file-max-size=0", | ||
| "--uds-name=/etc/srv/kubernetes/konnectivity/konnectivity-server.socket", | ||
| "--uds-name=/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket", | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why do we need to change this?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We will have a kubeconfig file generated in this folder (based on the pattern, the folder name is supposed to match the component name), therefore we wanted to combine both files (kubeconfig & uds) into the same folder.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Edit: It looks like they're linked. Please ignore my comment. |
||
| "--cluster-cert=/etc/srv/kubernetes/pki/apiserver.crt", | ||
| "--cluster-key=/etc/srv/kubernetes/pki/apiserver.key", | ||
| "--server-port=0", | ||
| "--agent-port=8091", | ||
| "--admin-port=8092", | ||
| "--mode=http-connect" | ||
| "--mode=http-connect", | ||
| "--agent-namespace=kube-system", | ||
| "--agent-service-account=konnectivity-agent", | ||
| "--kubeconfig=/etc/srv/kubernetes/konnectivity-server/kubeconfig", | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Where do we copy this kubeconfig from?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It will be generated by kubernetes host service (aka: GKE in GCP) |
||
| "--authentication-audience=system:konnectivity-server", | ||
| ] | ||
| livenessProbe: | ||
| httpGet: | ||
|
|
@@ -53,7 +57,7 @@ spec: | |
| mountPath: /etc/srv/kubernetes/pki | ||
| readOnly: true | ||
| - name: konnectivity-home | ||
| mountPath: /etc/srv/kubernetes/konnectivity | ||
| mountPath: /etc/srv/kubernetes/konnectivity-server | ||
| volumes: | ||
| - name: varlogkonnectivityserver | ||
| hostPath: | ||
|
|
@@ -64,6 +68,5 @@ spec: | |
| path: /etc/srv/kubernetes/pki | ||
| - name: konnectivity-home | ||
| hostPath: | ||
| path: /etc/srv/kubernetes/konnectivity | ||
| path: /etc/srv/kubernetes/konnectivity-server | ||
| type: DirectoryOrCreate | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Will we ever want different service accounts for different agents? Eg. agent service account per failure domain?