auth.go

package proxy

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"net/url"

	kubeutils "github.com/gravitational/teleport/lib/kube/utils"

	"github.com/gravitational/trace"
	"github.com/sirupsen/logrus"
	authzapi "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	authztypes "k8s.io/client-go/kubernetes/typed/authorization/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/transport"

	// Load kubeconfig auth plugins for gcp and azure.
	// Without this, users can't provide a kubeconfig using those.
	//
	// Note: we don't want to load _all_ plugins. This is a balance between
	// support for popular hosting providers and minimizing attack surface.
	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)

// kubeCreds contains authentication-related fields from kubeconfig.
//
// TODO(awly): make this an interface, one implementation for local k8s cluster
// and another for a remote teleport cluster.
type kubeCreds struct {
	// tlsConfig contains (m)TLS configuration.
	tlsConfig *tls.Config
	// transportConfig contains HTTPS-related configuration.
	// Note: use wrapTransport method if working with http.RoundTrippers.
	transportConfig *transport.Config
	// targetAddr is a kubernetes API address.
	targetAddr string
	// kubeClient is a Kubernetes client for the target cluster.
	kubeClient *kubernetes.Clientset
}

// ImpersonationPermissionsChecker describes a function that can be used to check
// for the required impersonation permissions on a Kubernetes cluster. Return nil
// to indicate success.
type ImpersonationPermissionsChecker func(ctx context.Context, clusterName string,
	sarClient authztypes.SelfSubjectAccessReviewInterface) error

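// Any function matching this signature can be plugged in, which is handy for
// bypassing the startup self-test in unit tests. A minimal sketch of such a
// hypothetical no-op checker (not part of this file):
//
//	func allowAllImpersonationCheck(ctx context.Context, clusterName string,
//		sarClient authztypes.SelfSubjectAccessReviewInterface) error {
//		return nil // assume permissions are fine; for tests only
//	}
//
// checkImpersonationPermissions, defined at the bottom of this file, is an
// implementation that performs real SelfSubjectAccessReview checks.
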
// getKubeCreds fetches the kubernetes API credentials.
//
// There are 2 possible sources of credentials:
//   - pod service account credentials: files in hardcoded paths when running
//     inside of a k8s pod; this is used when kubeClusterName is set
//   - kubeconfig: a file with a set of k8s endpoints and credentials mapped to
//     them; this is used when kubeconfigPath is set
//
// serviceType changes the loading behavior:
//   - LegacyProxyService:
//       - if loading from kubeconfig, only "current-context" is returned; the
//         returned map key matches tpClusterName
//       - if no credentials are loaded, no error is returned
//       - permission self-test failures are only logged
//   - ProxyService:
//       - no credentials are loaded and no error is returned
//   - KubeService:
//       - if loading from kubeconfig, all contexts are returned
//       - if no credentials are loaded, returns an error
//       - permission self-test failures cause an error to be returned
func getKubeCreds(ctx context.Context, log logrus.FieldLogger, tpClusterName, kubeClusterName, kubeconfigPath string, serviceType KubeServiceType, checkImpersonation ImpersonationPermissionsChecker) (map[string]*kubeCreds, error) {
	log.
		WithField("kubeconfigPath", kubeconfigPath).
		WithField("kubeClusterName", kubeClusterName).
		WithField("serviceType", serviceType).
		Debug("Reading Kubernetes creds.")

	// The proxy service should never have creds; it forwards requests to the
	// kube service.
	if serviceType == ProxyService {
		return map[string]*kubeCreds{}, nil
	}

	// Load kubeconfig or local pod credentials.
	loadAll := serviceType == KubeService
	cfg, err := kubeutils.GetKubeConfig(kubeconfigPath, loadAll, kubeClusterName)
	if err != nil && !trace.IsNotFound(err) {
		return nil, trace.Wrap(err)
	}
	if trace.IsNotFound(err) || len(cfg.Contexts) == 0 {
		switch serviceType {
		case KubeService:
			return nil, trace.BadParameter("no Kubernetes credentials found; kubernetes_service requires either a valid kubeconfig_path or to run inside of a Kubernetes pod")
		case LegacyProxyService:
			log.Debugf("Could not load Kubernetes credentials. This proxy will still handle Kubernetes requests for trusted teleport clusters or Kubernetes nodes in this teleport cluster.")
		}
		return map[string]*kubeCreds{}, nil
	}

	if serviceType == LegacyProxyService {
		// Hack for legacy proxy service - register a k8s cluster named after
		// the teleport cluster name to route legacy requests.
		//
		// Also, remove all other contexts. Multiple kubeconfig entries are
		// only supported for kubernetes_service.
		if currentContext, ok := cfg.Contexts[cfg.CurrentContext]; ok {
			cfg.Contexts = map[string]*rest.Config{
				tpClusterName: currentContext,
			}
		} else {
			return nil, trace.BadParameter("no Kubernetes current-context found; Kubernetes proxy service requires either a valid kubeconfig_path with a current-context or to run inside of a Kubernetes pod")
		}
	}

	res := make(map[string]*kubeCreds, len(cfg.Contexts))
	// Convert kubeconfig contexts into kubeCreds.
	for cluster, clientCfg := range cfg.Contexts {
		clusterCreds, err := extractKubeCreds(ctx, cluster, clientCfg, serviceType, kubeconfigPath, log, checkImpersonation)
		if err != nil {
			log.WithError(err).Warnf("Failed to load credentials for cluster %q.", cluster)
			continue
		}
		res[cluster] = clusterCreds
	}
	return res, nil
}

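// Illustrative call site (a sketch, not taken from this file): a
// kubernetes_service instance loading every kubeconfig context might wire
// this up roughly as follows, assuming ctx, log and a hypothetical
// kubeconfigPath come from the caller:
//
//	creds, err := getKubeCreds(ctx, log, "root.example.com", "", kubeconfigPath,
//		KubeService, checkImpersonationPermissions)
//	if err != nil {
//		return trace.Wrap(err)
//	}
//	for kubeCluster := range creds {
//		log.Debugf("Loaded credentials for Kubernetes cluster %q.", kubeCluster)
//	}
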
func extractKubeCreds(ctx context.Context, cluster string, clientCfg *rest.Config, serviceType KubeServiceType, kubeconfigPath string, log logrus.FieldLogger, checkPermissions ImpersonationPermissionsChecker) (*kubeCreds, error) {
	log = log.WithField("cluster", cluster)
	log.Debug("Checking Kubernetes impersonation permissions.")

	client, err := kubernetes.NewForConfig(clientCfg)
	if err != nil {
		return nil, trace.Wrap(err, "failed to generate Kubernetes client for cluster %q", cluster)
	}

	// For each loaded cluster, check impersonation permissions. This check
	// only logs a warning when permissions are not configured; it does not
	// fail startup.
	if err := checkPermissions(ctx, cluster, client.AuthorizationV1().SelfSubjectAccessReviews()); err != nil {
		log.WithError(err).Warning("Failed to test the necessary Kubernetes permissions. The target Kubernetes cluster may be down or have misconfigured RBAC. This teleport instance will still handle Kubernetes requests towards this Kubernetes cluster.")
		if serviceType != KubeService && kubeconfigPath != "" {
			// We used to recommend that users set a dummy kubeconfig on root
			// proxies to get kubernetes support working for leaf clusters:
			// https://community.goteleport.com/t/enabling-teleport-to-act-as-a-kubernetes-proxy-for-trusted-leaf-clusters/418
			//
			// Since this is no longer necessary, recommend them to clean up
			// via logs.
			log.Info("If this is a proxy and you provided a dummy kubeconfig_file, you can remove it from teleport.yaml to get rid of this warning.")
		}
	} else {
		log.Debug("Have all necessary Kubernetes impersonation permissions.")
	}

	targetAddr, err := parseKubeHost(clientCfg.Host)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	tlsConfig, err := rest.TLSConfigFor(clientCfg)
	if err != nil {
		return nil, trace.Wrap(err, "failed to generate TLS config from kubeconfig: %v", err)
	}
	transportConfig, err := clientCfg.TransportConfig()
	if err != nil {
		return nil, trace.Wrap(err, "failed to generate transport config from kubeconfig: %v", err)
	}

	log.Debug("Initialized Kubernetes credentials.")
	return &kubeCreds{
		tlsConfig:       tlsConfig,
		transportConfig: transportConfig,
		targetAddr:      targetAddr,
		kubeClient:      client,
	}, nil
}

// parseKubeHost parses and formats a kubernetes hostname into host:port
// format; if no port is set, it assumes the default HTTPS port.
func parseKubeHost(host string) (string, error) {
	u, err := url.Parse(host)
	if err != nil {
		return "", trace.Wrap(err, "failed to parse Kubernetes host: %v", err)
	}
	if _, _, err := net.SplitHostPort(u.Host); err != nil {
		// add default HTTPS port
		return fmt.Sprintf("%v:443", u.Host), nil
	}
	return u.Host, nil
}

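// A rough sketch of the expected behavior, with illustrative values: a
// kubeconfig server entry of "https://10.0.0.1" maps to "10.0.0.1:443",
// while an entry that already carries a port is returned unchanged:
//
//	addr, err := parseKubeHost("https://10.0.0.1")
//	// addr == "10.0.0.1:443", err == nil
//
//	addr, err = parseKubeHost("https://10.0.0.1:6443")
//	// addr == "10.0.0.1:6443", err == nil
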
// wrapTransport wraps rt with the HTTP-level settings (e.g. auth headers) from
// transportConfig. If c is nil, rt is returned unchanged.
func (c *kubeCreds) wrapTransport(rt http.RoundTripper) (http.RoundTripper, error) {
	if c == nil {
		return rt, nil
	}
	return transport.HTTPWrappersForConfig(c.transportConfig, rt)
}

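// A minimal usage sketch (hypothetical caller, not part of this file): when
// building an HTTP transport for forwarding requests to the target cluster,
// the stored TLS and transport settings would be applied roughly like this:
//
//	base := &http.Transport{TLSClientConfig: creds.tlsConfig}
//	rt, err := creds.wrapTransport(base)
//	if err != nil {
//		return trace.Wrap(err)
//	}
//	httpClient := &http.Client{Transport: rt}
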
// checkImpersonationPermissions verifies that the identity used by this
// service can impersonate users, groups and serviceaccounts by issuing
// SelfSubjectAccessReview requests against the target cluster.
func checkImpersonationPermissions(ctx context.Context, cluster string, sarClient authztypes.SelfSubjectAccessReviewInterface) error {
	for _, resource := range []string{"users", "groups", "serviceaccounts"} {
		resp, err := sarClient.Create(ctx, &authzapi.SelfSubjectAccessReview{
			Spec: authzapi.SelfSubjectAccessReviewSpec{
				ResourceAttributes: &authzapi.ResourceAttributes{
					Verb:     "impersonate",
					Resource: resource,
				},
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return trace.Wrap(err, "failed to verify impersonation permissions for Kubernetes: %v; this may be due to missing the SelfSubjectAccessReview permission on the ClusterRole used by the proxy; please make sure that proxy has all the necessary permissions: https://goteleport.com/teleport/docs/kubernetes-ssh/#impersonation", err)
		}
		if !resp.Status.Allowed {
			return trace.AccessDenied("proxy can't impersonate Kubernetes %s at the cluster level; please make sure that proxy has all the necessary permissions: https://goteleport.com/teleport/docs/kubernetes-ssh/#impersonation", resource)
		}
	}
	return nil
}
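
// For operators debugging an AccessDenied result from this check, roughly the
// same questions can be asked of the cluster manually with kubectl, run with
// the same credentials the proxy uses (an illustrative sketch; adjust to your
// environment):
//
//	kubectl auth can-i impersonate users
//	kubectl auth can-i impersonate groups
//	kubectl auth can-i impersonate serviceaccounts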