Skip to content

Commit

Permalink
Remove requirement for cluster-admin for tiller in openshift example. (
Browse files Browse the repository at this point in the history
  • Loading branch information
absoludity authored Nov 4, 2019
1 parent 6ed392e commit ff6e26d
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 64 deletions.
4 changes: 3 additions & 1 deletion chart/kubeapps/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -179,14 +179,16 @@ Or:
Error: namespaces "kubeapps" is forbidden: User "system:serviceaccount:kube-system:default" cannot get namespaces in the namespace "kubeapps"
```

This usually is an indication that Tiller was not installed with enough permissions to create the resources by Kubeapps. In order to install Kubeapps, you will need to install Tiller with elevated permissions (e.g. as a cluster-admin). For example:
This usually is an indication that Tiller was not installed with enough permissions to create the resources required by Kubeapps. In order to install Kubeapps, Tiller will need to be able to install Custom Resource Definitions cluster-wide, as well as manage app repositories in your Kubeapps namespace. The easiest way to enable this in a development environment is to install Tiller with elevated permissions (e.g. as a cluster-admin). For example:

```
kubectl -n kube-system create sa tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
```

but for a production environment you can assign more specific permissions so that Tiller can [manage CRDs on the cluster](https://github.com/kubeapps/kubeapps/blob/master/docs/user/manifests/openshift-tiller-with-crd-rbac.yaml) as well as [create app repositories in your Kubeapps namespace](https://github.com/kubeapps/kubeapps/blob/master/docs/user/manifests/openshift-tiller-with-apprepository-rbac.yaml) (these examples are from our in-development support for OpenShift).

It is also possible, though less common, that your cluster does not have Role Based Access Control (RBAC) enabled. To check if your cluster has RBAC you can execute:

```console
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,19 @@ objects:
- "kubeapps.com"
resources:
- apprepositories
- apprepositories/finalizers
verbs: ["*"]
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
- roles
- rolebindings
verbs: ["*"]
- apiGroups:
- ""
resources:
- events
verbs: ["create"]
- kind: RoleBinding
apiVersion: v1
metadata:
Expand Down
42 changes: 0 additions & 42 deletions docs/user/manifests/openshift-tiller-with-crd-rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,19 +7,6 @@ objects:
name: tiller
namespace: ${TILLER_NAMESPACE}

# TODO: Remove cluster-admin binding for tiller and get working with the individual roles below.
- kind: ClusterRoleBinding
apiVersion: v1
metadata:
name: temporary-tiller-cluster-admin
roleRef:
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: tiller
namespace: ${TILLER_NAMESPACE}

- kind: Role
apiVersion: v1
metadata:
Expand Down Expand Up @@ -72,35 +59,6 @@ objects:
name: tiller
namespace: ${TILLER_NAMESPACE}

# On OpenShift, the developer who 'helm install's a chart needs certain access to pods in the tiller namespace.
- kind: Role
apiVersion: v1
metadata:
name: helm-user
rules:
- apiGroups:
- ""
resources:
- pods
- pods/portforward
verbs:
- get
- list
- create
- kind: RoleBinding
apiVersion: v1
metadata:
name: helm-user
roleRef:
name: helm-user
namespace: ${TILLER_NAMESPACE}
subjects:
- kind: User
name: developer

parameters:
- name: HELM_VERSION
value: v2.6.1
required: true
- name: TILLER_NAMESPACE
required: true
29 changes: 8 additions & 21 deletions script/openshift-cluster.mk
Original file line number Diff line number Diff line change
Expand Up @@ -2,21 +2,23 @@
# use, but does document requirements for OpenShift. The targets assume that you have:
# 1) helm installed
# 2) minishift installed and a cluster started (which automatically updates your KUBECONFIG).
# 3) the `oc` cli setup (ie. you've run `eval $(minishift oc-env)`)
# 3) the `oc` cli setup (ie. you've run `eval $(minishift oc-env)`) and are authed as `system:admin`
# Namespaces used by the development cluster; overridable from the command line,
# e.g. `make TILLER_NAMESPACE=my-tiller openshift-kubeapps`.
TILLER_NAMESPACE ?= tiller
KUBEAPPS_NAMESPACE ?= kubeapps

# Version of the mongodb chart pinned in the kubeapps chart's requirements.lock.
# Use `:=` (simple expansion) so the shell pipeline runs once at parse time
# rather than on every expansion of the variable.
MONGODB_CHART_VERSION := $(strip $(shell cat chart/kubeapps/requirements.lock | grep version | cut --delimiter=":" -f2))

# Create the OpenShift project (namespace) that will host Tiller, as the
# unprivileged `developer` user. The target file is a sentinel touched on
# success so the project is only created once.
devel/openshift-tiller-project-created:
@oc login -u developer
oc new-project ${TILLER_NAMESPACE}
touch $@

# TODO: The following three targets enable tiller to create CRDs
# cluster-wide as well as app repositories in the TILLER_NAMESPACE. They should
# not be specific to OpenShift, but should be the default for any development
# environment, though the importance of this will vanish with Helm 3.
devel/openshift-tiller-with-crd-rbac.yaml: devel/openshift-tiller-project-created
@oc process -f ./docs/user/manifests/openshift-tiller-with-crd-rbac.yaml \
-p TILLER_NAMESPACE="${TILLER_NAMESPACE}" \
-p HELM_VERSION=v2.14.3 \
-o yaml \
> $@

Expand All @@ -27,21 +29,12 @@ devel/openshift-tiller-with-apprepository-rbac.yaml: devel/openshift-tiller-with
-o yaml \
> $@

# OpenShift requires you to have a project selected when referencing roles, otherwise the following error results:
# Error from server: invalid origin role binding tiller-apprepositories: attempts to reference
# role in namespace "kubeapps" instead of current namespace "tiller"
# The admin role is required because the following gives tiller a cluster-wide permission (crd-rbac).
# NOTE(review): this block was captured from a rendered diff with old and new
# recipe lines interleaved — the `oc login`/`oc project`/`oc apply` lines and
# the `kubectl --namespace … apply` lines appear to be the before/after pair of
# the same change; verify against the repository which set is current.
openshift-install-tiller: devel/openshift-tiller-with-crd-rbac.yaml devel/openshift-tiller-with-apprepository-rbac.yaml devel/openshift-kubeapps-project-created
@oc login -u system:admin
oc project ${TILLER_NAMESPACE}
oc apply -f devel/openshift-tiller-with-crd-rbac.yaml --wait=true
oc project ${KUBEAPPS_NAMESPACE}
oc apply -f devel/openshift-tiller-with-apprepository-rbac.yaml
kubectl --namespace ${TILLER_NAMESPACE} apply -f devel/openshift-tiller-with-crd-rbac.yaml --wait=true
kubectl --namespace ${KUBEAPPS_NAMESPACE} apply -f devel/openshift-tiller-with-apprepository-rbac.yaml
helm init --tiller-namespace ${TILLER_NAMESPACE} --service-account tiller --wait
oc login -u developer

# Create the Kubeapps project and grant the Tiller service account the `edit`
# role in it, so Tiller can install the chart there. Sentinel file pattern, as
# with the tiller project target above.
# NOTE(review): captured from a rendered diff — the `@oc login -u developer`
# line may belong to only one side of the change; confirm against the repo.
devel/openshift-kubeapps-project-created: devel/openshift-tiller-project-created
@oc login -u developer
oc new-project ${KUBEAPPS_NAMESPACE}
oc policy add-role-to-user edit "system:serviceaccount:${TILLER_NAMESPACE}:tiller"
touch $@
Expand All @@ -52,16 +45,12 @@ chart/kubeapps/charts/mongodb-${MONGODB_CHART_VERSION}.tgz:
# Install the Kubeapps chart into KUBEAPPS_NAMESPACE via Tiller, using the
# local dev values file.
# NOTE(review): the two `--set tillerProxy.host=…` lines are the old/new pair
# from the rendered diff — only the `${TILLER_NAMESPACE}` variant should exist
# in the real file; verify before relying on this copy.
devel/openshift-kubeapps-installed: openshift-install-tiller chart/kubeapps/charts/mongodb-${MONGODB_CHART_VERSION}.tgz
@oc project ${KUBEAPPS_NAMESPACE}
helm --tiller-namespace=${TILLER_NAMESPACE} install ./chart/kubeapps -n ${KUBEAPPS_NAMESPACE} \
--set tillerProxy.host=tiller-deploy.tiller:44134 \
--set tillerProxy.host=tiller-deploy.${TILLER_NAMESPACE}:44134 \
--values ./docs/user/manifests/kubeapps-local-dev-values.yaml

# Due to openshift having multiple secrets for the service account, the code is slightly different from
# that at https://github.com/kubeapps/kubeapps/blob/master/docs/user/getting-started.md#on-linuxmacos
# TODO: update the docs to use a similar bash command.
# TODO: update this target to use a kubeapps user, rather than tiller service account.
# Note kubectl jsonpath support does not yet support regex filtering:
# https://github.com/kubernetes/kubernetes/issues/61406
# hence the separate grep.
openshift-tiller-token:
@kubectl get secret -n "${TILLER_NAMESPACE}" \
$(shell kubectl get serviceaccount -n "${TILLER_NAMESPACE}" tiller -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep tiller-token) \
Expand All @@ -70,13 +59,11 @@ openshift-tiller-token:
# Convenience alias: bring up a full Kubeapps installation on the minishift cluster.
openshift-kubeapps: devel/openshift-kubeapps-installed

# Tear down everything this Makefile created: both projects, the generated
# RBAC manifests, the apprepositories CRD, and the devel/ sentinel files.
# `|| true` keeps the reset idempotent when some resources are already gone.
# NOTE(review): captured from a rendered diff — the `oc login` lines may be
# the removed side of the change; confirm against the repository.
openshift-kubeapps-reset:
@oc login -u system:admin
oc delete project ${KUBEAPPS_NAMESPACE} || true
oc delete project ${TILLER_NAMESPACE} || true
oc delete -f devel/openshift-tiller-with-crd-rbac.yaml || true
oc delete -f devel/openshift-tiller-with-apprepository-rbac.yaml || true
oc delete customresourcedefinition apprepositories.kubeapps.com || true
rm devel/openshift-* || true
oc login -u developer

.PHONY: openshift-install-tiller openshift-kubeapps openshift-kubeapps-reset

0 comments on commit ff6e26d

Please sign in to comment.