diff --git a/.gitignore b/.gitignore index e0deadde8f..c77eed5da5 100644 --- a/.gitignore +++ b/.gitignore @@ -5,5 +5,4 @@ bin .idea/ .vscode/ .vagrant/ -Vagrantfile sshfile diff --git a/Makefile b/Makefile index 531321d8cf..6746acd5ec 100644 --- a/Makefile +++ b/Makefile @@ -65,7 +65,8 @@ debug: @echo SOURCE_GIT_TAG:"$(SOURCE_GIT_TAG)" # These tags make sure we can statically link and avoid shared dependencies -GO_BUILD_FLAGS :=-tags 'include_gcs include_oss containers_image_openpgp gssapi providerless netgo osusergo' +# netcgo : use system resolver for DNS instead of the netgo implementation which does not include mDNS +GO_BUILD_FLAGS :=-tags 'include_gcs include_oss containers_image_openpgp gssapi providerless netcgo osusergo' # targets "all:" and "build:" defined in vendor/github.com/openshift/build-machinery-go/make/targets/golang/build.mk microshift: build-containerized-cross-build-linux-amd64 diff --git a/assets/rbac/0000_10_bootstrap-crb-approver.yaml b/assets/rbac/0000_10_bootstrap-crb-approver.yaml new file mode 100644 index 0000000000..a211b14520 --- /dev/null +++ b/assets/rbac/0000_10_bootstrap-crb-approver.yaml @@ -0,0 +1,14 @@ +# Approve all CSRs for the group "system:bootstrappers" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: auto-approve-csrs-for-group +subjects: +- kind: Group + name: system:bootstrappers + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:certificates.k8s.io:certificatesigningrequests:nodeclient + apiGroup: rbac.authorization.k8s.io + \ No newline at end of file diff --git a/assets/rbac/0000_10_bootstrap-crb-creator.yaml b/assets/rbac/0000_10_bootstrap-crb-creator.yaml new file mode 100644 index 0000000000..f112240227 --- /dev/null +++ b/assets/rbac/0000_10_bootstrap-crb-creator.yaml @@ -0,0 +1,14 @@ +# enable bootstrapping nodes to create CSR +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: create-csrs-for-bootstrapping 
+subjects: +- kind: Group + name: system:bootstrappers + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:node-bootstrapper + apiGroup: rbac.authorization.k8s.io + \ No newline at end of file diff --git a/assets/rbac/0000_10_bootstrap-crb-renewal.yaml b/assets/rbac/0000_10_bootstrap-crb-renewal.yaml new file mode 100644 index 0000000000..2bf9d41e87 --- /dev/null +++ b/assets/rbac/0000_10_bootstrap-crb-renewal.yaml @@ -0,0 +1,13 @@ +# approve renewal CSRs for the group "system:nodes" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: auto-approve-renewals-for-nodes +subjects: +- kind: Group + name: system:nodes + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient + apiGroup: rbac.authorization.k8s.io diff --git a/pkg/assets/rbac/bindata.go b/pkg/assets/rbac/bindata.go index 6f5979161a..27f5629079 100644 --- a/pkg/assets/rbac/bindata.go +++ b/pkg/assets/rbac/bindata.go @@ -3,6 +3,9 @@ // assets/rbac/0000_00_flannel-clusterrole.yaml // assets/rbac/0000_00_flannel-clusterrolebinding.yaml // assets/rbac/0000_00_podsecuritypolicy-flannel.yaml +// assets/rbac/0000_10_bootstrap-crb-approver.yaml +// assets/rbac/0000_10_bootstrap-crb-creator.yaml +// assets/rbac/0000_10_bootstrap-crb-renewal.yaml // assets/rbac/0000_60_service-ca_00_clusterrole.yaml // assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml // assets/rbac/0000_60_service-ca_00_role.yaml @@ -200,6 +203,96 @@ func assetsRbac0000_00_podsecuritypolicyFlannelYaml() (*asset, error) { return a, nil } +var _assetsRbac0000_10_bootstrapCrbApproverYaml = []byte(`# Approve all CSRs for the group "system:bootstrappers" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: auto-approve-csrs-for-group +subjects: +- kind: Group + name: system:bootstrappers + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: 
system:certificates.k8s.io:certificatesigningrequests:nodeclient + apiGroup: rbac.authorization.k8s.io + `) + +func assetsRbac0000_10_bootstrapCrbApproverYamlBytes() ([]byte, error) { + return _assetsRbac0000_10_bootstrapCrbApproverYaml, nil +} + +func assetsRbac0000_10_bootstrapCrbApproverYaml() (*asset, error) { + bytes, err := assetsRbac0000_10_bootstrapCrbApproverYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/rbac/0000_10_bootstrap-crb-approver.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsRbac0000_10_bootstrapCrbCreatorYaml = []byte(`# enable bootstrapping nodes to create CSR +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: create-csrs-for-bootstrapping +subjects: +- kind: Group + name: system:bootstrappers + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:node-bootstrapper + apiGroup: rbac.authorization.k8s.io + `) + +func assetsRbac0000_10_bootstrapCrbCreatorYamlBytes() ([]byte, error) { + return _assetsRbac0000_10_bootstrapCrbCreatorYaml, nil +} + +func assetsRbac0000_10_bootstrapCrbCreatorYaml() (*asset, error) { + bytes, err := assetsRbac0000_10_bootstrapCrbCreatorYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/rbac/0000_10_bootstrap-crb-creator.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _assetsRbac0000_10_bootstrapCrbRenewalYaml = []byte(`# approve renewal CSRs for the group "system:nodes" +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: auto-approve-renewals-for-nodes +subjects: +- kind: Group + name: system:nodes + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient + apiGroup: 
rbac.authorization.k8s.io +`) + +func assetsRbac0000_10_bootstrapCrbRenewalYamlBytes() ([]byte, error) { + return _assetsRbac0000_10_bootstrapCrbRenewalYaml, nil +} + +func assetsRbac0000_10_bootstrapCrbRenewalYaml() (*asset, error) { + bytes, err := assetsRbac0000_10_bootstrapCrbRenewalYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "assets/rbac/0000_10_bootstrap-crb-renewal.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _assetsRbac0000_60_serviceCa_00_clusterroleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -715,6 +808,9 @@ var _bindata = map[string]func() (*asset, error){ "assets/rbac/0000_00_flannel-clusterrole.yaml": assetsRbac0000_00_flannelClusterroleYaml, "assets/rbac/0000_00_flannel-clusterrolebinding.yaml": assetsRbac0000_00_flannelClusterrolebindingYaml, "assets/rbac/0000_00_podsecuritypolicy-flannel.yaml": assetsRbac0000_00_podsecuritypolicyFlannelYaml, + "assets/rbac/0000_10_bootstrap-crb-approver.yaml": assetsRbac0000_10_bootstrapCrbApproverYaml, + "assets/rbac/0000_10_bootstrap-crb-creator.yaml": assetsRbac0000_10_bootstrapCrbCreatorYaml, + "assets/rbac/0000_10_bootstrap-crb-renewal.yaml": assetsRbac0000_10_bootstrapCrbRenewalYaml, "assets/rbac/0000_60_service-ca_00_clusterrole.yaml": assetsRbac0000_60_serviceCa_00_clusterroleYaml, "assets/rbac/0000_60_service-ca_00_clusterrolebinding.yaml": assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, "assets/rbac/0000_60_service-ca_00_role.yaml": assetsRbac0000_60_serviceCa_00_roleYaml, @@ -773,6 +869,9 @@ var _bintree = &bintree{nil, map[string]*bintree{ "0000_00_flannel-clusterrole.yaml": {assetsRbac0000_00_flannelClusterroleYaml, map[string]*bintree{}}, "0000_00_flannel-clusterrolebinding.yaml": {assetsRbac0000_00_flannelClusterrolebindingYaml, map[string]*bintree{}}, "0000_00_podsecuritypolicy-flannel.yaml": 
{assetsRbac0000_00_podsecuritypolicyFlannelYaml, map[string]*bintree{}}, + "0000_10_bootstrap-crb-approver.yaml": {assetsRbac0000_10_bootstrapCrbApproverYaml, map[string]*bintree{}}, + "0000_10_bootstrap-crb-creator.yaml": {assetsRbac0000_10_bootstrapCrbCreatorYaml, map[string]*bintree{}}, + "0000_10_bootstrap-crb-renewal.yaml": {assetsRbac0000_10_bootstrapCrbRenewalYaml, map[string]*bintree{}}, "0000_60_service-ca_00_clusterrole.yaml": {assetsRbac0000_60_serviceCa_00_clusterroleYaml, map[string]*bintree{}}, "0000_60_service-ca_00_clusterrolebinding.yaml": {assetsRbac0000_60_serviceCa_00_clusterrolebindingYaml, map[string]*bintree{}}, "0000_60_service-ca_00_role.yaml": {assetsRbac0000_60_serviceCa_00_roleYaml, map[string]*bintree{}}, diff --git a/pkg/bootstrap/bootstrap.go b/pkg/bootstrap/bootstrap.go new file mode 100644 index 0000000000..604d9c94b4 --- /dev/null +++ b/pkg/bootstrap/bootstrap.go @@ -0,0 +1,24 @@ +package bootstrap + +import ( + "github.com/openshift/microshift/pkg/assets" + "github.com/openshift/microshift/pkg/config" + "github.com/sirupsen/logrus" +) + +func ApplyBootstrapClusterRoleBindings(cfg *config.MicroshiftConfig, kubeconfigPath string) error { + var ( + clusterRoleBinding = []string{ + "assets/rbac/0000_10_bootstrap-crb-creator.yaml", + "assets/rbac/0000_10_bootstrap-crb-approver.yaml", + "assets/rbac/0000_10_bootstrap-crb-renewal.yaml", + } + ) + + if err := assets.ApplyClusterRoleBindings(clusterRoleBinding, kubeconfigPath); err != nil { + logrus.Warningf("failed to apply clusterRolebinding %v: %v", clusterRoleBinding, err) + return err + } + + return nil +} diff --git a/pkg/bootstrap/token-manager.go b/pkg/bootstrap/token-manager.go new file mode 100644 index 0000000000..30acb44081 --- /dev/null +++ b/pkg/bootstrap/token-manager.go @@ -0,0 +1,40 @@ +package bootstrap + +import ( + "context" + "os" + "path/filepath" + + "github.com/openshift/microshift/pkg/config" + "github.com/openshift/microshift/pkg/util" +) + +type TokenManager 
struct { + path string + cfg *config.MicroshiftConfig +} + +func NewTokenManager(cfg *config.MicroshiftConfig) *TokenManager { + return &TokenManager{ + path: filepath.Join(cfg.DataDir, "resources", "microshift-bootstrap-token"), + cfg: cfg, + } +} + +func (s *TokenManager) Name() string { return "token-manager" } +func (s *TokenManager) Dependencies() []string { return []string{} } + +func (s *TokenManager) Run(ctx context.Context, ready chan<- struct{}, stopped chan<- struct{}) error { + defer close(stopped) + defer close(ready) + + CreateTokenFile(s.path) + _, err := os.Stat(s.cfg.DataDir + "/resources/kubelet/bootstrap-kubeconfig") + if os.IsNotExist(err) { + if err := util.BootstrapKubeconfig(GetToken(s.path), s.cfg.DataDir+"/resources/kubelet/bootstrap-kubeconfig", "system:bootstrappers", []string{"system:bootstrappers"}, s.cfg.Cluster.URL); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/bootstrap/token.go b/pkg/bootstrap/token.go new file mode 100644 index 0000000000..c3957e2111 --- /dev/null +++ b/pkg/bootstrap/token.go @@ -0,0 +1,45 @@ +package bootstrap + +import ( + "crypto/rand" + "encoding/hex" + "os" + "strings" + + "k8s.io/klog/v2" +) + +func CreateTokenFile(path string) { + err := os.Remove(path) + if err != nil { + klog.ErrorS(err, "Token file does not exist") + } + + token := randString(16) + token = token + ",kubelet-bootstrap,10001,\"system:bootstrappers\"" + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + klog.ErrorS(err, "Token file cannot be created") + } + defer f.Close() + f.Write([]byte(token)) + +} + +func GetToken(path string) string { + f, err := os.ReadFile(path) + if err != nil { + klog.ErrorS(err, "Token file cannot be opened") + } + token := f[:strings.IndexByte(string(f), ',')] + + return string(token) +} + +func randString(length int) string { + b := make([]byte, length) + if _, err := rand.Read(b); err != nil { + return "" + } 
+ return hex.EncodeToString(b) +} diff --git a/pkg/cmd/run.go b/pkg/cmd/run.go index 8646bb1ce3..2579f4f1bb 100644 --- a/pkg/cmd/run.go +++ b/pkg/cmd/run.go @@ -10,6 +10,7 @@ import ( "time" "github.com/coreos/go-systemd/daemon" + "github.com/openshift/microshift/pkg/bootstrap" "github.com/openshift/microshift/pkg/config" "github.com/openshift/microshift/pkg/controllers" "github.com/openshift/microshift/pkg/kustomize" @@ -63,11 +64,14 @@ func RunMicroshift(cfg *config.MicroshiftConfig, flags *pflag.FlagSet) error { // TODO: change to only initialize what is strictly necessary for the selected role(s) if _, err := os.Stat(filepath.Join(cfg.DataDir, "certs")); errors.Is(err, os.ErrNotExist) { - initAll(cfg) + if config.StringInList("controlplane", cfg.Roles) { + initAll(cfg) + } } m := servicemanager.NewServiceManager() if config.StringInList("controlplane", cfg.Roles) { + util.Must(m.AddService(bootstrap.NewTokenManager(cfg))) util.Must(m.AddService(controllers.NewEtcd(cfg))) util.Must(m.AddService(controllers.NewKubeAPIServer(cfg))) util.Must(m.AddService(controllers.NewKubeScheduler(cfg))) diff --git a/pkg/components/components.go b/pkg/components/components.go index 5bd937fd20..6fd080307b 100755 --- a/pkg/components/components.go +++ b/pkg/components/components.go @@ -1,6 +1,7 @@ package components import ( + "github.com/openshift/microshift/pkg/bootstrap" "github.com/openshift/microshift/pkg/config" "github.com/sirupsen/logrus" ) @@ -28,5 +29,9 @@ func StartComponents(cfg *config.MicroshiftConfig) error { logrus.Warningf("failed to start Flannel: %v", err) return err } + if err := bootstrap.ApplyBootstrapClusterRoleBindings(cfg, cfg.DataDir+"/resources/kubeadmin/kubeconfig"); err != nil { + logrus.Warningf("failed to apply bootstrap cluster role bindings: %v", err) + return err + } return nil } diff --git a/pkg/config/config.go b/pkg/config/config.go index ac845d3f4f..98fbf7cc91 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -9,7 +9,7 @@ import ( "strconv" 
"github.com/kelseyhightower/envconfig" - homedir "github.com/mitchellh/go-homedir" + "github.com/mitchellh/go-homedir" "github.com/openshift/microshift/pkg/util" "github.com/sirupsen/logrus" "github.com/spf13/pflag" @@ -86,7 +86,7 @@ func NewMicroshiftConfig() *MicroshiftConfig { NodeName: nodeName, NodeIP: nodeIP, Cluster: ClusterConfig{ - URL: "https://127.0.0.1:6443", + URL: util.NodeURL(nodeIP, nodeName), ClusterCIDR: "10.42.0.0/16", ServiceCIDR: "10.43.0.0/16", DNS: "10.43.0.10", diff --git a/pkg/controllers/kube-apiserver.go b/pkg/controllers/kube-apiserver.go index 35d5e23df8..656017c161 100644 --- a/pkg/controllers/kube-apiserver.go +++ b/pkg/controllers/kube-apiserver.go @@ -55,7 +55,7 @@ func NewKubeAPIServer(cfg *config.MicroshiftConfig) *KubeAPIServer { } func (s *KubeAPIServer) Name() string { return "kube-apiserver" } -func (s *KubeAPIServer) Dependencies() []string { return []string{"etcd"} } +func (s *KubeAPIServer) Dependencies() []string { return []string{"token-manager", "etcd"} } func (s *KubeAPIServer) configure(cfg *config.MicroshiftConfig) { caCertFile := filepath.Join(cfg.DataDir, "certs", "ca-bundle", "ca-bundle.crt") @@ -92,6 +92,7 @@ func (s *KubeAPIServer) configure(cfg *config.MicroshiftConfig) { "--client-ca-file=" + caCertFile, "--enable-admission-plugins=NodeRestriction", "--enable-aggregator-routing=true", + "--enable-bootstrap-token-auth", "--etcd-cafile=" + caCertFile, "--etcd-certfile=" + cfg.DataDir + "/resources/kube-apiserver/secrets/etcd-client/tls.crt", "--etcd-keyfile=" + cfg.DataDir + "/resources/kube-apiserver/secrets/etcd-client/tls.key", @@ -114,6 +115,7 @@ func (s *KubeAPIServer) configure(cfg *config.MicroshiftConfig) { "--storage-backend=etcd3", "--tls-cert-file=" + cfg.DataDir + "/certs/kube-apiserver/secrets/service-network-serving-certkey/tls.crt", "--tls-private-key-file=" + cfg.DataDir + "/certs/kube-apiserver/secrets/service-network-serving-certkey/tls.key", + "--token-auth-file=" + cfg.DataDir + 
"/resources/microshift-bootstrap-token", "--cors-allowed-origins=/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$", "--logtostderr=" + strconv.FormatBool(cfg.LogDir == "" || cfg.LogAlsotostderr), "--alsologtostderr=" + strconv.FormatBool(cfg.LogAlsotostderr), diff --git a/pkg/controllers/kube-controller-manager.go b/pkg/controllers/kube-controller-manager.go index 554ce4bc2d..9dc33b2785 100644 --- a/pkg/controllers/kube-controller-manager.go +++ b/pkg/controllers/kube-controller-manager.go @@ -62,6 +62,7 @@ func (s *KubeControllerManager) configure(cfg *config.MicroshiftConfig) { "--service-account-private-key-file=" + cfg.DataDir + "/resources/kube-apiserver/secrets/service-account-key/service-account.key", "--allocate-node-cidrs=true", "--cluster-cidr=" + cfg.Cluster.ClusterCIDR, + "--controllers=*,tokencleaner,bootstrapsigner", "--authorization-kubeconfig=" + kubeconfig, "--authentication-kubeconfig=" + kubeconfig, "--root-ca-file=" + caCertFile, diff --git a/pkg/node/kube-proxy.go b/pkg/node/kube-proxy.go index 634834c331..65b11a571a 100644 --- a/pkg/node/kube-proxy.go +++ b/pkg/node/kube-proxy.go @@ -47,7 +47,7 @@ func NewKubeProxyServer(cfg *config.MicroshiftConfig) *ProxyOptions { } func (s *ProxyOptions) Name() string { return componentKubeProxy } -func (s *ProxyOptions) Dependencies() []string { return []string{"kube-apiserver"} } +func (s *ProxyOptions) Dependencies() []string { return []string{"kubelet"} } func (s *ProxyOptions) configure(cfg *config.MicroshiftConfig) error { if err := s.writeConfig(cfg); err != nil { @@ -86,17 +86,20 @@ func (s *ProxyOptions) writeConfig(cfg *config.MicroshiftConfig) error { data := []byte(` apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration -clientConnection: - kubeconfig: ` + cfg.DataDir + `/resources/kube-proxy/kubeconfig -hostnameOverride: ` + cfg.NodeName + ` -clusterCIDR: ` + cfg.Cluster.ClusterCIDR + ` +clientConnection:`) + + data = append(data, "\n"+` kubeconfig: 
`+cfg.DataDir+"/resources/kubelet/kubeconfig"...) + + data = append(data, ` +hostnameOverride: `+cfg.NodeName+` +clusterCIDR: `+cfg.Cluster.ClusterCIDR+` mode: "iptables" iptables: masqueradeAll: true conntrack: maxPerCore: 0 featureGates: - AllAlpha: false`) + AllAlpha: false`...) path := filepath.Join(cfg.DataDir, "resources", "kube-proxy", "config", "config.yaml") os.MkdirAll(filepath.Dir(path), os.FileMode(0755)) @@ -115,6 +118,7 @@ func (s *ProxyOptions) Run(ctx context.Context, ready chan<- struct{}, stopped c logrus.Infof("%s is ready", s.Name()) close(ready) }() + if err := s.options.Run(); err != nil { logrus.Fatalf("%s failed to start %v", s.Name(), err) } diff --git a/pkg/node/kubelet.go b/pkg/node/kubelet.go index 262f184ca1..c6632233d5 100644 --- a/pkg/node/kubelet.go +++ b/pkg/node/kubelet.go @@ -24,13 +24,12 @@ import ( "path/filepath" "strconv" + "github.com/openshift/microshift/pkg/config" + "github.com/openshift/microshift/pkg/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/openshift/microshift/pkg/config" - "github.com/openshift/microshift/pkg/util" - utilfeature "k8s.io/apiserver/pkg/util/feature" cliflag "k8s.io/component-base/cli/flag" @@ -59,26 +58,36 @@ func NewKubeletServer(cfg *config.MicroshiftConfig) *KubeletServer { } func (s *KubeletServer) Name() string { return componentKubelet } -func (s *KubeletServer) Dependencies() []string { return []string{"kube-apiserver"} } +func (s *KubeletServer) Dependencies() []string { return []string{} } func (s *KubeletServer) configure(cfg *config.MicroshiftConfig) error { if err := s.writeConfig(cfg); err != nil { logrus.Fatalf("Failed to write kubelet config: %v", err) } + kubeconfigFile := cfg.DataDir + "/resources/kubelet/kubeconfig" + kubeBootstrapFile := cfg.DataDir + "/resources/kubelet/bootstrap-kubeconfig" // Prepare commandline args args := []string{ - "--bootstrap-kubeconfig=" + cfg.DataDir + "/resources/kubelet/kubeconfig", - 
"--kubeconfig=" + cfg.DataDir + "/resources/kubelet/kubeconfig", + "--kubeconfig=" + kubeconfigFile, "--logtostderr=" + strconv.FormatBool(cfg.LogDir == "" || cfg.LogAlsotostderr), "--alsologtostderr=" + strconv.FormatBool(cfg.LogAlsotostderr), "--v=" + strconv.Itoa(cfg.LogVLevel), "--vmodule=" + cfg.LogVModule, } + + if _, err := os.Stat(kubeBootstrapFile); err == nil { + args = append(args, "--bootstrap-kubeconfig="+kubeBootstrapFile) + } + if cfg.LogDir != "" { args = append(args, "--log-file="+filepath.Join(cfg.LogDir, "kubelet.log")) } + + if len(cfg.Roles) == 1 && cfg.Roles[0] == "node" { + args = append(args, "--cert-dir="+cfg.DataDir+"/resources/kubelet/secrets/kubelet-client/") + } cleanFlagSet := pflag.NewFlagSet(componentKubelet, pflag.ContinueOnError) cleanFlagSet.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) @@ -118,23 +127,24 @@ func (s *KubeletServer) configure(cfg *config.MicroshiftConfig) error { } func (s *KubeletServer) writeConfig(cfg *config.MicroshiftConfig) error { - data := []byte(` -kind: KubeletConfiguration -apiVersion: kubelet.config.k8s.io/v1beta1 -authentication: + data := []byte(`kind: KubeletConfiguration +apiVersion: kubelet.config.k8s.io/v1beta1`) + if config.StringInList("controlplane", cfg.Roles) { + data = append(data, "\n"+`authentication: x509: - clientCAFile: ` + cfg.DataDir + `/certs/ca-bundle/ca-bundle.crt + clientCAFile: `+cfg.DataDir+`/certs/ca-bundle/ca-bundle.crt anonymous: enabled: false -tlsCertFile: ` + cfg.DataDir + `/resources/kubelet/secrets/kubelet-client/tls.crt -tlsPrivateKeyFile: ` + cfg.DataDir + `/resources/kubelet/secrets/kubelet-client/tls.key -cgroupDriver: "systemd" +tlsCertFile: `+cfg.DataDir+`/resources/kubelet/secrets/kubelet-client/tls.crt +tlsPrivateKeyFile: `+cfg.DataDir+`/resources/kubelet/secrets/kubelet-client/tls.key`...) 
+ } + data = append(data, "\n"+`cgroupDriver: "systemd" cgroupRoot: / failSwapOn: false -volumePluginDir: ` + cfg.DataDir + `/kubelet-plugins/volume/exec +volumePluginDir: `+cfg.DataDir+`/kubelet-plugins/volume/exec clusterDNS: - - ` + cfg.Cluster.DNS + ` -clusterDomain: ` + cfg.Cluster.Domain + ` + - `+cfg.Cluster.DNS+` +clusterDomain: `+cfg.Cluster.Domain+` containerLogMaxSize: 50Mi maxPods: 250 kubeAPIQPS: 50 @@ -151,11 +161,11 @@ featureGates: # Will be removed in future openshift/api update https://github.com/openshift/api/commit/c8c8f6d0f4a8ac4ff4ad7d1a84b27e1aa7ebf9b4 RemoveSelfLink: false NodeDisruptionExclusion: true - RotateKubeletServerCertificate: false #TODO + RotateKubeletServerCertificate: true SCTPSupport: true ServiceNodeExclusion: true SupportPodPidsLimit: true -serverTLSBootstrap: false #TODO`) +serverTLSBootstrap: true`...) // Load real resolv.conf in case systemd-resolved is used // https://github.com/coredns/coredns/blob/master/plugin/loop/README.md#troubleshooting-loops-in-kubernetes-clusters diff --git a/pkg/servicemanager/manager.go b/pkg/servicemanager/manager.go index 8be70f2452..d77dbd592b 100644 --- a/pkg/servicemanager/manager.go +++ b/pkg/servicemanager/manager.go @@ -40,7 +40,7 @@ func (m *ServiceManager) AddService(s Service) error { // i.e. they'll always remain topology sorted. Should we want to relax this // constraint later, we can add topo sorting in the Run() step. 
if _, exists := m.serviceMap[dependency]; !exists { - return fmt.Errorf("dependecy '%s' of service '%s' not yet defined", dependency, s.Name()) + return fmt.Errorf("dependency '%s' of service '%s' not yet defined", dependency, s.Name()) } } diff --git a/pkg/servicemanager/manager_test.go b/pkg/servicemanager/manager_test.go index 12247a4416..4c3f90319f 100644 --- a/pkg/servicemanager/manager_test.go +++ b/pkg/servicemanager/manager_test.go @@ -33,7 +33,7 @@ func TestAddService(t *testing.T) { serviceTest{service: NewGenericService("foo", nil, nil), out: errors.New("service 'foo' added more than once")}, }, { - serviceTest{service: NewGenericService("bar", []string{"foo"}, nil), out: errors.New("dependecy 'foo' of service 'bar' not yet defined")}, + serviceTest{service: NewGenericService("bar", []string{"foo"}, nil), out: errors.New("dependency 'foo' of service 'bar' not yet defined")}, serviceTest{service: NewGenericService("foo", nil, nil), out: nil}, }, } diff --git a/pkg/util/kubeconfig.go b/pkg/util/kubeconfig.go index 3d54b2f3c3..7ceb87d734 100644 --- a/pkg/util/kubeconfig.go +++ b/pkg/util/kubeconfig.go @@ -73,3 +73,55 @@ users: return kubeconfigTemplate.Execute(output, &data) } + +func BootstrapKubeconfig(token, path, common string, svcName []string, clusterURL string) error { + bootstrapKubeconfigTemplate := template.Must(template.New("bootstrap-kubeconfig").Parse(` +apiVersion: v1 +kind: Config +current-context: microshift +preferences: {} +contexts: +- context: + cluster: microshift + user: kubelet-bootstrap + name: microshift +clusters: +- cluster: + server: {{.ClusterURL}} + certificate-authority-data: {{.ClusterCA}} + name: microshift +users: +- name: kubelet-bootstrap + user: + token: {{.Token}} +`)) + certBuff, keyBuff, err := GenCertsBuff(common, svcName) + if err != nil { + return err + } + clusterCA := Base64(CertToPem(GetRootCA())) + clientCert := Base64(certBuff) + clientKey := Base64(keyBuff) + data := struct { + ClusterURL string + ClusterCA 
string + ClientCert string + ClientKey string + Token string + }{ + ClusterURL: clusterURL, + ClusterCA: clusterCA, + ClientCert: clientCert, + ClientKey: clientKey, + Token: token, + } + os.MkdirAll(filepath.Dir(path), os.FileMode(0755)) + + output, err := os.Create(path) + if err != nil { + return err + } + defer output.Close() + + return bootstrapKubeconfigTemplate.Execute(output, &data) +} diff --git a/pkg/util/net.go b/pkg/util/net.go index a0600b4ae8..b0d3cac539 100644 --- a/pkg/util/net.go +++ b/pkg/util/net.go @@ -20,8 +20,10 @@ import ( tcpnet "net" "net/http" "strconv" + "strings" "time" + "github.com/openshift/microshift/pkg/mdns/server" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" @@ -81,3 +83,12 @@ func CreateLocalhostListenerOnPort(port int) (tcpnet.Listener, error) { return ln, nil } + +func NodeURL(nodeIP, nodeName string) string { + host := nodeIP + // TODO: decide what to do outside of mDNS and IP context (IPs can change..) + if strings.HasSuffix(nodeName, server.DefaultmDNSTLD) { + host = nodeName + } + return "https://" + host + ":6443" +} diff --git a/vagrant/multi-worker/.gitignore b/vagrant/multi-worker/.gitignore new file mode 100644 index 0000000000..2991a6cb86 --- /dev/null +++ b/vagrant/multi-worker/.gitignore @@ -0,0 +1,3 @@ +.token_installed +microshift +kubeconfig diff --git a/vagrant/multi-worker/Makefile b/vagrant/multi-worker/Makefile new file mode 100644 index 0000000000..58bfbf85b0 --- /dev/null +++ b/vagrant/multi-worker/Makefile @@ -0,0 +1,81 @@ +# Export shell defined to support Ubuntu +export SHELL := $(shell which bash) + +PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) + +microshift: + make -C ../.. 
DEBUG=true + cp ../../microshift $(PROJECT_DIR) +.PHONY: microshift + +.vagrant: + vagrant up + +up: microshift .vagrant +.PHONY: up + +down: + vagrant destroy -f + rm .token_installed +.PHONY: down + +install-worker-token: .vagrant + vagrant ssh worker -- sudo mkdir -p /var/lib/microshift/resources/kubelet + vagrant ssh master -- sudo cat /var/lib/microshift/resources/kubelet/bootstrap-kubeconfig | vagrant ssh worker -- sudo tee /var/lib/microshift/resources/kubelet/bootstrap-kubeconfig + +.PHONY: install-worker-token + +.token_installed: install-worker-token + touch .token_installed + +start-worker: .token_installed + # we need a separate service definition + vagrant ssh worker -- sudo sed -i \'s/run/run --roles=node/g\' /usr/lib/systemd/system/microshift.service + vagrant ssh worker -- sudo systemctl enable --now microshift.service + +restart-worker: + vagrant ssh worker -- sudo systemctl restart microshift.service + +restart-master: + vagrant ssh master -- sudo systemctl restart microshift.service + +.PHONY: start-worker restart-worker restart-master + +update-worker: microshift .vagrant + vagrant upload microshift /vagrant/microshift worker + vagrant ssh worker -- sudo rm /usr/bin/microshift + vagrant ssh worker -- sudo mv /vagrant/microshift /usr/bin + vagrant ssh worker -- sudo restorecon -F -v /usr/bin/microshift + vagrant ssh worker -- sudo systemctl kill microshift || true + vagrant ssh worker -- sudo systemctl kill microshift || true # fast and dirty... just the worker ;) +.PHONY: update-worker + +update-master: microshift .vagrant + vagrant upload microshift /vagrant/microshift master + vagrant ssh master -- sudo rm /usr/bin/microshift + vagrant ssh master -- sudo mv /vagrant/microshift /usr/bin + vagrant ssh master -- sudo restorecon -F -v /usr/bin/microshift + vagrant ssh master -- sudo systemctl kill microshift || true + vagrant ssh master -- sudo systemctl kill microshift || true # fast and dirty.. just the master? 
':D +.PHONY: update-master + +kubeconfig: .vagrant + vagrant ssh master -- sudo cat /var/lib/microshift/resources/kubeadmin/kubeconfig > kubeconfig + +worker-logs-follow: .vagrant + vagrant ssh worker -- sudo journalctl -o cat -f -u microshift.service + +master-logs-follow: .vagrant + vagrant ssh master -- sudo journalctl -o cat -f -u microshift.service + +worker-logs: .vagrant + vagrant ssh worker -- sudo journalctl -o cat -u microshift.service + +master-logs: .vagrant + vagrant ssh master -- sudo journalctl -o cat -u microshift.service + +master-dlv: .vagrant + vagrant ssh master -- /vagrant/runDlv.sh + +.PHONY: worker-logs worker-logs-follow master-logs master-logs-follow + diff --git a/vagrant/multi-worker/Vagrantfile b/vagrant/multi-worker/Vagrantfile new file mode 100644 index 0000000000..b8b6213cec --- /dev/null +++ b/vagrant/multi-worker/Vagrantfile @@ -0,0 +1,80 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + + +$script = <<-'SCRIPT' + sudo dnf module -y enable cri-o:1.21 + sudo dnf copr enable -y @redhat-et/microshift + sudo dnf install -y microshift firewalld nss-mdns avahi-tools + sudo systemctl enable crio --now + sudo sed -i 's/mdns4_minimal/mdns/g' /etc/nsswitch.conf + echo .local | sudo tee /etc/mdns.allow + echo .local. 
| sudo tee -a /etc/mdns.allow + + sudo systemctl enable avahi-daemon --now + sudo cp /vagrant/microshift /usr/bin + + sudo systemctl enable firewalld --now + sudo firewall-cmd --zone=public --permanent --add-port=30000-32767/tcp + sudo firewall-cmd --zone=public --add-masquerade --permanent + sudo firewall-cmd --zone=public --add-port=80/tcp --permanent + sudo firewall-cmd --zone=public --add-port=443/tcp --permanent + sudo firewall-cmd --zone=public --add-port=10250/tcp --permanent + sudo firewall-cmd --zone=public --add-port=10251/tcp --permanent + sudo firewall-cmd --zone=public --add-port=5353/udp --permanent + sudo firewall-cmd --permanent --zone=trusted --add-source=10.42.0.0/16 + sudo firewall-cmd --reload + sudo sh -c "cd /usr/bin; curl https://mirror.openshift.com/pub/openshift-v4/$(uname -m)/clients/ocp/stable/openshift-client-linux.tar.gz | tar xvfz -" + +SCRIPT + + +Vagrant.configure("2") do |config| + + config.vm.define "master" do |master| + master.vm.box = "fedora/35-cloud-base" + master.vm.hostname = "microshift.local" + master.vm.provider "libvirt" do |v| + v.memory = 4096 + v.cpus = 2 + end + master.vm.provision "shell", inline: $script + + # open the k8s API endpoint + master.vm.provision "shell", inline: "sudo firewall-cmd --zone=public --add-port=6443/tcp --permanent && sudo firewall-cmd --reload" + + # start microshift here + master.vm.provision "shell", inline: "sudo systemctl enable microshift --now" + end + + config.vm.define "worker" do |worker| + worker.vm.box = "fedora/35-cloud-base" + worker.vm.hostname = "worker.local" + worker.vm.provider "libvirt" do |v| + v.memory = 4096 + v.cpus = 2 + end + worker.vm.provision "shell", inline: $script + end + + config.vm.define "other" do |worker| + worker.vm.box = "fedora/35-cloud-base" + worker.vm.hostname = "consumer.local" + worker.vm.provider "libvirt" do |v| + v.memory = 2048 + v.cpus = 2 + end + worker.vm.provision "shell", inline: <<-'SCRIPT' + sudo dnf install -y nss-mdns avahi-tools + 
sudo sed -i 's/mdns4_minimal/mdns/g' /etc/nsswitch.conf + echo .local | sudo tee /etc/mdns.allow + echo .local. | sudo tee -a /etc/mdns.allow + sudo systemctl enable --now avahi-daemon + + SCRIPT + + end + + + +end diff --git a/vagrant/multi-worker/runDlv.sh b/vagrant/multi-worker/runDlv.sh new file mode 100755 index 0000000000..12ede7ca07 --- /dev/null +++ b/vagrant/multi-worker/runDlv.sh @@ -0,0 +1,12 @@ +#!/bin/bash +DLV=/home/vagrant/go/bin/dlv +[[ -x $DLV ]] || { + sudo dnf install -y golang && + go install github.com/go-delve/delve/cmd/dlv@latest; } + +sudo systemctl kill microshift +sudo systemctl disable --now microshift +sudo systemctl kill microshift +sudo firewall-cmd --zone=public --add-port=2345/tcp --permanent +sudo firewall-cmd --reload +sudo $DLV --listen=:2345 --headless --api-version=2 --accept-multiclient exec /usr/bin/microshift -- run