diff --git a/cmd/openshift-install/targets.go b/cmd/openshift-install/targets.go
index c2fe691660f..f8c70e81a9d 100644
--- a/cmd/openshift-install/targets.go
+++ b/cmd/openshift-install/targets.go
@@ -68,18 +68,18 @@ func newTargetsCmd() []*cobra.Command {
 func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args []string) error {
 	return func(cmd *cobra.Command, args []string) error {
-		assetStore, err := asset.NewStore(rootOpts.dir)
+		assetStore := &asset.StoreImpl{}
+		err := assetStore.Load(rootOpts.dir)
 		if err != nil {
-			return errors.Wrapf(err, "failed to create asset store")
+			logrus.Errorf("Could not load assets from state file: %v", err)
 		}
-
 		for _, a := range targets {
 			err := assetStore.Fetch(a)
 			if err != nil {
 				if exitError, ok := errors.Cause(err).(*exec.ExitError); ok && len(exitError.Stderr) > 0 {
 					logrus.Error(strings.Trim(string(exitError.Stderr), "\n"))
 				}
-				err = errors.Wrapf(err, "failed to fetch %s", a.Name())
+				err = errors.Wrapf(err, "failed to generate %s", a.Name())
 			}
 
 			if err2 := asset.PersistToFile(a, rootOpts.dir); err2 != nil {
@@ -95,15 +95,11 @@ func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args
 				return err
 			}
 		}
-
-		if err := assetStore.Save(rootOpts.dir); err != nil {
-			return errors.Wrapf(err, "failed to write to state file")
-		}
-
-		if err := assetStore.Purge(targets); err != nil {
-			return errors.Wrapf(err, "failed to delete existing on-disk files")
+		err = assetStore.Save(rootOpts.dir)
+		if err != nil {
+			err = errors.Wrapf(err, "failed to write to state file")
+			return err
 		}
-
 		return nil
 	}
 }
diff --git a/docs/design/assetgeneration.md b/docs/design/assetgeneration.md
index 87186991015..edf735e8b1a 100644
--- a/docs/design/assetgeneration.md
+++ b/docs/design/assetgeneration.md
@@ -1,4 +1,4 @@
-# Asset Generation
+# Asset generation
 
 The installer internally uses a directed acyclic graph to represent all of the assets it creates as well as their dependencies.
 This process looks very similar to many build systems (e.g. Bazel, Make).
@@ -16,67 +16,43 @@ An asset is the generic representation of work-item for installer that needs to
 
 The asset would usually follow these steps to generate its output:
 
-1. Fetch its parent assets.
+1. Load its components from disk. (If found, mark them for deletion; we consume our inputs.)
 
-2. Generate the assets either by:
-    * Using the parent assets
-    * Loading from on-disk assets
-    * Loading from state file
+2. Load its components from the State object given by the installer.
 
-3. If any of the parent assets are **dirty** (currently we think all on-disk assets are **dirty**), then use the parent assets to generate and return **dirty**.
+3. If both sources exist:
 
-4. If none of the parent assets are **dirty**, but the asset itself is on disk, then use the on-disk asset and return **dirty**.
+    * Both are the same: nothing to do; use the State object.
 
-5. If none of the parent assets or this asset is **dirty**, but the asset is found in the state file, then use the asset from state file and return **NOT dirty**.
+    * They differ: maybe try to merge and mark the asset **dirty**, or error out.
 
-6. If none of the parent assets are **dirty**, this asset is not **dirty**, and this asset is not found in the state file, then generate the asset using its parent assets and return **NOT dirty**.
+4. If only one exists, move the disk source into the State object.
+
+5. If neither exists, use the assets from the State object to create the components and store them in the State object.
 An example of the Asset:
 
 ```go
-type Asset interface {
+type Asset interface{
 	Dependencies() []Assets
-	Generate(Parents) error
-	Name() string
+	Generate(State) error
 }
 ```
 
 ## Writable Asset
 
 A writable asset is an asset that generates files to write to disk.
 These files could be for the user to consume as output from installer targets, such as install-config.yml from the InstallConfig asset.
 Or these files could be used internally by the installer, such as the cert/key files generated by TLS assets.
-A writable asset can also be loaded from disk to construct.
 
 ```go
 type WritableAsset interface{
 	Asset
 
 	Files() []File
-
-	Load(FileFetcher) (found bool, err error)
 }
 
 type File struct {
 	Filename string
 	Data     []byte
 }
-
-// FileFetcher is passed to every Loadable asset when implementing
-// the Load() function. The FileFetcher enables the Loadable asset
-// to read specific file(s) from disk.
-type FileFetcher interface {
-	// FetchByName returns the file with the given name.
-	FetchByName(string) *File
-	// FetchByPattern returns the files whose name match the given regexp.
-	FetchByPattern(*regexp.Regexp) ([]*File, error)
-}
-```
-After being loaded and consumed by a children asset, the existing on-disk asset will be purged.
-E.g.
-
-```shell
-$ openshift-install install-config
-# Generate install-config.yml
-
-$ openshift-install manifests
-# Generate manifests/ and tectonic/ dir, also remove install-config.yml
 ```
 
 ## Target generation
@@ -85,7 +61,9 @@ The installer uses depth-first traversal on the dependency graph, starting at th
 
 ### Dirty detection
 
-An asset generation reports **DIRTY** when it detects that the components have been modified from previous run. For now the asset is considered dirty when it's on-disk.
+An asset generation reports **DIRTY** when it detects that the components have been modified from a previous run.
+
+When generating the dependencies of an asset, if any one of them reports dirty, the installer informs the asset that its dependencies are dirty. The asset can then either regenerate from its dependencies or exit with an error.
 
 ### Example
diff --git a/pkg/asset/asset.go b/pkg/asset/asset.go
index c3fb50f4dc1..e416ef40494 100644
--- a/pkg/asset/asset.go
+++ b/pkg/asset/asset.go
@@ -21,16 +21,11 @@ type Asset interface {
 }
 
 // WritableAsset is an Asset that has files that can be written to disk.
-// It can also be loaded from disk.
 type WritableAsset interface {
 	Asset
 
 	// Files returns the files to write.
 	Files() []*File
-
-	// Load returns the on-disk asset if it exists.
-	// The asset object should be changed only when it's loaded successfully.
-	Load(FileFetcher) (found bool, err error)
 }
 
 // File is a file for an Asset.
diff --git a/pkg/asset/asset_test.go b/pkg/asset/asset_test.go
index 18cbf499c4e..67fffd8acdf 100644
--- a/pkg/asset/asset_test.go
+++ b/pkg/asset/asset_test.go
@@ -33,10 +33,6 @@ func (a *writablePersistAsset) Files() []*File {
 	return a.FileList
 }
 
-func (a *writablePersistAsset) Load(FileFetcher) (bool, error) {
-	return false, nil
-}
-
 func TestPersistToFile(t *testing.T) {
 	cases := []struct {
 		name string
diff --git a/pkg/asset/cluster/cluster.go b/pkg/asset/cluster/cluster.go
index 6b43b631504..5990a57d1c9 100644
--- a/pkg/asset/cluster/cluster.go
+++ b/pkg/asset/cluster/cluster.go
@@ -22,7 +22,7 @@ const (
 	// MetadataFilename is name of the file where clustermetadata is stored.
MetadataFilename = "metadata.json" - stateFileName = "terraform.tfstate" + stateFileName = "terraform.state" ) // Cluster uses the terraform executable to launch a cluster @@ -67,7 +67,7 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) { return errors.Wrap(err, "failed to write terraform.tfvars file") } - platform := installConfig.Config.Platform.Name() + platform := terraformVariables.Platform if err := data.Unpack(tmpDir, platform); err != nil { return err } @@ -155,12 +155,3 @@ func (c *Cluster) Generate(parents asset.Parents) (err error) { func (c *Cluster) Files() []*asset.File { return c.FileList } - -// Load returns error if the tfstate file is already on-disk, because we want to -// prevent user from accidentally re-launching the cluster. -func (c *Cluster) Load(f asset.FileFetcher) (found bool, err error) { - if f.FetchByName(stateFileName) != nil { - return true, fmt.Errorf("%q already exisits", stateFileName) - } - return false, nil -} diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index 08696af76cd..3d80b18cf8f 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -17,7 +17,8 @@ const ( // TerraformVariables depends on InstallConfig and // Ignition to generate the terrafor.tfvars. type TerraformVariables struct { - File *asset.File + Platform string + File *asset.File } var _ asset.WritableAsset = (*TerraformVariables)(nil) @@ -45,6 +46,8 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { worker := &machine.Worker{} parents.Get(installConfig, bootstrap, master, worker) + t.Platform = installConfig.Config.Platform.Name() + bootstrapIgn := string(bootstrap.Files()[0].Data) masterFiles := master.Files() @@ -74,14 +77,3 @@ func (t *TerraformVariables) Files() []*asset.File { } return []*asset.File{} } - -// Load reads the terraform.tfvars from disk. -func (t *TerraformVariables) Load(f asset.FileFetcher) (found bool, err error) { - file := f.FetchByName(tfvarsFilename) - if file == nil { - return false, nil - } - - t.File = file - return true, nil -} diff --git a/pkg/asset/filefetcher.go b/pkg/asset/filefetcher.go deleted file mode 100644 index 11fad17661a..00000000000 --- a/pkg/asset/filefetcher.go +++ /dev/null @@ -1,84 +0,0 @@ -package asset - -import ( - "io/ioutil" - "os" - "path/filepath" - "regexp" - "sort" -) - -// FileFetcher fetches the asset files from disk. -type FileFetcher interface { - // FetchByName returns the file with the given name. - FetchByName(string) *File - // FetchByPattern returns the files whose name match the given regexp. - FetchByPattern(*regexp.Regexp) []*File -} - -type fileFetcher struct { - onDiskAssets map[string][]byte -} - -func newFileFetcher(clusterDir string) (*fileFetcher, error) { - fileMap := make(map[string][]byte) - - // Don't bother if the clusterDir is not created yet because that - // means there's no assets generated yet. - _, err := os.Stat(clusterDir) - if err != nil && os.IsNotExist(err) { - return &fileFetcher{}, nil - } - - if err := filepath.Walk(clusterDir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - filename, err := filepath.Rel(clusterDir, path) - if err != nil { - return err - } - - data, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - fileMap[filename] = data - return nil - }); err != nil { - return nil, err - } - return &fileFetcher{onDiskAssets: fileMap}, nil -} - -// FetchByName returns the file with the given name. 
-func (f *fileFetcher) FetchByName(name string) *File { - data, ok := f.onDiskAssets[name] - if !ok { - return nil - } - return &File{Filename: name, Data: data} -} - -// FetchByPattern returns the files whose name match the given regexp. -func (f *fileFetcher) FetchByPattern(re *regexp.Regexp) []*File { - var files []*File - - for filename, data := range f.onDiskAssets { - if re.MatchString(filename) { - files = append(files, &File{ - Filename: filename, - Data: data, - }) - } - } - - sort.Slice(files, func(i, j int) bool { return files[i].Filename < files[j].Filename }) - return files -} diff --git a/pkg/asset/filefetcher_test.go b/pkg/asset/filefetcher_test.go deleted file mode 100644 index 9548527ca5f..00000000000 --- a/pkg/asset/filefetcher_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package asset - -import ( - "fmt" - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFetchByName(t *testing.T) { - tests := []struct { - name string - files map[string][]byte - input string - expectFile *File - }{ - { - name: "only dirs", - files: nil, - input: "", - expectFile: nil, - }, - { - name: "input empty", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "", - expectFile: nil, - }, - { - name: "input doesn't match", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "bar.foo", - expectFile: nil, - }, - { - name: "with contents", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "foo.bar", - expectFile: &File{ - Filename: "foo.bar", - Data: []byte("some data"), - }, - }, - { - name: "match one file", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "foo.bar", - expectFile: &File{ - Filename: "foo.bar", - Data: []byte("some data"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - f := &fileFetcher{onDiskAssets: tt.files} - file := f.FetchByName(tt.input) - assert.Equal(t, tt.expectFile, file) - }) - } -} - -func TestFetchByPattern(t *testing.T) { - tests := []struct { - name string - files map[string][]byte - input *regexp.Regexp - expectFiles []*File - }{ - { - name: "match master configs", - files: map[string][]byte{ - "master-0.ign": []byte("some data 0"), - "master-1.ign": []byte("some data 1"), - "master-2.ign": []byte("some data 2"), - "master-10.ign": []byte("some data 3"), - "master-20.ign": []byte("some data 4"), - "master-00.ign": []byte("some data 5"), - "master-01.ign": []byte("some data 6"), - "master-0x.ign": []byte("some data 7"), - "master-1x.ign": []byte("some data 8"), - "amaster-0.ign": []byte("some data 9"), - "master-.ign": []byte("some data 10"), - "master-.igni": []byte("some data 11"), - "master-.ignign": []byte("some data 12"), - }, - input: regexp.MustCompile(`^(master-(0|([1-9]\d*))\.ign)$`), - expectFiles: []*File{ - { - Filename: "master-0.ign", - Data: []byte("some data 0"), - }, - { - Filename: "master-1.ign", - Data: []byte("some data 1"), - }, - { - Filename: "master-10.ign", - Data: []byte("some data 3"), - }, - { - Filename: "master-2.ign", - Data: []byte("some data 2"), - }, - { - Filename: "master-20.ign", - Data: []byte("some data 4"), - }, - }, - }, - { - name: "match directory", - files: map[string][]byte{ - "manifests/": []byte("some data 0"), - "manifests/0": []byte("some data 1"), - "manifests/some": []byte("some data 2"), - "manifest/": []byte("some data 3"), - "manifests": []byte("some data 4"), - "amanifests/a": []byte("some data 5"), - }, - input: regexp.MustCompile(fmt.Sprintf(`^%s\%c.*`, "manifests", '/')), - 
expectFiles: []*File{ - { - Filename: "manifests/", - Data: []byte("some data 0"), - }, - { - Filename: "manifests/0", - Data: []byte("some data 1"), - }, - { - Filename: "manifests/some", - Data: []byte("some data 2"), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - f := &fileFetcher{onDiskAssets: tt.files} - assert.Equal(t, tt.expectFiles, f.FetchByPattern(tt.input)) - }) - } -} diff --git a/pkg/asset/ignition/bootstrap/bootstrap.go b/pkg/asset/ignition/bootstrap/bootstrap.go index 2d7644bf200..618f3253930 100644 --- a/pkg/asset/ignition/bootstrap/bootstrap.go +++ b/pkg/asset/ignition/bootstrap/bootstrap.go @@ -25,9 +25,8 @@ import ( ) const ( - rootDir = "/opt/tectonic" - defaultReleaseImage = "registry.svc.ci.openshift.org/openshift/origin-release:v4.0" - bootstrapIgnFilename = "bootstrap.ign" + rootDir = "/opt/tectonic" + defaultReleaseImage = "registry.svc.ci.openshift.org/openshift/origin-release:v4.0" ) // bootstrapTemplateData is the data to use to replace values in bootstrap @@ -118,7 +117,7 @@ func (a *Bootstrap) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to Marshal Ignition config") } a.File = &asset.File{ - Filename: bootstrapIgnFilename, + Filename: "bootstrap.ign", Data: data, } @@ -300,19 +299,3 @@ func applyTemplateData(template *template.Template, templateData interface{}) st } return buf.String() } - -// Load returns the bootstrap ignition from disk. -func (a *Bootstrap) Load(f asset.FileFetcher) (found bool, err error) { - file := f.FetchByName(bootstrapIgnFilename) - if file == nil { - return false, nil - } - - config := &igntypes.Config{} - if err := json.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/ignition/machine/master.go b/pkg/asset/ignition/machine/master.go index 7cde423c046..fd45c5320ff 100644 --- a/pkg/asset/ignition/machine/master.go +++ b/pkg/asset/ignition/machine/master.go @@ -3,7 +3,6 @@ package machine import ( "encoding/json" "fmt" - "regexp" igntypes "github.com/coreos/ignition/config/v2_2/types" "github.com/pkg/errors" @@ -64,23 +63,3 @@ func (a *Master) Name() string { func (a *Master) Files() []*asset.File { return a.FileList } - -// Load returns the master ignitions from disk. -func (a *Master) Load(f asset.FileFetcher) (found bool, err error) { - fileList := f.FetchByPattern(regexp.MustCompile(`^(master-(0|([1-9]\d*))\.ign)$`)) - if len(fileList) == 0 { - return false, nil - } - - configs := make([]*igntypes.Config, len(fileList)) - for i, file := range fileList { - configs[i] = &igntypes.Config{} - if err := json.Unmarshal(file.Data, configs[i]); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - } - - a.FileList, a.Configs = fileList, configs - - return true, nil -} diff --git a/pkg/asset/ignition/machine/worker.go b/pkg/asset/ignition/machine/worker.go index 201b2e86dd2..ac08899e978 100644 --- a/pkg/asset/ignition/machine/worker.go +++ b/pkg/asset/ignition/machine/worker.go @@ -11,10 +11,6 @@ import ( "github.com/openshift/installer/pkg/asset/tls" ) -const ( - workerIgnFilename = "worker.ign" -) - // Worker is an asset that generates the ignition config for worker nodes. 
type Worker struct { Config *igntypes.Config @@ -44,7 +40,7 @@ func (a *Worker) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to get InstallConfig from parents") } a.File = &asset.File{ - Filename: workerIgnFilename, + Filename: "worker.ign", Data: data, } @@ -63,19 +59,3 @@ func (a *Worker) Files() []*asset.File { } return []*asset.File{} } - -// Load returns the worker ignitions from disk. -func (a *Worker) Load(f asset.FileFetcher) (found bool, err error) { - file := f.FetchByName(workerIgnFilename) - if file == nil { - return false, nil - } - - config := &igntypes.Config{} - if err := json.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go index 46d70cb0204..286cd8258c2 100644 --- a/pkg/asset/installconfig/installconfig.go +++ b/pkg/asset/installconfig/installconfig.go @@ -124,7 +124,7 @@ func (a *InstallConfig) Generate(parents asset.Parents) error { return errors.Wrap(err, "failed to Marshal InstallConfig") } a.File = &asset.File{ - Filename: installConfigFilename, + Filename: "install-config.yml", Data: data, } @@ -159,19 +159,3 @@ func parseCIDR(s string) net.IPNet { _, cidr, _ := net.ParseCIDR(s) return *cidr } - -// Load returns the installconfig from disk. -func (a *InstallConfig) Load(f asset.FileFetcher) (found bool, err error) { - file := f.FetchByName(installConfigFilename) - if file == nil { - return false, nil - } - - config := &types.InstallConfig{} - if err := yaml.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/kubeconfig/admin.go b/pkg/asset/kubeconfig/admin.go index 4e70f019f45..2151dc6cf1f 100644 --- a/pkg/asset/kubeconfig/admin.go +++ b/pkg/asset/kubeconfig/admin.go @@ -1,17 +1,11 @@ package kubeconfig import ( - "path/filepath" - "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/tls" ) -var ( - kubeconfigAdminPath = filepath.Join("auth", "kubeconfig") -) - // Admin is the asset for the admin kubeconfig. type Admin struct { kubeconfig @@ -40,7 +34,7 @@ func (k *Admin) Generate(parents asset.Parents) error { adminCertKey, installConfig.Config, "admin", - kubeconfigAdminPath, + "kubeconfig", ) } @@ -48,8 +42,3 @@ func (k *Admin) Generate(parents asset.Parents) error { func (k *Admin) Name() string { return "Kubeconfig Admin" } - -// Load returns the kubeconfig from disk. 
-func (k *Admin) Load(f asset.FileFetcher) (found bool, err error) { - return k.load(f, kubeconfigAdminPath) -} diff --git a/pkg/asset/kubeconfig/kubeconfig.go b/pkg/asset/kubeconfig/kubeconfig.go index d17ff37b51e..413fae8dda2 100644 --- a/pkg/asset/kubeconfig/kubeconfig.go +++ b/pkg/asset/kubeconfig/kubeconfig.go @@ -2,6 +2,7 @@ package kubeconfig import ( "fmt" + "path/filepath" "github.com/ghodss/yaml" "github.com/pkg/errors" @@ -23,7 +24,7 @@ func (k *kubeconfig) generate( clientCertKey tls.CertKeyInterface, installConfig *types.InstallConfig, userName string, - kubeconfigPath string, + filename string, ) error { k.Config = &clientcmd.Config{ Clusters: []clientcmd.NamedCluster{ @@ -62,7 +63,7 @@ func (k *kubeconfig) generate( } k.File = &asset.File{ - Filename: kubeconfigPath, + Filename: filepath.Join("auth", filename), Data: data, } @@ -76,19 +77,3 @@ func (k *kubeconfig) Files() []*asset.File { } return []*asset.File{} } - -// load returns the kubeconfig from disk. -func (k *kubeconfig) load(f asset.FileFetcher, name string) (found bool, err error) { - file := f.FetchByName(name) - if file == nil { - return false, nil - } - - config := &clientcmd.Config{} - if err := yaml.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - k.File, k.Config = file, config - return true, nil -} diff --git a/pkg/asset/kubeconfig/kubeconfig_test.go b/pkg/asset/kubeconfig/kubeconfig_test.go index 44352a244c8..f40bbf35803 100644 --- a/pkg/asset/kubeconfig/kubeconfig_test.go +++ b/pkg/asset/kubeconfig/kubeconfig_test.go @@ -57,7 +57,7 @@ func TestKubeconfigGenerate(t *testing.T) { { name: "admin kubeconfig", userName: "admin", - filename: "auth/kubeconfig", + filename: "kubeconfig", clientCert: adminCert, expectedData: []byte(`clusters: - cluster: @@ -81,7 +81,7 @@ users: { name: "kubelet kubeconfig", userName: "kubelet", - filename: "auth/kubeconfig-kubelet", + filename: "kubeconfig-kubelet", clientCert: kubeletCert, expectedData: []byte(`clusters: - cluster: @@ -111,7 +111,7 @@ users: assert.NoError(t, err, "unexpected error generating config") actualFiles := kc.Files() assert.Equal(t, 1, len(actualFiles), "unexpected number of files generated") - assert.Equal(t, tt.filename, actualFiles[0].Filename, "unexpected file name generated") + assert.Equal(t, "auth/"+tt.filename, actualFiles[0].Filename, "unexpected file name generated") assert.Equal(t, tt.expectedData, actualFiles[0].Data, "unexpected config") }) } diff --git a/pkg/asset/kubeconfig/kubelet.go b/pkg/asset/kubeconfig/kubelet.go index 17c26cbca12..2337312b266 100644 --- a/pkg/asset/kubeconfig/kubelet.go +++ b/pkg/asset/kubeconfig/kubelet.go @@ -1,17 +1,11 @@ package kubeconfig import ( - "path/filepath" - "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" "github.com/openshift/installer/pkg/asset/tls" ) -var ( - kubeconfigKubeletPath = filepath.Join("auth", "kubeconfig-kubelet") -) - // Kubelet is the asset for the kubelet kubeconfig. type Kubelet struct { kubeconfig @@ -40,7 +34,7 @@ func (k *Kubelet) Generate(parents asset.Parents) error { kubeletCertKey, installConfig.Config, "kubelet", - kubeconfigKubeletPath, + "kubeconfig-kubelet", ) } @@ -48,8 +42,3 @@ func (k *Kubelet) Generate(parents asset.Parents) error { func (k *Kubelet) Name() string { return "Kubeconfig Kubelet" } - -// Load is a no-op because kubelet kubeconfig is not written to disk. 
-func (k *Kubelet) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/manifests/kube-addon-operator.go b/pkg/asset/manifests/kube-addon-operator.go index 66958d45d00..0b6bb942c06 100644 --- a/pkg/asset/manifests/kube-addon-operator.go +++ b/pkg/asset/manifests/kube-addon-operator.go @@ -11,10 +11,6 @@ import ( "k8s.io/apimachinery/pkg/util/rand" ) -const ( - kaoCfgFilename = "kube-addon-operator-config.yml" -) - // kubeAddonOperator generates the network-operator-*.yml files type kubeAddonOperator struct { Config *kubeaddon.OperatorConfig @@ -59,7 +55,7 @@ func (kao *kubeAddonOperator) Generate(dependencies asset.Parents) error { } kao.File = &asset.File{ - Filename: kaoCfgFilename, + Filename: "kube-addon-operator-config.yml", Data: data, } @@ -73,8 +69,3 @@ func (kao *kubeAddonOperator) Files() []*asset.File { } return []*asset.File{} } - -// Load is a no-op because kube-addon-operator manifest is not written to disk. -func (kao *kubeAddonOperator) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/manifests/machine-api-operator.go b/pkg/asset/manifests/machine-api-operator.go index 38c81ea0628..fea72ae7478 100644 --- a/pkg/asset/manifests/machine-api-operator.go +++ b/pkg/asset/manifests/machine-api-operator.go @@ -17,7 +17,6 @@ const ( maoTargetNamespace = "openshift-cluster-api" // DefaultChannel is the default RHCOS channel for the cluster. DefaultChannel = "tested" - maoCfgFilename = "machine-api-operator-config.yml" ) // machineAPIOperator generates the network-operator-*.yml files @@ -135,7 +134,7 @@ func (mao *machineAPIOperator) Generate(dependencies asset.Parents) error { return errors.Wrapf(err, "failed to marshal %s config", mao.Name()) } mao.File = &asset.File{ - Filename: maoCfgFilename, + Filename: "machine-api-operator-config.yml", Data: data, } @@ -146,8 +145,3 @@ func (mao *machineAPIOperator) Generate(dependencies asset.Parents) error { func (mao *machineAPIOperator) Files() []*asset.File { return []*asset.File{mao.File} } - -// Load is a no-op because machine-api-operator manifest is not written to disk. -func (mao *machineAPIOperator) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/manifests/network-operator.go b/pkg/asset/manifests/network-operator.go index 0cb3002b085..82ad448404d 100644 --- a/pkg/asset/manifests/network-operator.go +++ b/pkg/asset/manifests/network-operator.go @@ -12,9 +12,7 @@ import ( ) const ( - defaultMTU = "1450" - noCfgFilename = "network-operator-config.yml" - noManifestFilename = "network-operator-manifests.yml" + defaultMTU = "1450" ) // networkOperator generates the network-operator-*.yml files @@ -61,11 +59,11 @@ func (no *networkOperator) Generate(dependencies asset.Parents) error { } no.FileList = []*asset.File{ { - Filename: noCfgFilename, + Filename: "network-operator-config.yml", Data: configData, }, { - Filename: noManifestFilename, + Filename: "network-operator-manifests.yml", Data: []byte{}, }, } @@ -77,8 +75,3 @@ func (no *networkOperator) Generate(dependencies asset.Parents) error { func (no *networkOperator) Files() []*asset.File { return no.FileList } - -// Load is a no-op because network-operator manifest is not written to disk. 
-func (no *networkOperator) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/manifests/operators.go b/pkg/asset/manifests/operators.go index 4be12421c64..bc58a3fbe03 100644 --- a/pkg/asset/manifests/operators.go +++ b/pkg/asset/manifests/operators.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "fmt" "path/filepath" - "regexp" "text/template" "github.com/ghodss/yaml" @@ -24,18 +23,15 @@ const ( manifestDir = "manifests" ) -var ( - kubeSysConfigPath = filepath.Join(manifestDir, "cluster-config.yaml") - - _ asset.WritableAsset = (*Manifests)(nil) -) - // Manifests generates the dependent operator config.yaml files type Manifests struct { - KubeSysConfig *configurationObject - FileList []*asset.File + KubeSysConfig *configurationObject + TectonicConfig *configurationObject + FileList []*asset.File } +var _ asset.WritableAsset = (*Manifests)(nil) + type genericData map[string]string // Name returns a human friendly name for the operator @@ -49,7 +45,9 @@ func (m *Manifests) Dependencies() []asset.Asset { return []asset.Asset{ &installconfig.InstallConfig{}, &networkOperator{}, + &kubeAddonOperator{}, &machineAPIOperator{}, + &Tectonic{}, &tls.RootCA{}, &tls.EtcdCA{}, &tls.IngressCertKey{}, @@ -72,9 +70,10 @@ func (m *Manifests) Dependencies() []asset.Asset { // Generate generates the respective operator config.yml files func (m *Manifests) Generate(dependencies asset.Parents) error { no := &networkOperator{} + addon := &kubeAddonOperator{} mao := &machineAPIOperator{} installConfig := &installconfig.InstallConfig{} - dependencies.Get(no, mao, installConfig) + dependencies.Get(no, addon, mao, installConfig) // no+mao go to kube-system config map m.KubeSysConfig = configMap("kube-system", "cluster-config-v1", genericData{ @@ -87,11 +86,24 @@ func (m *Manifests) Generate(dependencies asset.Parents) error { return errors.Wrap(err, "failed to create kube-system/cluster-config-v1 configmap") } + // addon goes to openshift system + m.TectonicConfig = configMap("tectonic-system", "cluster-config-v1", genericData{ + "addon-config": string(addon.Files()[0].Data), + }) + tectonicConfigData, err := yaml.Marshal(m.TectonicConfig) + if err != nil { + return errors.Wrap(err, "failed to create tectonic-system/cluster-config-v1 configmap") + } + m.FileList = []*asset.File{ { - Filename: kubeSysConfigPath, + Filename: filepath.Join(manifestDir, "cluster-config.yaml"), Data: kubeSysConfigData, }, + { + Filename: filepath.Join("tectonic", "00_cluster-config.yaml"), + Data: tectonicConfigData, + }, } m.FileList = append(m.FileList, m.generateBootKubeManifests(dependencies)...) @@ -219,32 +231,3 @@ func applyTemplateData(template *template.Template, templateData interface{}) [] } return buf.Bytes() } - -// Load returns the manifests asset from disk. -func (m *Manifests) Load(f asset.FileFetcher) (bool, error) { - re := fmt.Sprintf(`^%s\%c.*`, manifestDir, filepath.Separator) // e.g. 
`^manifests\/.*` - fileList := f.FetchByPattern(regexp.MustCompile(re)) - if len(fileList) == 0 { - return false, nil - } - - kubeSysConfig := &configurationObject{} - var found bool - for _, file := range fileList { - if file.Filename == kubeSysConfigPath { - if err := yaml.Unmarshal(file.Data, kubeSysConfig); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal cluster-config.yaml") - } - found = true - } - } - - if !found { - return false, nil - - } - - m.FileList, m.KubeSysConfig = fileList, kubeSysConfig - - return true, nil -} diff --git a/pkg/asset/manifests/tectonic.go b/pkg/asset/manifests/tectonic.go index 1a4791911be..c040993cb1c 100644 --- a/pkg/asset/manifests/tectonic.go +++ b/pkg/asset/manifests/tectonic.go @@ -3,12 +3,7 @@ package manifests import ( "bytes" "encoding/base64" - "fmt" "path/filepath" - "regexp" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/asset/installconfig" @@ -17,22 +12,13 @@ import ( "github.com/openshift/installer/pkg/asset/tls" ) -const ( - tectonicManifestDir = "tectonic" -) - -var ( - tectonicConfigPath = filepath.Join(tectonicManifestDir, "00_cluster-config.yaml") - - _ asset.WritableAsset = (*Tectonic)(nil) -) - // Tectonic generates the dependent resource manifests for tectonic (as against bootkube) type Tectonic struct { - TectonicConfig *configurationObject - FileList []*asset.File + FileList []*asset.File } +var _ asset.WritableAsset = (*Tectonic)(nil) + // Name returns a human friendly name for the operator func (t *Tectonic) Name() string { return "Tectonic Manifests" @@ -47,7 +33,6 @@ func (t *Tectonic) Dependencies() []asset.Asset { &tls.KubeCA{}, &machines.ClusterK8sIO{}, &machines.Worker{}, - &kubeAddonOperator{}, } } @@ -58,8 +43,7 @@ func (t *Tectonic) Generate(dependencies asset.Parents) error { kubeCA := &tls.KubeCA{} clusterk8sio := &machines.ClusterK8sIO{} worker := &machines.Worker{} - addon := &kubeAddonOperator{} - dependencies.Get(installConfig, ingressCertKey, kubeCA, clusterk8sio, worker, addon) + dependencies.Get(installConfig, ingressCertKey, kubeCA, clusterk8sio, worker) templateData := &tectonicTemplateData{ IngressCaCert: base64.StdEncoding.EncodeToString(kubeCA.Cert()), @@ -95,24 +79,10 @@ func (t *Tectonic) Generate(dependencies asset.Parents) error { "99_tectonic-system-02-pull.json": applyTemplateData(content.PullTectonicSystem, templateData), } - // addon goes to openshift system - t.TectonicConfig = configMap("tectonic-system", "cluster-config-v1", genericData{ - "addon-config": string(addon.Files()[0].Data), - }) - tectonicConfigData, err := yaml.Marshal(t.TectonicConfig) - if err != nil { - return errors.Wrap(err, "failed to create tectonic-system/cluster-config-v1 configmap") - } - - t.FileList = []*asset.File{ - { - Filename: tectonicConfigPath, - Data: tectonicConfigData, - }, - } + t.FileList = make([]*asset.File, 0, len(assetData)) for name, data := range assetData { t.FileList = append(t.FileList, &asset.File{ - Filename: filepath.Join(tectonicManifestDir, name), + Filename: filepath.Join("tectonic", name), Data: data, }) } @@ -124,30 +94,3 @@ func (t *Tectonic) Generate(dependencies asset.Parents) error { func (t *Tectonic) Files() []*asset.File { return t.FileList } - -// Load returns the tectonic asset from disk. -func (t *Tectonic) Load(f asset.FileFetcher) (bool, error) { - re := fmt.Sprintf(`^%s\%c.*`, tectonicManifestDir, filepath.Separator) // e.g. 
`^tectonic\/.*` - fileList := f.FetchByPattern(regexp.MustCompile(re)) - if len(fileList) == 0 { - return false, nil - } - - tectonicConfig := &configurationObject{} - var found bool - for _, file := range fileList { - if file.Filename == tectonicConfigPath { - if err := yaml.Unmarshal(file.Data, tectonicConfig); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal 00_cluster-config.yaml") - } - found = true - } - } - - if !found { - return false, nil - } - - t.FileList, t.TectonicConfig = fileList, tectonicConfig - return true, nil -} diff --git a/pkg/asset/store.go b/pkg/asset/store.go index d029465b070..bc616cbbb5d 100644 --- a/pkg/asset/store.go +++ b/pkg/asset/store.go @@ -20,59 +20,23 @@ type Store interface { // Fetch retrieves the state of the given asset, generating it and its // dependencies if necessary. Fetch(Asset) error - - // Save dumps the entire state map into a file - Save(dir string) error - - // Purge deletes the on-disk assets that are consumed already. - // E.g., install-config.yml will be deleted after fetching 'manifests'. - Purge(excluded []WritableAsset) error -} - -// assetState includes an asset and a boolean that indicates -// whether it's dirty or not. -type assetState struct { - asset Asset - dirty bool } // StoreImpl is the implementation of Store. type StoreImpl struct { - assets map[reflect.Type]assetState + assets map[reflect.Type]Asset stateFileAssets map[string]json.RawMessage - fileFetcher *fileFetcher - onDiskAssets []WritableAsset // This records the on-disk assets that are loaded already, which will be cleaned up in the end. -} - -// NewStore returns an asset store that implements the Store interface. -func NewStore(dir string) (Store, error) { - ff, err := newFileFetcher(dir) - if err != nil { - return nil, errors.Wrapf(err, "failed to create file fetcher from dir %q", dir) - } - - store := &StoreImpl{ - fileFetcher: ff, - assets: make(map[reflect.Type]assetState), - } - - if err := store.load(dir); err != nil { - return nil, err - } - - return store, nil } // Fetch retrieves the state of the given asset, generating it and its // dependencies if necessary. func (s *StoreImpl) Fetch(asset Asset) error { - _, err := s.fetch(asset, "") - return err + return s.fetch(asset, "") } -// load retrieves the state from the state file present in the given directory +// Load retrieves the state from the state file present in the given directory // and returns the assets map -func (s *StoreImpl) load(dir string) error { +func (s *StoreImpl) Load(dir string) error { path := filepath.Join(dir, stateFileName) assets := make(map[string]json.RawMessage) data, err := ioutil.ReadFile(path) @@ -90,26 +54,22 @@ func (s *StoreImpl) load(dir string) error { return nil } -// LoadAssetFromState renders the asset object arguments from the state file contents. -func (s *StoreImpl) LoadAssetFromState(asset Asset) error { +// GetStateAsset renders the asset object arguments from the state file contents +// also returns a boolean indicating whether the object was found in the state file or not +func (s *StoreImpl) GetStateAsset(asset Asset) (bool, error) { bytes, ok := s.stateFileAssets[reflect.TypeOf(asset).String()] if !ok { - return errors.Errorf("asset %s is not found in the state file", asset.Name()) + return false, nil } - return json.Unmarshal(bytes, asset) -} - -// IsAssetInState tests whether the asset is in the state file. 
-func (s *StoreImpl) IsAssetInState(asset Asset) bool { - _, ok := s.stateFileAssets[reflect.TypeOf(asset).String()] - return ok + err := json.Unmarshal(bytes, asset) + return true, err } // Save dumps the entire state map into a file func (s *StoreImpl) Save(dir string) error { assetMap := make(map[string]Asset) for k, v := range s.assets { - assetMap[k.String()] = v.asset + assetMap[k.String()] = v } data, err := json.MarshalIndent(&assetMap, "", " ") if err != nil { @@ -126,21 +86,13 @@ func (s *StoreImpl) Save(dir string) error { return nil } -// fetch retrieves the state of the given asset, generating it and its -// dependencies if necessary. -// It returns dirty if the asset or any of its parents is loaded from on-disk files. -func (s *StoreImpl) fetch(asset Asset, indent string) (dirty bool, err error) { +func (s *StoreImpl) fetch(asset Asset, indent string) error { logrus.Debugf("%sFetching %s...", indent, asset.Name()) - - // Return immediately if the asset is found in the cache, - // this is because we are doing a depth-first-search, it's guaranteed - // that we always fetch the parent before children, so we don't need - // to worry about invalidating anything in the cache. storedAsset, ok := s.assets[reflect.TypeOf(asset)] if ok { logrus.Debugf("%sFound %s...", indent, asset.Name()) - reflect.ValueOf(asset).Elem().Set(reflect.ValueOf(storedAsset.asset).Elem()) - return storedAsset.dirty, nil + reflect.ValueOf(asset).Elem().Set(reflect.ValueOf(storedAsset).Elem()) + return nil } dependencies := asset.Dependencies() @@ -148,101 +100,33 @@ func (s *StoreImpl) fetch(asset Asset, indent string) (dirty bool, err error) { if len(dependencies) > 0 { logrus.Debugf("%sGenerating dependencies of %s...", indent, asset.Name()) } - - var anyParentsDirty bool for _, d := range dependencies { - dt, err := s.fetch(d, indent+" ") + err := s.fetch(d, indent+" ") if err != nil { - return false, errors.Wrapf(err, "failed to fetch dependency for %s", asset.Name()) - } - if dt { - anyParentsDirty = true + return errors.Wrapf(err, "failed to fetch dependency for %s", asset.Name()) } parents.Add(d) } - // Try to find the asset from the state file. + // Before generating the asset, look if we have it all ready in the state file + // if yes, then use it instead logrus.Debugf("%sLooking up asset from state file: %s", indent, reflect.TypeOf(asset).String()) - foundInStateFile := s.IsAssetInState(asset) - - // Try to load from on-disk files first. - var foundOnDisk bool - as, ok := asset.(WritableAsset) + ok, err := s.GetStateAsset(asset) + if err != nil { + return errors.Wrapf(err, "failed to unmarshal asset %q from state file %q", asset.Name(), stateFileName) + } if ok { - logrus.Debugf("%sLooking up asset %s from disk", indent, asset.Name()) - foundOnDisk, err = as.Load(s.fileFetcher) + logrus.Debugf("%sAsset found in state file", indent) + } else { + logrus.Debugf("%sAsset not found in state file. Generating %s...", indent, asset.Name()) + err := asset.Generate(parents) if err != nil { - return false, errors.Wrapf(err, "unexpected error when loading asset %s", asset.Name()) - } - if foundOnDisk { - logrus.Debugf("%sFound %s on disk...", indent, asset.Name()) - s.onDiskAssets = append(s.onDiskAssets, as) - } - } - - dirty = anyParentsDirty || foundOnDisk - - switch { - case anyParentsDirty && foundOnDisk: - // TODO(yifan): We should check the content to make sure there's no conflict. 
- logrus.Warningf("%sBoth parent assets and current asset %s are on disk, Re-generating ...", indent, asset.Name()) - if err := asset.Generate(parents); err != nil { - return dirty, errors.Wrapf(err, "failed to generate asset %s", asset.Name()) - } - case anyParentsDirty: - if foundInStateFile { - logrus.Warningf("%sRe-generating %s...", indent, asset.Name()) - } else { - logrus.Debugf("%sGenerating %s...", indent, asset.Name()) - } - if err := asset.Generate(parents); err != nil { - return dirty, errors.Wrapf(err, "failed to generate asset %s", asset.Name()) - } - case foundOnDisk: - logrus.Debugf("%sUsing on-disk asset %s", indent, asset.Name()) - default: // !anyParentsDirty && !foundOnDisk - if foundInStateFile { - if err := s.LoadAssetFromState(asset); err != nil { - return dirty, errors.Wrapf(err, "failed to load asset from state file %s", asset.Name()) - } - } else { - logrus.Debugf("%sAsset %s not found in state file. Generating ...", indent, asset.Name()) - if err := asset.Generate(parents); err != nil { - return dirty, errors.Wrapf(err, "failed to generate asset %s", asset.Name()) - } + return errors.Wrapf(err, "failed to generate asset %s", asset.Name()) } } - - s.assets[reflect.TypeOf(asset)] = assetState{asset: asset, dirty: dirty} - return dirty, nil -} - -// Purge deletes the on-disk assets that are consumed already. -// E.g., install-config.yml will be deleted after fetching 'manifests'. -// The target assets are excluded. -func (s *StoreImpl) Purge(excluded []WritableAsset) error { - var toPurge []WritableAsset - - for _, asset := range s.onDiskAssets { - var found bool - for _, as := range excluded { - if reflect.TypeOf(as) == reflect.TypeOf(asset) { - found = true - break - } - } - if !found { - toPurge = append(toPurge, asset) - } - } - - for _, asset := range toPurge { - logrus.Debugf("Purging asset %q", asset.Name()) - for _, f := range asset.Files() { - if err := os.Remove(f.Filename); err != nil { - return errors.Wrapf(err, "failed to remove file %q", f.Filename) - } - } + if s.assets == nil { + s.assets = make(map[reflect.Type]Asset) } + s.assets[reflect.TypeOf(asset)] = asset return nil } diff --git a/pkg/asset/store_test.go b/pkg/asset/store_test.go index c2a165d778c..49ed8e078fd 100644 --- a/pkg/asset/store_test.go +++ b/pkg/asset/store_test.go @@ -18,14 +18,12 @@ func (l *generationLog) logGeneration(a Asset) { type testStoreAsset interface { Asset SetDependencies([]Asset) - SetDirty(bool) } type testStoreAssetImpl struct { name string dependencies []Asset generationLog *generationLog - dirty bool } func (a *testStoreAssetImpl) Dependencies() []Asset { @@ -45,17 +43,6 @@ func (a *testStoreAssetImpl) SetDependencies(dependencies []Asset) { a.dependencies = dependencies } -func (a *testStoreAssetImpl) SetDirty(dirty bool) { - a.dirty = dirty -} -func (a *testStoreAssetImpl) Files() []*File { - return []*File{{Filename: a.name}} -} - -func (a *testStoreAssetImpl) Load(FileFetcher) (bool, error) { - return a.dirty, nil -} - type testStoreAssetA struct { testStoreAssetImpl } @@ -207,7 +194,7 @@ func TestStoreFetch(t *testing.T) { log: []string{}, } store := &StoreImpl{ - assets: make(map[reflect.Type]assetState), + assets: Parents{}, } assets := make(map[string]testStoreAsset, len(tc.assets)) for name := range tc.assets { @@ -222,7 +209,7 @@ func TestStoreFetch(t *testing.T) { } for _, assetName := range tc.existingAssets { asset := assets[assetName] - store.assets[reflect.TypeOf(asset)] = assetState{asset: asset} + store.assets[reflect.TypeOf(asset)] = asset } err 
:= store.Fetch(assets[tc.target]) assert.NoError(t, err, "error fetching asset") @@ -230,100 +217,3 @@ func TestStoreFetch(t *testing.T) { }) } } - -func TestStoreFetchDirty(t *testing.T) { - cases := []struct { - name string - assets map[string][]string - dirtyAssets []string - target string - expectedGenerationLog []string - expectedDirty bool - }{ - { - name: "no dirty assets", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - dirtyAssets: nil, - target: "a", - expectedGenerationLog: []string{"b", "a"}, - expectedDirty: false, - }, - { - name: "dirty asset causes re-generation", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - dirtyAssets: []string{"a"}, - target: "a", - expectedGenerationLog: []string{"b"}, - expectedDirty: true, - }, - { - name: "dirty dependent asset causes re-generation", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - dirtyAssets: []string{"b"}, - target: "a", - expectedGenerationLog: []string{"a"}, - expectedDirty: true, - }, - { - name: "dirty dependents invalidate all its children", - assets: map[string][]string{ - "a": {"b", "c"}, - "b": {"d"}, - "c": {"d"}, - "d": {}, - }, - dirtyAssets: []string{"d"}, - target: "a", - expectedGenerationLog: []string{"b", "c", "a"}, - expectedDirty: true, - }, - { - name: "re-generate when both parents and childre are dirty", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - dirtyAssets: []string{"a", "b"}, - target: "a", - expectedGenerationLog: []string{"a"}, - expectedDirty: true, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - gl := &generationLog{ - log: []string{}, - } - store := &StoreImpl{ - assets: make(map[reflect.Type]assetState), - } - assets := make(map[string]testStoreAsset, len(tc.assets)) - for name := range tc.assets { - assets[name] = newTestStoreAsset(gl, name) - } - for name, deps := range tc.assets { - dependencies := make([]Asset, len(deps)) - for i, d := range deps { - dependencies[i] = assets[d] - } - assets[name].SetDependencies(dependencies) - } - for _, name := range tc.dirtyAssets { - assets[name].SetDirty(true) - } - dirty, err := store.fetch(assets[tc.target], "") - assert.NoError(t, err, "unexpected error") - assert.EqualValues(t, tc.expectedGenerationLog, gl.log) - assert.Equal(t, tc.expectedDirty, dirty) - }) - } -} diff --git a/pkg/asset/tls/certkey.go b/pkg/asset/tls/certkey.go index dbb0b921261..48976bfb19f 100644 --- a/pkg/asset/tls/certkey.go +++ b/pkg/asset/tls/certkey.go @@ -102,8 +102,3 @@ func (c *CertKey) generateFiles(filenameBase string) { }, } } - -// Load is a no-op because TLS assets are not written to disk. -func (c *CertKey) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/tls/serviceaccountkeypair.go b/pkg/asset/tls/serviceaccountkeypair.go index ffbb363f10a..8e0dca2d2bc 100644 --- a/pkg/asset/tls/serviceaccountkeypair.go +++ b/pkg/asset/tls/serviceaccountkeypair.go @@ -1,13 +1,15 @@ package tls -import "github.com/openshift/installer/pkg/asset" +import ( + "github.com/openshift/installer/pkg/asset" +) // ServiceAccountKeyPair is the asset that generates the service-account public/private key pair. 
type ServiceAccountKeyPair struct { KeyPair } -var _ asset.WritableAsset = (*ServiceAccountKeyPair)(nil) +var _ asset.Asset = (*ServiceAccountKeyPair)(nil) // Dependencies returns the dependency of the the cert/key pair, which includes // the parent CA, and install config if it depends on the install config for @@ -25,8 +27,3 @@ func (a *ServiceAccountKeyPair) Generate(dependencies asset.Parents) error { func (a *ServiceAccountKeyPair) Name() string { return "Key Pair (service-account.pub)" } - -// Load is a no-op because the service account keypair is not written to disk. -func (a *ServiceAccountKeyPair) Load(asset.FileFetcher) (bool, error) { - return false, nil -}
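
Taken together, the patch reduces the target flow in `runTargetCmd` to load-state-file → fetch → persist → save, with no on-disk `Load`/`Purge` pass. The sketch below shows how a caller might drive the simplified store; it mirrors the new `runTargetCmd`, but the `installconfig.InstallConfig` target, the `"."` directory, and the plain `log` package are illustrative stand-ins rather than part of the change itself.

```go
package main

import (
	"log"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/installconfig"
)

func main() {
	dir := "." // stand-in for the installer's rootOpts.dir

	// Recover previously generated assets from the state file, if any.
	// As in the new runTargetCmd, a load failure is logged rather than fatal:
	// everything can simply be regenerated from scratch.
	assetStore := &asset.StoreImpl{}
	if err := assetStore.Load(dir); err != nil {
		log.Printf("could not load assets from state file: %v", err)
	}

	// Fetch resolves the target depth-first: each dependency is taken from
	// the state file when present, otherwise generated via Generate().
	target := &installconfig.InstallConfig{}
	if err := assetStore.Fetch(target); err != nil {
		log.Fatalf("failed to generate %s: %v", target.Name(), err)
	}

	// Write the target's files to disk for the user ...
	if err := asset.PersistToFile(target, dir); err != nil {
		log.Fatalf("failed to write %s to disk: %v", target.Name(), err)
	}

	// ... and save the full asset graph so later invocations can reuse it.
	if err := assetStore.Save(dir); err != nil {
		log.Fatalf("failed to write to state file: %v", err)
	}
}
```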