diff --git a/lib/client/identityfile/identity.go b/lib/client/identityfile/identity.go index 22a3bdee6c01d..fb80e9c7b50fc 100644 --- a/lib/client/identityfile/identity.go +++ b/lib/client/identityfile/identity.go @@ -137,6 +137,9 @@ type ConfigWriter interface { // permissions if the file is new. WriteFile(name string, data []byte, perm os.FileMode) error + // ReadFile reads the file at path `name` + ReadFile(name string) ([]byte, error) + // Remove removes a file. Remove(name string) error @@ -152,6 +155,11 @@ func (s *StandardConfigWriter) WriteFile(name string, data []byte, perm os.FileM return os.WriteFile(name, data, perm) } + +// ReadFile reads the file at path `name`, returning its contents +func (s *StandardConfigWriter) ReadFile(name string) ([]byte, error) { + return os.ReadFile(name) +} + // Remove removes the named file or (empty) directory. // If there is an error, it will be of type *PathError. func (s *StandardConfigWriter) Remove(name string) error { @@ -389,7 +397,7 @@ func Write(ctx context.Context, cfg WriteConfig) (filesWritten []string, err err case FormatKubernetes: filesWritten = append(filesWritten, cfg.OutputPath) - // If the user does not want to override, it will merge the previous kubeconfig + // If the user does not want to override, it will merge the previous kubeconfig // with the new entry. 
if err := checkOverwrite(ctx, writer, cfg.OverwriteDestination, filesWritten...); err != nil && !trace.IsAlreadyExists(err) { return nil, trace.Wrap(err) @@ -408,13 +416,13 @@ func Write(ctx context.Context, cfg WriteConfig) (filesWritten []string, err err kubeCluster = []string{cfg.KubeClusterName} } - if err := kubeconfig.Update(cfg.OutputPath, kubeconfig.Values{ + if err := kubeconfig.UpdateConfig(cfg.OutputPath, kubeconfig.Values{ TeleportClusterName: cfg.Key.ClusterName, ClusterAddr: cfg.KubeProxyAddr, Credentials: cfg.Key, TLSServerName: cfg.KubeTLSServerName, KubeClusters: kubeCluster, - }, cfg.KubeStoreAllCAs); err != nil { + }, cfg.KubeStoreAllCAs, writer); err != nil { return nil, trace.Wrap(err) } diff --git a/lib/client/identityfile/identity_test.go b/lib/client/identityfile/identity_test.go index 121e3746d54d0..dd53e75931383 100644 --- a/lib/client/identityfile/identity_test.go +++ b/lib/client/identityfile/identity_test.go @@ -180,6 +180,42 @@ func TestWrite(t *testing.T) { assertKubeconfigContents(t, cfg.OutputPath, key.ClusterName, "far.away.cluster", cfg.KubeTLSServerName) } +// Assert that the kubeconfig writer only writes to the supplied filesystem +// abstraction, and not to the system +func TestWriteKubeOnlyWritesToWriter(t *testing.T) { + key := newClientKey(t) + outputDir := t.TempDir() + + fs := NewInMemoryConfigWriter() + cfg := WriteConfig{ + Key: key, + Writer: fs, + } + + cfg.OutputPath = filepath.Join(outputDir, "kubeconfig") + cfg.Format = FormatOpenSSH + cfg.KubeProxyAddr = "far.away.cluster" + cfg.KubeTLSServerName = "kube.far.away.cluster" + files, err := Write(context.Background(), cfg) + require.NoError(t, err) + + // Assert that none of the listed files were written to the real filesystem + for _, fn := range files { + // assert that no such file exists on the system filesystem + _, err := os.Stat(fn) + require.Error(t, err) + + // assert that the file exists in the filesystem abstraction + require.Contains(t, fs.files, fn) + } + + // Assert that nothing has 
been written to the temp dir without it being added to + // the returned file list + actualFiles, err := os.ReadDir(outputDir) + require.NoError(t, err) + require.Empty(t, actualFiles) +} + func TestWriteAllFormats(t *testing.T) { for _, format := range KnownFileFormats { t.Run(string(format), func(t *testing.T) { diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go index 3c0f6274a50cf..ae54abc4661a5 100644 --- a/lib/client/identityfile/inmemory_config_writer.go +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -20,20 +20,35 @@ import ( "io/fs" "os" "sync" - "time" "github.com/gravitational/trace" + "github.com/jonboulle/clockwork" "github.com/gravitational/teleport/lib/utils" ) +type InMemoryFS map[string]*utils.InMemoryFile + +type InMemoryConfigWriterOption func(*InMemoryConfigWriter) + +func WithClock(clock clockwork.Clock) InMemoryConfigWriterOption { + return func(w *InMemoryConfigWriter) { + w.clock = clock + } +} + // NewInMemoryConfigWriter creates a new virtual file system // It stores the files contents and their properties in memory -func NewInMemoryConfigWriter() *InMemoryConfigWriter { - return &InMemoryConfigWriter{ +func NewInMemoryConfigWriter(options ...InMemoryConfigWriterOption) *InMemoryConfigWriter { + w := &InMemoryConfigWriter{ mux: &sync.RWMutex{}, - files: make(map[string]*utils.InMemoryFile), + clock: clockwork.NewRealClock(), + files: InMemoryFS{}, + } + for _, option := range options { + option(w) + } + return w } // InMemoryConfigWriter is a basic virtual file system abstraction that writes into memory // instead of writing to a more persistent storage. 
type InMemoryConfigWriter struct { mux *sync.RWMutex - files map[string]*utils.InMemoryFile + clock clockwork.Clock + files InMemoryFS } // WriteFile writes the given data to path `name` @@ -49,7 +65,7 @@ type InMemoryConfigWriter struct { func (m *InMemoryConfigWriter) WriteFile(name string, data []byte, perm os.FileMode) error { m.mux.Lock() defer m.mux.Unlock() - m.files[name] = utils.NewInMemoryFile(name, perm, time.Now(), data) + m.files[name] = utils.NewInMemoryFile(name, perm, m.clock.Now(), data) return nil } @@ -92,7 +108,13 @@ func (m *InMemoryConfigWriter) ReadFile(name string) ([]byte, error) { return f.Content(), nil } -// Open is not implemented but exists here to satisfy the io/fs.ReadFileFS interface. +// Open is not implemented but exists here to satisfy the io/fs.FS interface. func (m *InMemoryConfigWriter) Open(name string) (fs.File, error) { return nil, trace.NotImplemented("Open is not implemented for InMemoryConfigWriter") } + +func (m *InMemoryConfigWriter) WithReadonlyFiles(fn func(InMemoryFS) error) error { + m.mux.RLock() + defer m.mux.RUnlock() + return fn(m.files) +} diff --git a/lib/kube/kubeconfig/kubeconfig.go b/lib/kube/kubeconfig/kubeconfig.go index b9a9a8f176b78..46b130908e8bb 100644 --- a/lib/kube/kubeconfig/kubeconfig.go +++ b/lib/kube/kubeconfig/kubeconfig.go @@ -101,17 +101,47 @@ type ExecValues struct { Env map[string]string } +// ConfigFS is a simple filesystem abstraction to allow alternative file +// writing options when generating kube config files. +type ConfigFS interface { + // WriteFile writes the given data to path `name`, using the specified + // permissions if the file is new. 
+ WriteFile(name string, data []byte, perm os.FileMode) error + + ReadFile(name string) ([]byte, error) +} + +// defaultConfigFS is a ConfigFS that is backed by the system filesystem +type defaultConfigFS struct{} + +func (defaultConfigFS) WriteFile(name string, data []byte, perm os.FileMode) error { + return os.WriteFile(name, data, perm) +} + +func (defaultConfigFS) ReadFile(name string) ([]byte, error) { + return os.ReadFile(name) +} + // Update adds Teleport configuration to kubeconfig. // // If `path` is empty, Update will try to guess it based on the environment or // known defaults. func Update(path string, v Values, storeAllCAs bool) error { + return UpdateConfig(path, v, storeAllCAs, defaultConfigFS{}) +} + +// UpdateConfig adds Teleport configuration to kubeconfig, reading and writing +// from the supplied ConfigFS +// +// If `path` is empty, Update will try to guess it based on the environment or +// known defaults. +func UpdateConfig(path string, v Values, storeAllCAs bool, fs ConfigFS) error { contextTmpl, err := parseContextOverrideTemplate(v.OverrideContext) if err != nil { return trace.Wrap(err) } - config, err := Load(path) + config, err := LoadConfig(path, fs) if err != nil { return trace.Wrap(err) } @@ -237,7 +267,7 @@ func Update(path string, v Values, storeAllCAs bool) error { log.WithError(err).Warn("Kubernetes integration is not supported when logging in with a non-rsa private key.") } - return Save(path, *config) + return SaveConfig(path, *config, fs) } func setContext(contexts map[string]*clientcmdapi.Context, name, cluster, auth, kubeName, namespace string) { @@ -337,12 +367,29 @@ func removeByServerAddr(config *clientcmdapi.Config, wantServer string) { // Load tries to read a kubeconfig file and if it can't, returns an error. // One exception, missing files result in empty configs, not an error. 
func Load(path string) (*clientcmdapi.Config, error) { + return LoadConfig(path, defaultConfigFS{}) +} + +// LoadConfig tries to read a kubeconfig file and if it can't, returns an error. +// One exception, missing files result in empty configs, not an error. +func LoadConfig(path string, fs ConfigFS) (*clientcmdapi.Config, error) { filename, err := finalPath(path) if err != nil { return nil, trace.Wrap(err) } - config, err := clientcmd.LoadFromFile(filename) - if err != nil && !os.IsNotExist(err) { + + configBytes, err := fs.ReadFile(filename) + switch { + case os.IsNotExist(err): + return clientcmdapi.NewConfig(), nil + + case err != nil: + err = trace.ConvertSystemError(err) + return nil, trace.WrapWithMessage(err, "failed to load existing kubeconfig %q: %v", filename, err) + } + + config, err := clientcmd.Load(configBytes) + if err != nil { err = trace.ConvertSystemError(err) return nil, trace.WrapWithMessage(err, "failed to parse existing kubeconfig %q: %v", filename, err) } @@ -350,24 +397,68 @@ func Load(path string) (*clientcmdapi.Config, error) { config = clientcmdapi.NewConfig() } + // Now that we are using clientcmd.Load() we need to manually set all of the + // object origin values manually. We used to use clientcmd.LoadFile() that + // did it for us. + setConfigOriginsAndDefaults(config, filename) + return config, nil } +// setConfigOriginsAndDefaults sets up the origin info for the config file. 
+func setConfigOriginsAndDefaults(config *clientcmdapi.Config, filename string) { + // set LocationOfOrigin on every Cluster, User, and Context + for key, obj := range config.AuthInfos { + obj.LocationOfOrigin = filename + config.AuthInfos[key] = obj + } + for key, obj := range config.Clusters { + obj.LocationOfOrigin = filename + config.Clusters[key] = obj + } + for key, obj := range config.Contexts { + obj.LocationOfOrigin = filename + config.Contexts[key] = obj + } + + if config.AuthInfos == nil { + config.AuthInfos = map[string]*clientcmdapi.AuthInfo{} + } + if config.Clusters == nil { + config.Clusters = map[string]*clientcmdapi.Cluster{} + } + if config.Contexts == nil { + config.Contexts = map[string]*clientcmdapi.Context{} + } +} + // Save saves updated config to location specified by environment variable or // default location func Save(path string, config clientcmdapi.Config) error { + return SaveConfig(path, config, defaultConfigFS{}) +} + +// SaveConfig saves updated config to location specified by environment variable or +// default location. 
+func SaveConfig(path string, config clientcmdapi.Config, fs ConfigFS) error { filename, err := finalPath(path) if err != nil { return trace.Wrap(err) } - if err := clientcmd.WriteToFile(config, filename); err != nil { + configBytes, err := clientcmd.Write(config) + if err != nil { + return trace.ConvertSystemError(err) + } + + if err := fs.WriteFile(filename, configBytes, 0600); err != nil { return trace.ConvertSystemError(err) } return nil + } -// finalPath returns the final path to kubeceonfig using, in order of +// finalPath returns the final path to kubeconfig using, in order of // precedence: // - `customPath`, if not empty // - ${KUBECONFIG} environment variable diff --git a/lib/tbot/config/template.go b/lib/tbot/config/template.go index 2d1998561d295..44ba82391b4b4 100644 --- a/lib/tbot/config/template.go +++ b/lib/tbot/config/template.go @@ -28,6 +28,7 @@ import ( "github.com/gravitational/teleport/api/utils/keys" "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/identityfile" "github.com/gravitational/teleport/lib/tbot/bot" "github.com/gravitational/teleport/lib/tbot/identity" ) @@ -129,6 +130,15 @@ func (b *BotConfigWriter) Stat(name string) (fs.FileInfo, error) { return nil, &os.PathError{Op: "stat", Path: name, Err: os.ErrNotExist} } +// ReadFile reads a given file. This implementation always returns not found. +func (b *BotConfigWriter) ReadFile(name string) ([]byte, error) { + return nil, &os.PathError{Op: "read", Path: name, Err: os.ErrNotExist} +} + +// compile-time assertion that the BotConfigWriter implements the +// identityfile.ConfigWriter interface +var _ identityfile.ConfigWriter = (*BotConfigWriter)(nil) + // newClientKey returns a sane client.Key for the given bot identity. 
func newClientKey(ident *identity.Identity, hostCAs []types.CertAuthority) (*client.Key, error) { pk, err := keys.ParsePrivateKey(ident.PrivateKeyBytes) diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index 930bb064bada2..83e16b4dfca31 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -255,8 +255,8 @@ func (a *AuthCommand) GenerateKeys(ctx context.Context) error { // GenerateAndSignKeys generates a new keypair and signs it for role func (a *AuthCommand) GenerateAndSignKeys(ctx context.Context, clusterAPI auth.ClientI) error { if a.streamTarfile { - tarWriter := newTarWriter(os.Stdout, clockwork.NewRealClock()) - defer tarWriter.Close() + tarWriter := newTarWriter(clockwork.NewRealClock()) + defer tarWriter.Archive(os.Stdout) a.identityWriter = tarWriter } @@ -952,7 +952,7 @@ func (a *AuthCommand) checkKubeCluster(ctx context.Context, clusterAPI auth.Clie if a.outputFormat != identityfile.FormatKubernetes && a.kubeCluster != "" { // User set --kube-cluster-name but it's not actually used for the chosen --format. // Print a warning but continue. - fmt.Printf("Note: --kube-cluster-name is only used with --format=%q, ignoring for --format=%q\n", identityfile.FormatKubernetes, a.outputFormat) + fmt.Fprintf(a.helperMsgDst(), "Note: --kube-cluster-name is only used with --format=%q, ignoring for --format=%q\n", identityfile.FormatKubernetes, a.outputFormat) } if a.outputFormat != identityfile.FormatKubernetes { return nil @@ -979,7 +979,7 @@ func (a *AuthCommand) checkProxyAddr(ctx context.Context, clusterAPI auth.Client if a.outputFormat != identityfile.FormatKubernetes && a.proxyAddr != "" { // User set --proxy but it's not actually used for the chosen --format. // Print a warning but continue. 
- fmt.Printf("Note: --proxy is only used with --format=%q, ignoring for --format=%q\n", identityfile.FormatKubernetes, a.outputFormat) + fmt.Fprintf(a.helperMsgDst(), "Note: --proxy is only used with --format=%q, ignoring for --format=%q\n", identityfile.FormatKubernetes, a.outputFormat) return nil } if a.outputFormat != identityfile.FormatKubernetes { diff --git a/tool/tctl/common/tarwriter.go b/tool/tctl/common/tarwriter.go index de77fe8d12160..a378aec826f63 100644 --- a/tool/tctl/common/tarwriter.go +++ b/tool/tctl/common/tarwriter.go @@ -17,8 +17,7 @@ package common import ( "archive/tar" "io" - "io/fs" - "os" + "sort" "github.com/gravitational/trace" "github.com/jonboulle/clockwork" @@ -27,55 +26,58 @@ import ( ) // tarWriter implements a ConfigWriter that generates a tarfile from the -// files written to the config writer. Does not implement +// files written to it. type tarWriter struct { - tarball *tar.Writer - clock clockwork.Clock + *identityfile.InMemoryConfigWriter } -// newTarWriter creates a new tarWriter that writes the generated tar -// file to the supplied `io.Writer`. Be sure to terminate the tar archive -// by calling `Close()` on the resuting `tarWriter`. -func newTarWriter(out io.Writer, clock clockwork.Clock) *tarWriter { - return &tarWriter{ - tarball: tar.NewWriter(out), - clock: clock, - } +// newTarWriter creates a new tarWriter that caches the files written to it and +// dumps them to a tarball on demand. +func newTarWriter(clock clockwork.Clock) *tarWriter { + cache := identityfile.NewInMemoryConfigWriter(identityfile.WithClock(clock)) + return &tarWriter{InMemoryConfigWriter: cache} } -// Remove is not implemented, and only exists to fill out the -// `ConfigWriter` interface. -func (t *tarWriter) Remove(_ string) error { - return trace.NotImplemented("tarWriter.Remove()") -} +// Archive dumps the contents of the ConfigWriter to the supplied sink as +// a tarball. May be called multiple times on the same instance. 
+func (t *tarWriter) Archive(out io.Writer) error { + tarball := tar.NewWriter(out) -// Stat always returns `ErrNotExist` in ordre to sidestep the -// overwite check when writing certificates via a ConfigWriter. -func (t *tarWriter) Stat(_ string) (fs.FileInfo, error) { - return nil, os.ErrNotExist -} + err := t.WithReadonlyFiles(func(files identityfile.InMemoryFS) error { + // Sort the filenames so that files will be written to the output tarball + // in a repeatable order + filenames := make([]string, 0, len(files)) + for filename := range files { + filenames = append(filenames, filename) + } + sort.Strings(filenames) -// WriteFile adds the supplied content to the tar archive. -func (t *tarWriter) WriteFile(name string, content []byte, mode fs.FileMode) error { - header := &tar.Header{ - Name: name, - Mode: int64(mode), - ModTime: t.clock.Now(), - Size: int64(len(content)), - } - if err := t.tarball.WriteHeader(header); err != nil { - return trace.Wrap(err) - } - if _, err := t.tarball.Write(content); err != nil { + // Stream the tarball to the supplied output writer + for _, filename := range filenames { + file := files[filename] + header := &tar.Header{ + Name: filename, + Mode: int64(file.Mode()), + ModTime: file.ModTime(), + Size: file.Size(), + } + if err := tarball.WriteHeader(header); err != nil { + return trace.Wrap(err) + } + if _, err := tarball.Write(file.Content()); err != nil { + return trace.Wrap(err) + } + } + return nil + }) + + if err != nil { return trace.Wrap(err) } - return nil -} -// Close finalizes the tar archive, adding any necessary padding and footers. 
-func (t *tarWriter) Close() error { - return trace.Wrap(t.tarball.Close()) + return trace.Wrap(tarball.Close()) } -// identityfile.ConfigWriter implementation check +// compile-time assertion that the tarWriter implements the ConfigWriter +// interface var _ identityfile.ConfigWriter = (*tarWriter)(nil) diff --git a/tool/tctl/common/tarwriter_test.go b/tool/tctl/common/tarwriter_test.go index 39523fbe6b075..0c02264fc7736 100644 --- a/tool/tctl/common/tarwriter_test.go +++ b/tool/tctl/common/tarwriter_test.go @@ -18,96 +18,194 @@ import ( "archive/tar" "bytes" "io" + "io/fs" "os" + "sort" "testing" "time" + "github.com/gravitational/trace" "github.com/jonboulle/clockwork" "github.com/stretchr/testify/require" ) func TestTarWriterProducesValidEmptyTarfile(t *testing.T) { + uut := newTarWriter(clockwork.NewFakeClock()) var tarball bytes.Buffer - uut := newTarWriter(&tarball, clockwork.NewFakeClock()) - require.NoError(t, uut.Close()) + require.NoError(t, uut.Archive(&tarball)) reader := tar.NewReader(&tarball) _, err := reader.Next() require.ErrorIs(t, err, io.EOF) } -func TestTarWriterStatAlwaysReturnsNotFound(t *testing.T) { - var tarball bytes.Buffer - uut := newTarWriter(&tarball, clockwork.NewFakeClock()) - defer uut.Close() - +func TestTarWriterStatReturnsNotFoundOnEmptyArchive(t *testing.T) { + uut := newTarWriter(clockwork.NewFakeClock()) _, err := uut.Stat("somefile") require.ErrorIs(t, err, os.ErrNotExist) } -func TestTarWriterFileWrite(t *testing.T) { - testContent := []struct { - filename string +func TestTarWriter(t *testing.T) { + // The TAR format doesn't support sub-second timestamps, so make sure + // that the nsec component is 0 + t0 := time.Date(1976, time.July, 29, 2, 30, 01, 0, time.Local) + + testContent := map[string]struct { filemode int64 content []byte }{ - { - filename: "alpha", + "alpha": { filemode: 0600, content: []byte("I am the very model of a modern Major-General"), - }, { - filename: "beta", + }, + "beta": { filemode: 0777, content: 
[]byte("I've information vegetable, animal, and mineral"), - }, { - filename: "gamma", + }, + "gamma": { filemode: 0666, content: []byte("I know the kings of England, and I quote the fights historical"), }, + "delta": { + filemode: 644, + content: []byte("From Marathon to Waterloo, in order categorical"), + }, } - // The TAR format doesn't support sub-second timestamps, so make sure - // that the nsec component is 0 - t0 := time.Date(1976, time.July, 29, 2, 30, 01, 0, time.Local) - fakeClock := clockwork.NewFakeClockAt(t0) - - // GIVEN a tar file created by writing data to a identity file writer - var tarball bytes.Buffer - uut := newTarWriter(&tarball, fakeClock) - for _, f := range testContent { - require.NoError(t, uut.WriteFile(f.filename, f.content, os.FileMode(f.filemode))) - fakeClock.Advance(24 * time.Hour) + filenames := make([]string, 0, len(testContent)) + for fn := range testContent { + filenames = append(filenames, fn) } - require.NoError(t, uut.Close()) - - // WHEN I read the file - i := 0 - reader := tar.NewReader(&tarball) - expectedTime := t0 - - // Expect all the file content and metadata is preserved - var err error - for err != io.EOF { - header, err := reader.Next() - if err == io.EOF { - break + sort.Strings(filenames) + + newPopulatedConfigWriter := func(innerT *testing.T) (*tarWriter, map[string]time.Time) { + modTimes := map[string]time.Time{} + fakeClock := clockwork.NewFakeClockAt(t0) + uut := newTarWriter(fakeClock) + for fn, f := range testContent { + err := uut.WriteFile(fn, f.content, os.FileMode(f.filemode)) + require.NoError(innerT, err) + + modTimes[fn] = fakeClock.Now() + fakeClock.Advance(24 * time.Hour) } + var tarball bytes.Buffer + require.NoError(innerT, uut.Archive(&tarball)) + return uut, modTimes + } - require.Equal(t, testContent[i].filename, header.Name) - require.Equal(t, testContent[i].filemode, header.Mode) - require.Equal(t, len(testContent[i].content), int(header.Size)) - require.Equal(t, expectedTime, 
header.ModTime) - - actualContent := make([]byte, header.Size) - _, err = reader.Read(actualContent) - if err != nil && err != io.EOF { - require.NoError(t, err) + t.Run("simple write", func(t *testing.T) { + // Given a tarball created by writing content to a ConfigWriter + var tarball bytes.Buffer + uut, modTimes := newPopulatedConfigWriter(t) + err := uut.Archive(&tarball) + require.NoError(t, err) + + // WHEN I read the file + i := 0 + reader := tar.NewReader(&tarball) + + // Expect all the file content and metadata is preserved + for err != io.EOF { + header, err := reader.Next() + if err == io.EOF { + break + } + + // Expect that the files will come out of the TAR ordered by + // filename, and that their metadata is preserved + filename := filenames[i] + expected := testContent[filename] + require.Equal(t, filename, header.Name) + require.Equal(t, expected.filemode, header.Mode) + require.Len(t, expected.content, int(header.Size)) + require.Equal(t, modTimes[filename], header.ModTime) + + actualContent := make([]byte, header.Size) + _, err = reader.Read(actualContent) + if err != nil && err != io.EOF { + require.NoError(t, err) + } + require.Equal(t, expected.content, actualContent) + i++ } - require.Equal(t, testContent[i].content, actualContent) - expectedTime = expectedTime.Add(24 * time.Hour) - i++ - } + require.Len(t, testContent, i) + }) + + t.Run("delete file", func(t *testing.T) { + // Given populated ConfigWriter + uut, _ := newPopulatedConfigWriter(t) + + // When I delete a file from the config bundle, expect that the + // operation succeeds + require.NoError(t, uut.Remove("gamma")) + + // When I create an archive from the modified ConfigWriter, expect that + // the operation succeeds + var tarball bytes.Buffer + err := uut.Archive(&tarball) + require.NoError(t, err) + + // When I list all of the files in the resulting archive... 
+ reader := tar.NewReader(&tarball) + archivedFiles := []string{} + for err != io.EOF { + header, err := reader.Next() + if err == io.EOF { + break + } + archivedFiles = append(archivedFiles, header.Name) + + // Skip over the actual content data + io.CopyN(io.Discard, reader, header.Size) + } - require.Equal(t, i, len(testContent)) + // Expect that the name of the deleted file does not appear in the + // archive listing, but all the other expected names do. + require.Len(t, archivedFiles, len(filenames)-1) + require.NotContains(t, archivedFiles, "gamma") + for _, fn := range filenames { + if fn != "gamma" { + require.Contains(t, archivedFiles, fn) + } + } + }) + + t.Run("delete missing file", func(t *testing.T) { + // Given populated ConfigWriter + uut, _ := newPopulatedConfigWriter(t) + + // When I delete a file that doesn't exist, expect the operation to + // succeed; missing files are not considered an error + err := uut.Remove("omega") + require.NoError(t, err) + }) + + t.Run("stat file", func(t *testing.T) { + // Given populated ConfigWriter + uut, modTimes := newPopulatedConfigWriter(t) + + // When I stat a file that does exist, expect the operation to succeed + info, err := uut.Stat("beta") + require.NoError(t, err) + + // Expect also that the file info is correct + expected := testContent["beta"] + require.Equal(t, "beta", info.Name()) + require.Equal(t, fs.FileMode(expected.filemode), info.Mode()) + require.Len(t, expected.content, int(info.Size())) + require.Equal(t, modTimes["beta"], info.ModTime()) + }) + + t.Run("stat missing file", func(t *testing.T) { + // Given populated ConfigWriter + uut, _ := newPopulatedConfigWriter(t) + + // When I stat a file that doesn't exist, expect the operation to + // return not found + _, err := uut.Stat("omega") + require.Error(t, err) + require.True(t, trace.IsNotFound(err)) + }) }