31 changes: 2 additions & 29 deletions pkg/security/securityassets/security_assets.go
@@ -6,22 +6,21 @@
package securityassets

import (
"io/fs"
"os"

"github.com/cockroachdb/errors/oserror"
)

// Loader describes the functions necessary to read certificate and key files.
type Loader struct {
ReadDir func(dirname string) ([]os.FileInfo, error)
ReadDir func(dirname string) ([]os.DirEntry, error)
ReadFile func(filename string) ([]byte, error)
Stat func(name string) (os.FileInfo, error)
}

// defaultLoader uses real filesystem calls.
var defaultLoader = Loader{
ReadDir: readDir,
ReadDir: os.ReadDir,
ReadFile: os.ReadFile,
Stat: os.Stat,
}
@@ -55,29 +54,3 @@ func (al Loader) FileExists(filename string) (bool, error) {
}
return true, nil
}

// readDir reads the directory named by dirname and returns a list of
// fs.FileInfo for the directory's contents, sorted by filename. If an error
// occurs reading the directory, ReadDir returns no directory entries along with
// the error.
func readDir(dirname string) ([]os.FileInfo, error) {
entries, err := os.ReadDir(dirname)
if err != nil {
return nil, err
}
infos := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
// Skip files that disappeared between ReadDir and Info().
// This can happen when the directory contains transient files
// like Unix socket lock files that are created/deleted rapidly.
if oserror.IsNotExist(err) {
continue
}
return nil, err
}
infos = append(infos, info)
}
return infos, nil
}
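With the `readDir` helper gone, callers of `Loader.ReadDir` receive `os.DirEntry` values and only pay for a stat when they actually need metadata. A minimal sketch of that calling pattern, assuming a hypothetical `./certs` directory (the size accounting is illustrative, not part of this change):

```go
package main

import (
    "fmt"
    "os"

    "github.com/cockroachdb/errors/oserror"
)

func main() {
    entries, err := os.ReadDir("./certs") // hypothetical directory
    if err != nil {
        panic(err)
    }
    var total int64
    for _, entry := range entries {
        // Name() is available without another filesystem call.
        fmt.Println(entry.Name())
        // Info() stats lazily; a file deleted between ReadDir and Info
        // (e.g. a transient socket lock file) surfaces as not-exist here.
        info, err := entry.Info()
        if err != nil {
            if oserror.IsNotExist(err) {
                continue
            }
            panic(err)
        }
        total += info.Size()
    }
    fmt.Println("total size:", total)
}
```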
16 changes: 6 additions & 10 deletions pkg/security/securitytest/securitytest.go
@@ -96,27 +96,23 @@ func AppendFile(assetPath, dstPath string) error {
return errors.CombineErrors(err, f.Close())
}

// AssetReadDir mimics ioutil.ReadDir, returning a list of []os.FileInfo for
// the specified directory.
func AssetReadDir(name string) ([]os.FileInfo, error) {
// AssetReadDir mimics os.ReadDir, returning a list of []os.DirEntry for the
// specified directory.
func AssetReadDir(name string) ([]os.DirEntry, error) {
entries, err := testCerts.ReadDir(name)
if err != nil {
return nil, err
}
infos := make([]os.FileInfo, 0, len(entries))
filtered_entries := make([]os.DirEntry, 0, len(entries))
for _, e := range entries {
info, err := e.Info()
if err != nil {
return nil, err
}
if strings.HasSuffix(e.Name(), ".md") ||
strings.HasSuffix(e.Name(), ".sh") ||
strings.HasSuffix(e.Name(), ".cnf") {
continue
}
infos = append(infos, &fileInfo{inner: info})
filtered_entries = append(filtered_entries, e)
}
return infos, nil
return filtered_entries, nil
}

// AssetStat wraps Stat().
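Because `os.DirEntry` is a type alias for `fs.DirEntry`, the entries coming out of the embedded `testCerts` filesystem can be returned as `[]os.DirEntry` without any conversion. A tiny standalone sketch of that compatibility, assuming a hypothetical embedded `testdata` directory exists at build time:

```go
package main

import (
    "embed"
    "fmt"
    "os"
)

//go:embed testdata/*
var demoFS embed.FS // hypothetical embedded directory; must exist at build time

func main() {
    // embed.FS.ReadDir returns []fs.DirEntry.
    entries, err := demoFS.ReadDir("testdata")
    if err != nil {
        panic(err)
    }
    // No conversion needed: os.DirEntry is a type alias for fs.DirEntry.
    var osEntries []os.DirEntry = entries
    for _, e := range osEntries {
        fmt.Println(e.Name(), e.IsDir())
    }
}
```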
1 change: 1 addition & 0 deletions pkg/server/dumpstore/BUILD.bazel
@@ -9,6 +9,7 @@ go_library(
"//pkg/settings",
"//pkg/settings/cluster",
"//pkg/util/log",
"@com_github_cockroachdb_errors//oserror",
],
)

31 changes: 15 additions & 16 deletions pkg/server/dumpstore/dumpstore.go
@@ -7,14 +7,14 @@ package dumpstore

import (
"context"
"io/fs"
"os"
"path/filepath"
"time"

"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors/oserror"
)

// DumpStore represents a store for dump files.
@@ -59,10 +59,10 @@ type Dumper interface {
//
// There may be files not owned by this dumper in the files array;
// these should be ignored.
PreFilter(ctx context.Context, files []os.FileInfo, cleanupFn func(fileName string) error) (preserved map[int]bool, err error)
PreFilter(ctx context.Context, files []os.DirEntry, cleanupFn func(fileName string) error) (preserved map[int]bool, err error)

// CheckOwnsFile returns true iff the dumper owns the given file.
CheckOwnsFile(ctx context.Context, fi os.FileInfo) bool
CheckOwnsFile(ctx context.Context, fi os.DirEntry) bool
}

// NewStore creates a new DumpStore.
@@ -83,22 +83,13 @@ func (s *DumpStore) GetFullPath(fileName string) string {

// GC runs the GC policy on this store.
func (s *DumpStore) GC(ctx context.Context, now time.Time, dumper Dumper) {
// NB: ioutil.ReadDir sorts the file names in ascending order.
// NB: os.ReadDir sorts the file names in ascending order.
// This brings the oldest files first.
entries, err := os.ReadDir(s.dir)
files, err := os.ReadDir(s.dir)
if err != nil {
log.Dev.Warningf(ctx, "%v", err)
return
}
files := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
log.Dev.Warningf(ctx, "%v", err)
return
}
files = append(files, info)
}

maxS := s.maxCombinedFileSizeSetting.Get(&s.st.SV)

Expand Down Expand Up @@ -129,7 +120,7 @@ func (s *DumpStore) GC(ctx context.Context, now time.Time, dumper Dumper) {
func removeOldAndTooBigExcept(
ctx context.Context,
dumper Dumper,
files []os.FileInfo,
files []os.DirEntry,
now time.Time,
maxS int64,
preserved map[int]bool,
@@ -144,8 +135,16 @@ func removeOldAndTooBigExcept(
continue
}

info, err := fi.Info()
if err != nil {
if !oserror.IsNotExist(err) {
log.Dev.Warningf(ctx, "%v", err)
}
continue
}

// Note: we are counting preserved files against the maximum.
actualSize += fi.Size()
actualSize += info.Size()

// Ignore all the preserved entries, even if they are "too old".
if preserved[i] {
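With the interface now taking `[]os.DirEntry`, a `Dumper` implementation works purely off entry names until it explicitly asks for metadata. A self-contained sketch with an invented `demodump.` prefix, showing the two methods with their new signatures (not an actual implementation from this PR):

```go
package main // standalone sketch mirroring the updated Dumper method signatures

import (
    "context"
    "fmt"
    "os"
    "strings"
)

// demoDumper owns files with a made-up "demodump." prefix and preserves none.
type demoDumper struct{}

// CheckOwnsFile matches the updated signature: only Name() is needed, no stat.
func (demoDumper) CheckOwnsFile(_ context.Context, fi os.DirEntry) bool {
    return strings.HasPrefix(fi.Name(), "demodump.")
}

// PreFilter matches the updated signature and marks nothing for preservation.
func (demoDumper) PreFilter(
    _ context.Context, files []os.DirEntry, _ func(fileName string) error,
) (map[int]bool, error) {
    return map[int]bool{}, nil
}

func main() {
    entries, err := os.ReadDir(".") // any directory works for the demo
    if err != nil {
        panic(err)
    }
    d := demoDumper{}
    for _, e := range entries {
        fmt.Println(e.Name(), "owned:", d.CheckOwnsFile(context.Background(), e))
    }
}
```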
17 changes: 4 additions & 13 deletions pkg/server/dumpstore/dumpstore_test.go
@@ -8,7 +8,6 @@ package dumpstore
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
@@ -156,16 +155,16 @@ func TestRemoveOldAndTooBig(t *testing.T) {
type myDumper struct{}

func (myDumper) PreFilter(
_ context.Context, files []os.FileInfo, cleanupFn func(fileName string) error,
_ context.Context, files []os.DirEntry, cleanupFn func(fileName string) error,
) (preserved map[int]bool, err error) {
panic("unimplemented")
}

func (myDumper) CheckOwnsFile(_ context.Context, fi os.FileInfo) bool {
func (myDumper) CheckOwnsFile(_ context.Context, fi os.DirEntry) bool {
return strings.HasPrefix(fi.Name(), "memprof")
}

func populate(t *testing.T, dirName string, fileNames []string, sizes []int64) []os.FileInfo {
func populate(t *testing.T, dirName string, fileNames []string, sizes []int64) []os.DirEntry {
if len(sizes) > 0 {
require.Equal(t, len(fileNames), len(sizes))
}
@@ -187,17 +186,9 @@ func populate(t *testing.T, dirName string, fileNames []string, sizes []int64) [
}

// Retrieve the file list for the remainder of the test.
entries, err := os.ReadDir(dirName)
files, err := os.ReadDir(dirName)
if err != nil {
t.Fatal(err)
}
files := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
t.Fatal(err)
}
files = append(files, info)
}
return files
}
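The GC path above leans on `os.ReadDir` returning entries sorted by filename, which is what makes timestamp-named dumps come back oldest first. A standalone sketch that exercises that property (the file names are invented):

```go
package main // verifies the sorted-order assumption behind the GC code

import (
    "fmt"
    "os"
    "path/filepath"
    "sort"
)

func main() {
    dir, err := os.MkdirTemp("", "readdir-order")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    // Create files deliberately out of lexical order.
    for _, name := range []string{"dump.2024-03-02", "dump.2024-03-01", "dump.2024-03-03"} {
        if err := os.WriteFile(filepath.Join(dir, name), nil, 0o644); err != nil {
            panic(err)
        }
    }

    entries, err := os.ReadDir(dir)
    if err != nil {
        panic(err)
    }
    names := make([]string, len(entries))
    for i, e := range entries {
        names[i] = e.Name()
    }
    // os.ReadDir sorts by filename, so the oldest timestamped dump is first.
    fmt.Println(names, sort.StringsAreSorted(names)) // [...-01 ...-02 ...-03] true
}
```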
4 changes: 2 additions & 2 deletions pkg/server/goroutinedumper/goroutinedumper.go
@@ -136,7 +136,7 @@ func (gd *GoroutineDumper) gcDumps(ctx context.Context, now time.Time) {

// PreFilter is part of the dumpstore.Dumper interface.
func (gd *GoroutineDumper) PreFilter(
ctx context.Context, files []os.FileInfo, cleanupFn func(fileName string) error,
ctx context.Context, files []os.DirEntry, cleanupFn func(fileName string) error,
) (preserved map[int]bool, _ error) {
preserved = make(map[int]bool)
for i := len(files) - 1; i >= 0; i-- {
@@ -150,7 +150,7 @@
}

// CheckOwnsFile is part of the dumpstore.Dumper interface.
func (gd *GoroutineDumper) CheckOwnsFile(_ context.Context, fi os.FileInfo) bool {
func (gd *GoroutineDumper) CheckOwnsFile(_ context.Context, fi os.DirEntry) bool {
return strings.HasPrefix(fi.Name(), goroutineDumpPrefix)
}

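Most call sites in this PR only change the parameter type because `os.DirEntry` still offers `Name()`; only code that needs size or modification time has to go through `Info()`. For reference, this mirror of the standard library's `fs.DirEntry` method set (comments paraphrased; `os.DirEntry` is an alias for it) compiles as a stand-alone check:

```go
// Package dirdemo mirrors the io/fs DirEntry method set for reference;
// os.DirEntry is a type alias for fs.DirEntry in the standard library.
package dirdemo

import "io/fs"

// DirEntry lists the methods available on every os.DirEntry value.
type DirEntry interface {
    Name() string               // base name of the entry, without the directory
    IsDir() bool                // reports whether the entry is a directory
    Type() fs.FileMode          // type bits of the entry's file mode
    Info() (fs.FileInfo, error) // stats the entry lazily; may return a not-exist error
}

// Any fs.DirEntry (and therefore any os.DirEntry) satisfies this mirror.
var _ DirEntry = fs.DirEntry(nil)
```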
6 changes: 3 additions & 3 deletions pkg/server/profiler/profilestore.go
@@ -67,15 +67,15 @@ func (s *profileStore) makeNewFileName(timestamp time.Time, curHeap int64) strin

// PreFilter is part of the dumpstore.Dumper interface.
func (s *profileStore) PreFilter(
ctx context.Context, files []os.FileInfo, cleanupFn func(fileName string) error,
ctx context.Context, files []os.DirEntry, cleanupFn func(fileName string) error,
) (preserved map[int]bool, _ error) {
maxP := maxProfiles.Get(&s.st.SV)
preserved = s.cleanupLastRampup(ctx, files, maxP, cleanupFn)
return
}

// CheckOwnsFile is part of the dumpstore.Dumper interface.
func (s *profileStore) CheckOwnsFile(ctx context.Context, fi os.FileInfo) bool {
func (s *profileStore) CheckOwnsFile(ctx context.Context, fi os.DirEntry) bool {
ok, _, _ := s.parseFileName(ctx, fi.Name())
return ok
}
@@ -91,7 +91,7 @@ func (s *profileStore) CheckOwnsFile(ctx context.Context, fi os.FileInfo) bool {
// The preserved return value contains the indexes in files
// corresponding to the last ramp-up that were not passed to fn.
func (s *profileStore) cleanupLastRampup(
ctx context.Context, files []os.FileInfo, maxP int64, fn func(string) error,
ctx context.Context, files []os.DirEntry, maxP int64, fn func(string) error,
) (preserved map[int]bool) {
preserved = make(map[int]bool)
curMaxHeap := uint64(math.MaxUint64)
13 changes: 2 additions & 11 deletions pkg/server/profiler/profilestore_test.go
@@ -8,7 +8,6 @@ package profiler
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"testing"
@@ -208,7 +207,7 @@ func TestCleanupLastRampup(t *testing.T) {
}
}

func populate(t *testing.T, dirName string, fileNames []string) []os.FileInfo {
func populate(t *testing.T, dirName string, fileNames []string) []os.DirEntry {
for _, fn := range fileNames {
f, err := os.Create(filepath.Join(dirName, fn))
if err != nil {
@@ -221,17 +220,9 @@ func populate(t *testing.T, dirName string, fileNames []string) []os.FileInfo {
}

// Retrieve the file list for the remainder of the test.
entries, err := os.ReadDir(dirName)
files, err := os.ReadDir(dirName)
if err != nil {
t.Fatal(err)
}
files := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
t.Fatal(err)
}
files = append(files, info)
}
return files
}
4 changes: 2 additions & 2 deletions pkg/server/tracedumper/tracedumper.go
@@ -33,7 +33,7 @@ type TraceDumper struct {

// PreFilter is part of the dumpstore.Dumper interface.
func (t *TraceDumper) PreFilter(
ctx context.Context, files []os.FileInfo, _ func(fileName string) error,
ctx context.Context, files []os.DirEntry, _ func(fileName string) error,
) (preserved map[int]bool, err error) {
preserved = make(map[int]bool)
for i := len(files) - 1; i >= 0; i-- {
@@ -47,7 +47,7 @@
}

// CheckOwnsFile is part of the dumpstore.Dumper interface.
func (t *TraceDumper) CheckOwnsFile(ctx context.Context, fi os.FileInfo) bool {
func (t *TraceDumper) CheckOwnsFile(ctx context.Context, fi os.DirEntry) bool {
return strings.HasPrefix(fi.Name(), jobTraceDumpPrefix)
}

4 changes: 2 additions & 2 deletions pkg/util/tracing/goexectrace/simple_flight_recorder.go
@@ -103,14 +103,14 @@ func (sfr *SimpleFlightRecorder) TimestampedFilename() string {
var fileMatchRegexp = regexp.MustCompile("^executiontrace.*out$")

// CheckOwnsFile is part of the `Dumper` interface.
func (sfr *SimpleFlightRecorder) CheckOwnsFile(ctx context.Context, fi os.FileInfo) bool {
func (sfr *SimpleFlightRecorder) CheckOwnsFile(ctx context.Context, fi os.DirEntry) bool {
return fileMatchRegexp.MatchString(fi.Name())
}

// PreFilter is part of the `Dumper` interface. In this case we do not mark any
// files for preservation.
func (sfr *SimpleFlightRecorder) PreFilter(
ctx context.Context, files []os.FileInfo, cleanupFn func(fileName string) error,
ctx context.Context, files []os.DirEntry, cleanupFn func(fileName string) error,
) (preserved map[int]bool, err error) {
return nil, nil
}
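`fileMatchRegexp` anchors only the prefix and suffix, so anything of the form `executiontrace…out` is claimed regardless of what sits in the middle. A quick standalone check of that behavior (the file names are invented for illustration):

```go
package main

import (
    "fmt"
    "regexp"
)

var fileMatchRegexp = regexp.MustCompile("^executiontrace.*out$")

func main() {
    for _, name := range []string{
        "executiontrace.2024-03-01T12_00_00.000.out", // hypothetical flight-recorder dump: matches
        "executiontrace.out",                         // matches: ".*" also allows a short middle
        "goroutine_dump.2024-03-01.out",              // different prefix: not owned
        "executiontrace.2024-03-01.out.tmp",          // trailing suffix: not owned
    } {
        fmt.Printf("%-45s %v\n", name, fileMatchRegexp.MatchString(name))
    }
}
```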