diff --git a/.github/scripts/command/gateway.sh b/.github/scripts/command/gateway.sh
new file mode 100755
index 000000000000..a6e944738ced
--- /dev/null
+++ b/.github/scripts/command/gateway.sh
@@ -0,0 +1,214 @@
+#!/bin/bash -e
+source .github/scripts/common/common.sh
+
+[[ -z "$META" ]] && META=redis
+source .github/scripts/start_meta_engine.sh
+start_meta_engine $META
+META_URL=$(get_meta_url $META)
+wget https://dl.min.io/client/mc/release/linux-amd64/archive/mc.RELEASE.2021-04-22T17-40-00Z -O mc
+chmod +x mc
+export MINIO_ROOT_USER=admin
+export MINIO_ROOT_PASSWORD=admin123
+export MINIO_REFRESH_IAM_INTERVAL=10s
+
+prepare_test()
+{
+    umount_jfs /tmp/jfs $META_URL
+    lsof -i :9001 | awk 'NR!=1 {print $2}' | xargs -r kill -9 || true
+    lsof -i :9002 | awk 'NR!=1 {print $2}' | xargs -r kill -9 || true
+    python3 .github/scripts/flush_meta.py $META_URL
+    rm -rf /var/jfs/myjfs || true
+    rm -rf /var/jfsCache/myjfs || true
+}
+
+start_two_gateway()
+{
+    prepare_test
+    ./juicefs format $META_URL myjfs --trash-days 0
+    ./juicefs mount -d $META_URL /tmp/jfs
+    export MINIO_ROOT_USER=admin
+    export MINIO_ROOT_PASSWORD=admin123
+    ./juicefs gateway $META_URL 127.0.0.1:9001 --multi-buckets --keep-etag --object-tag -background
+    sleep 1
+    ./juicefs gateway $META_URL 127.0.0.1:9002 --multi-buckets --keep-etag --object-tag -background
+    sleep 2
+    ./mc alias set gateway1 http://127.0.0.1:9001 admin admin123
+    ./mc alias set gateway2 http://127.0.0.1:9002 admin admin123
+}
+
+test_user_management()
+{
+    prepare_test
+    start_two_gateway
+    ./mc admin user add gateway1 user1 admin123
+    sleep 12
+    user=$(./mc admin user list gateway2 | grep user1) || true
+    if [ -z "$user" ]
+    then
+        echo "error: user1 was not synchronized to gateway2"
+        exit 1
+    fi
+    ./mc mb gateway1/test1
+    ./mc alias set gateway1_user1 http://127.0.0.1:9001 user1 admin123
+    if ./mc cp mc gateway1_user1/test1/file1
+    then
+        echo "error: a new user should have no read/write permission by default"
+        exit 1
+    fi
+    ./mc admin policy set gateway1 readwrite user=user1
+    if ./mc cp mc gateway1_user1/test1/file1
+    then
+        echo "the readwrite policy allows reading and writing objects"
+    else
+        echo "error: failed to apply the readwrite policy"
+        exit 1
+    fi
+    ./mc cp gateway2/test1/file1 .
+    compare_md5sum file1 mc
+    ./mc admin user disable gateway1 user1
+    ./mc admin user remove gateway2 user1
+    sleep 12
+    user=$(./mc admin user list gateway1 | grep user1) || true
+    if [ -n "$user" ]
+    then
+        echo "error: failed to remove user1"
+        echo "$user"
+        exit 1
+    fi
+}
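The fixed `sleep 12` waits above are tied to the `MINIO_REFRESH_IAM_INTERVAL=10s` exported at the top of the script. A polling helper along the following lines would make the propagation checks less timing-sensitive; this is only a sketch, and `wait_for_user_sync` is a hypothetical helper, not part of this suite:

```bash
# Hypothetical helper (not in this suite): poll until a user becomes
# visible on a gateway instead of sleeping for a fixed 12 seconds.
wait_for_user_sync()
{
    local alias=$1 user=$2 deadline=${3:-30}
    for ((i = 0; i < deadline; i++)); do
        if ./mc admin user list "$alias" | grep -q "$user"; then
            return 0
        fi
        sleep 1
    done
    echo "user $user not visible on $alias after ${deadline}s"
    return 1
}
```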
-z "$user" ] + then + echo "remove user user1 fail" + echo $user + exit 1 + fi +} + +test_group_management() +{ + prepare_test + start_two_gateway + ./mc admin user add gateway1 user1 admin123 + ./mc admin user add gateway1 user2 admin123 + ./mc admin user add gateway1 user3 admin123 + ./mc admin group add gateway1 testcents user1 user2 user3 + result=$(./mc admin group info gateway1 testcents | grep Members |awk '{print $2}') || true + if [ "$result" != "user1,user2,user3" ] + then + echo "error,result is '$result'" + exit 1 + fi + ./mc admin policy set gateway1 readwrite group=testcents + sleep 5 + ./mc alias set gateway1_user1 http://127.0.0.1:9001 user1 admin123 + ./mc mb gateway1/test1 + if ./mc cp mc gateway1_user1/test1/file1 + then + echo "readwrite policy can read write" + else + echo "the readwrite group has no read and write permission" + exit 1 + fi + ./mc admin policy set gateway1 readonly group=testcents + sleep 5 + if ./mc cp mc gateway1_user1/test1/file1 + then + echo "readonly group policy can not write" + exit 1 + else + echo "the readonly group has no write permission" + fi + + ./mc admin group remove gateway1 testcents user1 user2 user3 + ./mc admin group remove gateway1 testcents +} + +test_mult_gateways_set_group() +{ + prepare_test + start_two_gateway + ./mc admin user add gateway1 user1 admin123 + ./mc admin user add gateway1 user2 admin123 + ./mc admin user add gateway1 user3 admin123 + ./mc admin group add gateway1 testcents user1 user2 user3 + ./mc admin group disable gateway2 testcents + sleep 12 + result=$(./mc admin group info gateway2 testcents | grep Members |awk '{print $2}') || true + if [ "$result" != "user1,user2,user3" ] + then + echo "error,result is '$result'" + exit 1 + fi + ./mc admin group enable gateway1 testcents + ./mc admin user add gateway1 user4 admin123 + ./mc admin group add gateway1 testcents user4 + sleep 1 + ./mc admin group disable gateway2 testcents + sleep 12 + result=$(./mc admin group info gateway2 testcents | grep Members |awk '{print $2}') || true + if [ "$result" != "user1,user2,user3,user4" ] + then + echo "error,result is '$result'" + exit 1 + fi +} + +test_user_svcacct_add() +{ + prepare_test + start_two_gateway + ./mc admin user add gateway1 user1 admin123 + ./mc admin policy set gateway1 consoleAdmin user=user1 + ./mc alias set gateway1_user1 http://127.0.0.1:9001 user1 admin123 + ./mc admin user svcacct add gateway1_user1 user1 --access-key 12345678 --secret-key 12345678 + ./mc admin user svcacct info gateway1_user1 12345678 + ./mc admin user svcacct set gateway1_user1 12345678 --secret-key 123456789 + ./mc alias set svcacct1 http://127.0.0.1:9001 12345678 123456789 + ./mc mb svcacct1/test1 + if ./mc cp mc svcacct1/test1/file1 + then + echo "svcacct user consoleAdmin policy can read write" + else + echo "the svcacct user has no read and write permission" + exit 1 + fi + ./mc admin user svcacct disable gateway1_user1 12345678 + ./mc admin user svcacct rm gateway1_user1 12345678 +} + +test_user_sts() +{ + prepare_test + start_two_gateway + ./mc admin user add gateway1 user1 admin123 + ./mc admin policy set gateway1 consoleAdmin user=user1 + ./mc alias set gateway1_user1 http://127.0.0.1:9001 user1 admin123 + git clone https://github.com/juicedata/minio.git -b gateway-1.1 + ./mc mb gateway1_user1/test1 + ./mc cp mc gateway1_user1/test1/mc + cd minio + go run docs/sts/assume-role.go -sts-ep http://127.0.0.1:9001 -u user1 -p admin123 -b test1 -d + go run docs/sts/assume-role.go -sts-ep http://127.0.0.1:9001 -u user1 -p admin123 
+
+test_change_credentials()
+{
+    prepare_test
+    start_two_gateway
+    ./mc mb gateway1/test1
+    ./mc cp mc gateway1/test1/file1
+    lsof -i :9001 | awk 'NR!=1 {print $2}' | xargs -r kill -9 || true
+    lsof -i :9002 | awk 'NR!=1 {print $2}' | xargs -r kill -9 || true
+    export MINIO_ROOT_USER=newadmin
+    export MINIO_ROOT_PASSWORD=newadmin123
+    export MINIO_ROOT_USER_OLD=admin
+    export MINIO_ROOT_PASSWORD_OLD=admin123
+    ./juicefs gateway $META_URL 127.0.0.1:9001 --multi-buckets --keep-etag --object-tag -background
+    ./juicefs gateway $META_URL 127.0.0.1:9002 --multi-buckets --keep-etag --object-tag -background
+    sleep 5
+    ./mc alias set gateway1 http://127.0.0.1:9001 newadmin newadmin123
+    ./mc alias set gateway2 http://127.0.0.1:9002 newadmin newadmin123
+    ./mc cp gateway1/test1/file1 file1
+    ./mc cp gateway2/test1/file1 file2
+    compare_md5sum file1 mc
+    compare_md5sum file2 mc
+}
+
+source .github/scripts/common/run_test.sh && run_test $@
+
diff --git a/.github/workflows/command.yml b/.github/workflows/command.yml
index 6f508de0f3bf..a58cce9c13b3 100644
--- a/.github/workflows/command.yml
+++ b/.github/workflows/command.yml
@@ -99,7 +99,12 @@ jobs:
         timeout-minutes: 30
         run: |
           sudo META=${{matrix.meta}} .github/scripts/command/fsck.sh
-
+
+      - name: Test Gateway
+        timeout-minutes: 30
+        run: |
+          sudo META=${{matrix.meta}} .github/scripts/command/gateway.sh
+
       - name: Test Info
         run: |
           sudo META=${{matrix.meta}} .github/scripts/command/info.sh
diff --git a/cmd/flags.go b/cmd/flags.go
index 5faef10dd63e..50af6f061557 100644
--- a/cmd/flags.go
+++ b/cmd/flags.go
@@ -17,14 +17,11 @@ package cmd

 import (
+	"fmt"
+	"github.com/urfave/cli/v2"
 	"os"
 	"path"
 	"runtime"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/urfave/cli/v2"
 )

 func globalFlags() []cli.Flag {
@@ -109,14 +106,14 @@ func storageFlags() []cli.Flag {
 			Name:  "storage-class",
 			Usage: "the storage class for data written by current client",
 		},
-		&cli.IntFlag{
+		&cli.StringFlag{
 			Name:  "get-timeout",
-			Value: 60,
+			Value: "60s",
 			Usage: "the max number of seconds to download an object",
 		},
-		&cli.IntFlag{
+		&cli.StringFlag{
 			Name:  "put-timeout",
-			Value: 60,
+			Value: "60s",
 			Usage: "the max number of seconds to upload an object",
 		},
 		&cli.IntFlag{
@@ -314,27 +311,27 @@ func shareInfoFlags() []cli.Flag {

 func metaCacheFlags(defaultEntryCache float64) []cli.Flag {
 	return addCategories("META CACHE", []cli.Flag{
-		&cli.Float64Flag{
+		&cli.StringFlag{
 			Name:  "attr-cache",
-			Value: 1.0,
-			Usage: "attributes cache timeout in seconds",
+			Value: "1s",
+			Usage: "attributes cache timeout",
 		},
-		&cli.Float64Flag{
+		&cli.StringFlag{
 			Name:  "entry-cache",
-			Value: defaultEntryCache,
-			Usage: "file entry cache timeout in seconds",
+			Value: fmt.Sprintf("%.1f", defaultEntryCache),
+			Usage: "file entry cache timeout",
 		},
-		&cli.Float64Flag{
+		&cli.StringFlag{
 			Name:  "dir-entry-cache",
-			Value: 1.0,
-			Usage: "dir entry cache timeout in seconds",
+			Value: "1s",
+			Usage: "dir entry cache timeout",
 		},
-		&cli.Float64Flag{
+		&cli.StringFlag{
 			Name:  "open-cache",
-			Value: 0.0,
+			Value: "0",
 			Usage: "The seconds to reuse open file without checking update (0 means disable this feature)",
 		},
-		&cli.IntFlag{
+		&cli.Uint64Flag{
 			Name:  "open-cache-limit",
 			Value: 10000,
 			Usage: "max number of open files to cache (soft limit, 0 means unlimited)",
@@ -349,26 +346,3 @@ func expandFlags(compoundFlags ...[]cli.Flag) []cli.Flag {
 	}
 	return flags
 }
-
-func duration(s string) time.Duration {
-	v, err := strconv.Atoi(s)
-	if err == nil {
-		return time.Second * time.Duration(v)
-	}
-
-	err = nil
-	var d time.Duration
-	p := strings.Index(s, "d")
-	if p >= 0 {
-		v, err = strconv.Atoi(s[:p])
-	}
-	if err == nil && s[p+1:] != "" {
-		d, err = time.ParseDuration(s[p+1:])
-	}
-
-	if err != nil {
-		logger.Warnf("Invalid duration value: %s, setting it to 0", s)
-		return 0
-	}
-	return d + time.Hour*time.Duration(v*24)
-}
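With the cmd-local `duration` helper above deleted, parsing is consolidated in `utils.Duration`. Judging from the removed code and the new string defaults ("60s", "1s", "0", and `fmt.Sprintf("%.1f", ...)`), it has to accept bare (possibly fractional) second counts, ordinary Go durations, and a day suffix. A hypothetical reimplementation, for illustration only — the real `pkg/utils` code may differ:

```go
package utils

import (
	"strconv"
	"strings"
	"time"
)

// Duration accepts "60" or "1.5" (seconds), Go durations such as
// "100ms" or "2h", and a day suffix such as "3d" or "1d12h".
// Sketch only; not the actual pkg/utils implementation.
func Duration(s string) time.Duration {
	if v, err := strconv.ParseFloat(s, 64); err == nil {
		return time.Duration(v * float64(time.Second))
	}
	if p := strings.Index(s, "d"); p >= 0 {
		days, err := strconv.Atoi(s[:p])
		if err == nil {
			var rest time.Duration
			if s[p+1:] != "" {
				rest, err = time.ParseDuration(s[p+1:])
			}
			if err == nil {
				return time.Duration(days)*24*time.Hour + rest
			}
		}
	}
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0 // invalid values fall back to 0, as the old helper did
	}
	return d
}
```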
diff --git a/cmd/flags_test.go b/cmd/flags_test.go
index 0bbb487ca857..f1c0090dd859 100644
--- a/cmd/flags_test.go
+++ b/cmd/flags_test.go
@@ -1,6 +1,7 @@
 package cmd

 import (
+	"github.com/juicedata/juicefs/pkg/utils"
 	"testing"
 	"time"

@@ -49,7 +50,7 @@ func Test_duration(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			assert.Equalf(t, tt.want, duration(tt.args.s), "duration(%v)", tt.args.s)
+			assert.Equalf(t, tt.want, utils.Duration(tt.args.s), "duration(%v)", tt.args.s)
 		})
 	}
 }
diff --git a/cmd/format.go b/cmd/format.go
index bc987a22f017..7269f350b847 100644
--- a/cmd/format.go
+++ b/cmd/format.go
@@ -247,7 +247,10 @@ func createStorage(format meta.Format) (object.ObjectStorage, error) {
 	blob = object.WithPrefix(blob, format.Name+"/")
 	if format.StorageClass != "" {
 		if os, ok := blob.(object.SupportStorageClass); ok {
-			os.SetStorageClass(format.StorageClass)
+			err := os.SetStorageClass(format.StorageClass)
+			if err != nil {
+				logger.Warnf("set storage class %q: %v", format.StorageClass, err)
+			}
 		} else {
 			logger.Warnf("Storage class is not supported by %q, will ignore", format.Storage)
 		}
diff --git a/cmd/gateway.go b/cmd/gateway.go
index 3d23707ecd53..ae1ee51495aa 100644
--- a/cmd/gateway.go
+++ b/cmd/gateway.go
@@ -22,13 +22,13 @@ package cmd

 import (
 	"context"
 	"errors"
+	"github.com/juicedata/juicefs/pkg/utils"
 	_ "net/http/pprof"
 	"os"
 	"os/signal"
 	"path"
 	"strconv"
 	"syscall"
-	"time"

 	"github.com/juicedata/juicefs/pkg/chunk"
 	"github.com/juicedata/juicefs/pkg/fs"
@@ -83,6 +83,10 @@ func cmdGateway() *cli.Command {
 			Name:  "domain",
 			Usage: "domain for virtual-host-style requests",
 		},
+		&cli.StringFlag{
+			Name:  "refresh-iam-interval",
+			Usage: "interval to reload gateway IAM from configuration",
+		},
 	}

 	return &cli.Command{
@@ -126,6 +130,10 @@ func gateway(c *cli.Context) error {
 		os.Setenv("MINIO_DOMAIN", c.String("domain"))
 	}

+	if c.IsSet("refresh-iam-interval") {
+		os.Setenv("MINIO_REFRESH_IAM_INTERVAL", c.String("refresh-iam-interval"))
+	}
+
 	metaAddr := c.Args().Get(0)
 	listenAddr := c.Args().Get(1)
 	conf, jfs := initForSvc(c, "s3gateway", metaAddr)
@@ -154,7 +162,6 @@ func gateway(c *cli.Context) error {
 			logger.Fatalf("init MinioMetaBucket error %s: %s", minio.MinioMetaBucket, err)
 		}
 	}
-
 	args := []string{"server", "--address", listenAddr, "--anonymous"}
 	if c.Bool("no-banner") {
 		args = append(args, "--quiet")
@@ -195,9 +202,8 @@ func initForSvc(c *cli.Context, mp string, metaUrl string) (*vfs.Config, *fs.Fil
 	removePassword(metaUrl)
 	metaConf := getMetaConf(c, mp, c.Bool("read-only"))
 	metaCli := meta.NewClient(metaUrl, metaConf)
-
 	if c.Bool("background") {
-		if err := makeDaemonForSvc(c, metaCli); err != nil {
+		if err := makeDaemonForSvc(c, metaCli, metaUrl); err != nil {
 			logger.Fatalf("make daemon: %s", err)
 		}
 	}
@@ -244,9 +250,9 @@ func initForSvc(c *cli.Context, mp string, metaUrl string) (*vfs.Config, *fs.Fil
 	}()
 	vfsConf := getVfsConf(c, metaConf, format, chunkConf)
 	vfsConf.AccessLog = c.String("access-log")
-	vfsConf.AttrTimeout = time.Millisecond * time.Duration(c.Float64("attr-cache")*1000)
-	vfsConf.EntryTimeout = time.Millisecond * time.Duration(c.Float64("entry-cache")*1000)
-	vfsConf.DirEntryTimeout = time.Millisecond * time.Duration(c.Float64("dir-entry-cache")*1000)
+	vfsConf.AttrTimeout = utils.Duration(c.String("attr-cache"))
+	vfsConf.EntryTimeout = utils.Duration(c.String("entry-cache"))
+	vfsConf.DirEntryTimeout = utils.Duration(c.String("dir-entry-cache"))

 	initBackgroundTasks(c, vfsConf, metaConf, metaCli, blob, registerer, registry)
 	jfs, err := fs.NewFileSystem(vfsConf, metaCli, store)
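As the hunk above shows, the new flag simply forwards to the `MINIO_REFRESH_IAM_INTERVAL` environment variable read by the embedded MinIO layer, so the two spellings below should be interchangeable (illustrative only, mirroring what gateway.sh does with the env var):

```bash
# Shorten the IAM refresh interval via the new CLI flag...
./juicefs gateway --refresh-iam-interval 10s "$META_URL" 127.0.0.1:9001

# ...or via the environment variable, as gateway.sh does above.
export MINIO_REFRESH_IAM_INTERVAL=10s
./juicefs gateway "$META_URL" 127.0.0.1:9001
```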
diff --git a/cmd/mdtest.go b/cmd/mdtest.go
index 870d02937917..472311d26405 100644
--- a/cmd/mdtest.go
+++ b/cmd/mdtest.go
@@ -217,9 +217,9 @@ func initForMdtest(c *cli.Context, mp string, metaUrl string) *fs.FileSystem {

 	conf := getVfsConf(c, metaConf, format, chunkConf)
 	conf.AccessLog = c.String("access-log")
-	conf.AttrTimeout = time.Millisecond * time.Duration(c.Float64("attr-cache")*1000)
-	conf.EntryTimeout = time.Millisecond * time.Duration(c.Float64("entry-cache")*1000)
-	conf.DirEntryTimeout = time.Millisecond * time.Duration(c.Float64("dir-entry-cache")*1000)
+	conf.AttrTimeout = utils.Duration(c.String("attr-cache"))
+	conf.EntryTimeout = utils.Duration(c.String("entry-cache"))
+	conf.DirEntryTimeout = utils.Duration(c.String("dir-entry-cache"))

 	metricsAddr := exposeMetrics(c, registerer, registry)
 	m.InitMetrics(registerer)
diff --git a/cmd/mount.go b/cmd/mount.go
index 2859618a371c..94c29dfa8d37 100644
--- a/cmd/mount.go
+++ b/cmd/mount.go
@@ -184,7 +184,7 @@ func updateFormat(c *cli.Context) func(*meta.Format) {
 	}
 }

-func daemonRun(c *cli.Context, addr string, vfsConf *vfs.Config) {
+func cacheDirPathToAbs(c *cli.Context) {
 	if runtime.GOOS != "windows" {
 		if cd := c.String("cache-dir"); cd != "memory" {
 			ds := utils.SplitDir(cd)
@@ -212,6 +212,10 @@ func daemonRun(c *cli.Context, addr string, vfsConf *vfs.Config) {
 			}
 		}
 	}
+}
+
+func daemonRun(c *cli.Context, addr string, vfsConf *vfs.Config) {
+	cacheDirPathToAbs(c)
 	_ = expandPathForEmbedded(addr)
 	// The default log to syslog is only in daemon mode.
utils.InitLoggers(!c.Bool("no-syslog")) @@ -250,7 +254,7 @@ func getVfsConf(c *cli.Context, metaConf *meta.Config, format *meta.Format, chun Format: *format, Version: version.Version(), Chunk: chunkConf, - BackupMeta: duration(c.String("backup-meta")), + BackupMeta: utils.Duration(c.String("backup-meta")), BackupSkipTrash: c.Bool("backup-skip-trash"), Port: &vfs.Port{DebugAgent: debugAgent, PyroscopeAddr: c.String("pyroscope")}, PrefixInternal: c.Bool("prefix-internal"), @@ -288,12 +292,12 @@ func getMetaConf(c *cli.Context, mp string, readOnly bool) *meta.Config { conf.SkipDirNlink = c.Int("skip-dir-nlink") conf.ReadOnly = readOnly conf.NoBGJob = c.Bool("no-bgjob") - conf.OpenCache = time.Duration(c.Float64("open-cache") * 1e9) + conf.OpenCache = utils.Duration(c.String("open-cache")) conf.OpenCacheLimit = c.Uint64("open-cache-limit") - conf.Heartbeat = duration(c.String("heartbeat")) + conf.Heartbeat = utils.Duration(c.String("heartbeat")) conf.MountPoint = mp conf.Subdir = c.String("subdir") - conf.SkipDirMtime = duration(c.String("skip-dir-mtime")) + conf.SkipDirMtime = utils.Duration(c.String("skip-dir-mtime")) conf.Sid, _ = strconv.ParseUint(os.Getenv("_JFS_META_SID"), 10, 64) atimeMode := c.String("atime-mode") @@ -316,8 +320,8 @@ func getChunkConf(c *cli.Context, format *meta.Format) *chunk.Config { Compress: format.Compression, HashPrefix: format.HashPrefix, - GetTimeout: time.Second * time.Duration(c.Int("get-timeout")), - PutTimeout: time.Second * time.Duration(c.Int("put-timeout")), + GetTimeout: utils.Duration(c.String("get-timeout")), + PutTimeout: utils.Duration(c.String("put-timeout")), MaxUpload: c.Int("max-uploads"), MaxRetries: c.Int("io-retries"), Writeback: c.Bool("writeback"), @@ -325,7 +329,7 @@ func getChunkConf(c *cli.Context, format *meta.Format) *chunk.Config { BufferSize: utils.ParseBytes(c, "buffer-size", 'M'), UploadLimit: utils.ParseMbps(c, "upload-limit") * 1e6 / 8, DownloadLimit: utils.ParseMbps(c, "download-limit") * 1e6 / 8, - UploadDelay: duration(c.String("upload-delay")), + UploadDelay: utils.Duration(c.String("upload-delay")), UploadHours: c.String("upload-hours"), CacheDir: c.String("cache-dir"), @@ -335,8 +339,8 @@ func getChunkConf(c *cli.Context, format *meta.Format) *chunk.Config { CacheFullBlock: !c.Bool("cache-partial-only"), CacheChecksum: c.String("verify-cache-checksum"), CacheEviction: c.String("cache-eviction"), - CacheScanInterval: duration(c.String("cache-scan-interval")), - CacheExpire: duration(c.String("cache-expire")), + CacheScanInterval: utils.Duration(c.String("cache-scan-interval")), + CacheExpire: utils.Duration(c.String("cache-expire")), AutoCreate: true, } if chunkConf.UploadLimit == 0 { diff --git a/cmd/mount_unix.go b/cmd/mount_unix.go index 9c51ea08f1d2..55a0131841b3 100644 --- a/cmd/mount_unix.go +++ b/cmd/mount_unix.go @@ -206,7 +206,10 @@ func checkMountpoint(name, mp, logPath string, background bool) { } } -func makeDaemonForSvc(c *cli.Context, m meta.Meta) error { +func makeDaemonForSvc(c *cli.Context, m meta.Meta, metaUrl string) error { + cacheDirPathToAbs(c) + _ = expandPathForEmbedded(metaUrl) + var attrs godaemon.DaemonAttr logfile := c.String("log") attrs.OnExit = func(stage int) error { @@ -721,9 +724,9 @@ func mountMain(v *vfs.VFS, c *cli.Context) { disableUpdatedb() } conf := v.Conf - conf.AttrTimeout = time.Millisecond * time.Duration(c.Float64("attr-cache")*1000) - conf.EntryTimeout = time.Millisecond * time.Duration(c.Float64("entry-cache")*1000) - conf.DirEntryTimeout = time.Millisecond * 
time.Duration(c.Float64("dir-entry-cache")*1000)
+	conf.AttrTimeout = utils.Duration(c.String("attr-cache"))
+	conf.EntryTimeout = utils.Duration(c.String("entry-cache"))
+	conf.DirEntryTimeout = utils.Duration(c.String("dir-entry-cache"))
 	conf.NonDefaultPermission = c.Bool("non-default-permission")
 	rootSquash := c.String("root-squash")
 	if rootSquash != "" {
diff --git a/cmd/mount_windows.go b/cmd/mount_windows.go
index 1f02475698f2..673af614751f 100644
--- a/cmd/mount_windows.go
+++ b/cmd/mount_windows.go
@@ -50,7 +50,7 @@ func makeDaemon(c *cli.Context, conf *vfs.Config) error {
 	return nil
 }

-func makeDaemonForSvc(c *cli.Context, m meta.Meta) error {
+func makeDaemonForSvc(c *cli.Context, m meta.Meta, metaUrl string) error {
 	logger.Warnf("Cannot run in background in Windows.")
 	return nil
 }
diff --git a/cmd/objbench.go b/cmd/objbench.go
index e3b33993e236..0d2cd8871baa 100644
--- a/cmd/objbench.go
+++ b/cmd/objbench.go
@@ -347,6 +347,14 @@ func objbench(ctx *cli.Context) error {
 		threads:   threads,
 		seed:      make([]byte, bSize),
 		smallSeed: make([]byte, smallBSize),
+		buffPool: &sync.Pool{New: func() interface{} {
+			buff := make([]byte, bSize)
+			return &buff
+		}},
+		smallBuffPool: &sync.Pool{New: func() interface{} {
+			buff := make([]byte, smallBSize)
+			return &buff
+		}},
 	}
 	randRead(bm.seed)
 	randRead(bm.smallSeed)
@@ -424,10 +432,11 @@ type apiInfo struct {
 }

 type benchMarkObj struct {
-	progressBar     *utils.Progress
-	blob            object.ObjectStorage
-	threads         int
-	seed, smallSeed []byte
+	progressBar             *utils.Progress
+	blob                    object.ObjectStorage
+	threads                 int
+	seed, smallSeed         []byte
+	buffPool, smallBuffPool *sync.Pool
 }

 func (bm *benchMarkObj) run(api apiInfo) []string {
@@ -503,40 +512,57 @@ func (bm *benchMarkObj) run(api apiInfo) []string {
 	return line
 }

-func getMockData(seed []byte, idx int) []byte {
+func getMockData(seed []byte, idx int, result *[]byte) {
 	size := len(seed)
-	if size == 0 {
-		return nil
+	rSize := len(*result)
+	if size == 0 || rSize == 0 {
+		return
 	}
-	content := make([]byte, size)
-	if idx == 0 {
-		content = seed
+	i := idx % size
+	if size-i > rSize {
+		copy(*result, seed[i:i+rSize])
 	} else {
-		i := idx % size
-		copy(content[:size-i], seed[i:size])
-		copy(content[size-i:size], seed[:i])
+		copy((*result)[:size-i], seed[i:size])
+		copy((*result)[size-i:rSize], seed[:rSize-(size-i)])
 	}
-	return content
+
 }

 func (bm *benchMarkObj) put(key string, startKey int) error {
 	idx, _ := strconv.Atoi(key)
-	return bm.blob.Put(key, bytes.NewReader(getMockData(bm.seed, idx-startKey)))
+	if idx-startKey == 0 {
+		return bm.blob.Put(key, bytes.NewReader(bm.seed))
+	}
+	buff := bm.buffPool.Get().(*[]byte)
+	defer bm.buffPool.Put(buff)
+	getMockData(bm.seed, idx-startKey, buff)
+	return bm.blob.Put(key, bytes.NewReader(*buff))
 }

 func (bm *benchMarkObj) smallPut(key string, startKey int) error {
 	idx, _ := strconv.Atoi(key)
-	return bm.blob.Put(key, bytes.NewReader(getMockData(bm.smallSeed, idx)))
+	if idx == 0 {
+		return bm.blob.Put(key, bytes.NewReader(bm.smallSeed))
+	}
+
+	buff := bm.smallBuffPool.Get().(*[]byte)
+	defer bm.smallBuffPool.Put(buff)
+	// use idx (not idx-startKey) so the data matches what smallGet verifies
+	getMockData(bm.smallSeed, idx, buff)
+	return bm.blob.Put(key, bytes.NewReader(*buff))
 }

-func getAndCheckN(blob object.ObjectStorage, key string, seed []byte, getOrgIdx func(idx int) int) error {
+func getAndCheckN(blob object.ObjectStorage, key string, seed []byte, pool *sync.Pool, getOrgIdx func(idx int) int) error {
 	idx, _ := strconv.Atoi(key)
 	r, err := blob.Get(key, 0, -1)
 	if err != nil {
 		return err
 	}
 	defer r.Close()
-	content, err :=
io.ReadAll(r) + content := pool.Get().(*[]byte) + defer pool.Put(content) + + var n int + n, err = io.ReadFull(r, *content) if err != nil { return err } @@ -546,20 +572,32 @@ func getAndCheckN(blob object.ObjectStorage, key string, seed []byte, getOrgIdx if l < checkN { checkN = l } - if len(content) != len(seed) || !bytes.Equal(content[:checkN], getMockData(seed, orgIdx)[:checkN]) { + + // if orgIdx is 0, mockdata is the same as the seed + var preNMockData []byte + if orgIdx == 0 { + preNMockData = seed[:checkN] + } else { + mockResult := pool.Get().(*[]byte) + defer pool.Put(mockResult) + preNMockData = (*mockResult)[:checkN] + getMockData(seed, orgIdx, &preNMockData) + } + + if n != len(seed) || !bytes.Equal((*content)[:checkN], preNMockData) { return fmt.Errorf("the downloaded content is incorrect") } return nil } func (bm *benchMarkObj) get(key string, startKey int) error { - return getAndCheckN(bm.blob, key, bm.seed, func(idx int) int { + return getAndCheckN(bm.blob, key, bm.seed, bm.buffPool, func(idx int) int { return idx - startKey }) } func (bm *benchMarkObj) smallGet(key string, startKey int) error { - return getAndCheckN(bm.blob, key, bm.smallSeed, func(idx int) int { + return getAndCheckN(bm.blob, key, bm.smallSeed, bm.smallBuffPool, func(idx int) int { return idx }) } @@ -962,7 +1000,8 @@ func functionalTesting(blob object.ObjectStorage, result *[][]string, colorful b parts := make([]*object.Part, total) content := make([][]byte, total) for i := 0; i < total; i++ { - content[i] = getMockData(seed, i) + content[i] = make([]byte, upload.MinPartSize) + getMockData(seed, i, &content[i]) } var eg errgroup.Group eg.SetLimit(4) @@ -982,7 +1021,7 @@ func functionalTesting(blob object.ObjectStorage, result *[][]string, colorful b return err } // overwrite the first part - firstPartContent := append(getMockData(seed, 0), getMockData(seed, 0)...) + firstPartContent := append(seed, seed...) 
if parts[0], err = blob.UploadPart(key, upload.UploadID, 1, firstPartContent); err != nil {
 			return fmt.Errorf("multipart upload error: %v", err)
 		}
diff --git a/cmd/object.go b/cmd/object.go
index 1a281c33f329..66c183fbb93e 100644
--- a/cmd/object.go
+++ b/cmd/object.go
@@ -95,7 +95,7 @@ func (f *jFile) Close() error {
 	return toError(f.f.Close(ctx))
 }

-func (j *juiceFS) Get(key string, off, limit int64) (io.ReadCloser, error) {
+func (j *juiceFS) Get(key string, off, limit int64, getters ...object.AttrGetter) (io.ReadCloser, error) {
 	f, err := j.jfs.Open(ctx, j.path(key), vfs.MODE_MASK_R)
 	if err != 0 {
 		return nil, err
@@ -116,7 +116,7 @@ var bufPool = sync.Pool{
 	},
 }

-func (j *juiceFS) Put(key string, in io.Reader) (err error) {
+func (j *juiceFS) Put(key string, in io.Reader, getters ...object.AttrGetter) (err error) {
 	p := j.path(key)
 	if strings.HasSuffix(p, "/") {
 		eno := j.jfs.MkdirAll(ctx, p, 0777, j.umask)
@@ -171,7 +171,7 @@ func (j *juiceFS) Put(key string, in io.Reader) (err error) {
 	return nil
 }

-func (j *juiceFS) Delete(key string) error {
+func (j *juiceFS) Delete(key string, getters ...object.AttrGetter) error {
 	if key == "" {
 		return nil
 	}
diff --git a/cmd/sync.go b/cmd/sync.go
index f7a7db58d938..23d79f95f1dd 100644
--- a/cmd/sync.go
+++ b/cmd/sync.go
@@ -437,7 +437,10 @@ func doSync(c *cli.Context) error {
 	}
 	if config.StorageClass != "" {
 		if os, ok := dst.(object.SupportStorageClass); ok {
-			os.SetStorageClass(config.StorageClass)
+			err := os.SetStorageClass(config.StorageClass)
+			if err != nil {
+				logger.Errorf("set storage class %s: %s", config.StorageClass, err)
+			}
 		}
 	}
diff --git a/docs/en/deployment/hadoop_java_sdk.md b/docs/en/deployment/hadoop_java_sdk.md
index 4a473ee4b8f0..05944cd333a7 100644
--- a/docs/en/deployment/hadoop_java_sdk.md
+++ b/docs/en/deployment/hadoop_java_sdk.md
@@ -190,27 +190,28 @@ Please refer to the following table to set the relevant parameters of the JuiceFS

 #### Other Configurations

-| Configuration             | Default Value | Description                                                                                                                                                       |
-|---------------------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `juicefs.bucket`          |               | Specify a different endpoint for object storage                                                                                                                  |
-| `juicefs.debug`           | `false`       | Whether enable debug log                                                                                                                                          |
-| `juicefs.access-log`      |               | Access log path. Ensure Hadoop application has write permission, e.g. `/tmp/juicefs.access.log`. The log file will rotate automatically to keep at most 7 files. |
-| `juicefs.superuser`       | `hdfs`        | The super user                                                                                                                                                    |
-| `juicefs.supergroup`      | `supergroup`  | The super user group                                                                                                                                              |
-| `juicefs.users`           | `null`        | The path of username and UID list file, e.g. `jfs://name/etc/users`. The file format is `<username>:<uid>`, one user per line.                                   |
-| `juicefs.groups`          | `null`        | The path of group name, GID and group members list file, e.g. `jfs://name/etc/groups`. The file format is `<group-name>:<gid>:<user1>,<user2>`, one group per line. |
-| `juicefs.umask`           | `null`        | The umask used when creating files and directories (e.g. `0022`), default value is `fs.permissions.umask-mode`.                                                  |
-| `juicefs.push-gateway`    |               | [Prometheus Pushgateway](https://github.com/prometheus/pushgateway) address, format is `<host>:<port>`.                                                          |
-| `juicefs.push-auth`       |               | [Prometheus basic auth](https://prometheus.io/docs/guides/basic-auth) information, format is `<username>:<password>`.                                            |
-| `juicefs.push-graphite`   |               | [Graphite](https://graphiteapp.org) address, format is `<host>:<port>`.                                                                                          |
-| `juicefs.push-interval`   | 10            | Metric push interval (in seconds)                                                                                                                                 |
-| `juicefs.push-labels`     |               | Metric labels, format is `key1:value1;key2:value2`.                                                                                                               |
-| `juicefs.fast-resolve`    | `true`        | Whether enable faster metadata lookup using Redis Lua script                                                                                                      |
+| Configuration           | Default Value | Description                                                                                                                                                       |
+|-------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `juicefs.bucket`        |               | Specify a different endpoint for object storage                                                                                                                  |
+| `juicefs.debug`         | `false`       | Whether to enable debug log                                                                                                                                      |
+| `juicefs.access-log`    |               | Access log path. Ensure Hadoop application has write permission, e.g. `/tmp/juicefs.access.log`. The log file will rotate automatically to keep at most 7 files. |
+| `juicefs.superuser`     | `hdfs`        | The super user                                                                                                                                                    |
+| `juicefs.supergroup`    | `supergroup`  | The super user group                                                                                                                                              |
+| `juicefs.users`         | `null`        | The path of username and UID list file, e.g. `jfs://name/etc/users`. The file format is `<username>:<uid>`, one user per line.                                   |
+| `juicefs.groups`        | `null`        | The path of group name, GID and group members list file, e.g. `jfs://name/etc/groups`. The file format is `<group-name>:<gid>:<user1>,<user2>`, one group per line. |
+| `juicefs.umask`         | `null`        | The umask used when creating files and directories (e.g. `0022`), default value is `fs.permissions.umask-mode`.                                                  |
+| `juicefs.push-gateway`  |               | [Prometheus Pushgateway](https://github.com/prometheus/pushgateway) address, format is `<host>:<port>`.                                                          |
+| `juicefs.push-auth`     |               | [Prometheus basic auth](https://prometheus.io/docs/guides/basic-auth) information, format is `<username>:<password>`.                                            |
+| `juicefs.push-graphite` |               | [Graphite](https://graphiteapp.org) address, format is `<host>:<port>`.                                                                                          |
+| `juicefs.push-interval` | 10            | Metric push interval (in seconds)                                                                                                                                 |
+| `juicefs.push-labels`   |               | Metric labels, format is `key1:value1;key2:value2`.                                                                                                               |
+| `juicefs.fast-resolve`  | `true`        | Whether to enable faster metadata lookup using Redis Lua script                                                                                                   |
 | `juicefs.no-usage-report` | `false`     | Whether disable usage reporting. JuiceFS only collects anonymous usage data (e.g. version number), no user or any sensitive data will be collected.               |
-| `juicefs.no-bgjob`        | `false`       | Disable background jobs (clean-up, backup, etc.)                                                                                                                  |
-| `juicefs.backup-meta`     | 3600          | Interval (in seconds) to automatically backup metadata in the object storage (0 means disable backup)                                                            |
-|`juicefs.backup-skip-trash`| `false`       | Skip files and directories in trash when backup metadata.                                                                                                         |
-| `juicefs.heartbeat`       | 12            | Heartbeat interval (in seconds) between client and metadata engine. It's recommended that all clients use the same value.                                         |
+| `juicefs.no-bgjob`      | `false`       | Disable background jobs (clean-up, backup, etc.)                                                                                                                  |
+| `juicefs.backup-meta`   | 3600          | Interval (in seconds) to automatically backup metadata in the object storage (0 means disable backup)                                                            |
+| `juicefs.backup-skip-trash` | `false`   | Skip files and directories in trash when backing up metadata.                                                                                                     |
+| `juicefs.heartbeat`     | 12            | Heartbeat interval (in seconds) between client and metadata engine. It's recommended that all clients use the same value.                                         |
+| `juicefs.skip-dir-mtime` | 100ms        | Minimum interval before updating the mtime of a parent directory.
| #### Multiple file systems configuration diff --git a/docs/zh_cn/deployment/hadoop_java_sdk.md b/docs/zh_cn/deployment/hadoop_java_sdk.md index 055a8c282130..ee925582d8c6 100644 --- a/docs/zh_cn/deployment/hadoop_java_sdk.md +++ b/docs/zh_cn/deployment/hadoop_java_sdk.md @@ -204,15 +204,16 @@ make win | `juicefs.push-auth` | | [Prometheus 基本认证](https://prometheus.io/docs/guides/basic-auth)信息,格式为 `:`。 | | `juicefs.push-graphite` | | [Graphite](https://graphiteapp.org) 地址,格式为 `:`。 | | `juicefs.push-interval` | 10 | 指标推送的时间间隔,单位为秒。 | -| `juicefs.push-labels` | | 指标额外标签,格式为 `key1:value1;key2:value2`。 | +| `juicefs.push-labels` | | 指标额外标签,格式为 `key1:value1;key2:value2`。 | | `juicefs.fast-resolve` | `true` | 是否开启快速元数据查找(通过 Redis Lua 脚本实现) | | `juicefs.no-usage-report` | `false` | 是否上报数据。仅上版本号等使用量数据,不包含任何用户信息。 | | `juicefs.block.size` | `134217728` | 单位为字节,同 HDFS 的 `dfs.blocksize`,默认 128 MB | | `juicefs.file.checksum` | `false` | DistCp 使用 `-update` 参数时,是否计算文件 Checksum | | `juicefs.no-bgjob` | `false` | 是否关闭后台任务(清理、备份等) | | `juicefs.backup-meta` | 3600 | 自动将 JuiceFS 元数据备份到对象存储间隔(单位:秒),设置为 0 关闭自动备份 | -|`juicefs.backup-skip-trash`| `false` | 备份元数据时忽略回收站中的文件和目录。 | +|`juicefs.backup-skip-trash`| `false` | 备份元数据时忽略回收站中的文件和目录。 | | `juicefs.heartbeat` | 12 | 客户端和元数据引擎之间的心跳间隔(单位:秒),建议所有客户端都设置一样 | +| `juicefs.skip-dir-mtime` | 100ms | 修改父目录 mtime 间隔。 | #### 多文件系统配置 diff --git a/go.mod b/go.mod index a62b854df584..01853800199d 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/mattn/go-sqlite3 v1.14.16 github.com/minio/cli v1.24.2 github.com/minio/minio v0.0.0-20210206053228-97fe57bba92c - github.com/minio/minio-go/v7 v7.0.10 + github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78 github.com/ncw/swift/v2 v2.0.1 github.com/pingcap/log v1.1.1-0.20221015072633-39906604fb81 github.com/pkg/errors v0.9.1 @@ -122,7 +122,6 @@ require ( github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/djherbis/atime v1.0.0 // indirect github.com/dswarbrick/smart v0.0.0-20190505152634-909a45200d6d // indirect - github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect @@ -146,19 +145,12 @@ require ( github.com/gorilla/mux v1.8.0 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.6 github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.5.4 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/serf v0.9.7 // indirect - github.com/hashicorp/vault/api v1.0.4 // indirect - github.com/hashicorp/vault/sdk v0.1.13 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect @@ -171,7 +163,7 @@ require ( github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/compress v1.17.3 // indirect github.com/klauspost/cpuid v1.3.1 // indirect - github.com/klauspost/cpuid/v2 v2.0.3 // indirect + 
github.com/klauspost/cpuid/v2 v2.0.4 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/klauspost/readahead v1.3.1 // indirect github.com/klauspost/reedsolomon v1.9.11 // indirect @@ -184,7 +176,7 @@ require ( github.com/minio/highwayhash v1.0.2 // indirect github.com/minio/md5-simd v1.1.1 // indirect github.com/minio/selfupdate v0.3.1 // indirect - github.com/minio/sha256-simd v0.1.1 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect github.com/minio/simdjson-go v0.2.1 // indirect github.com/minio/sio v0.2.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -214,7 +206,6 @@ require ( github.com/rs/cors v1.7.0 // indirect github.com/rs/xid v1.2.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryanuber/go-glob v1.0.0 // indirect github.com/secure-io/sio-go v0.3.1 // indirect github.com/shirou/gopsutil/v3 v3.23.11 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect @@ -249,13 +240,12 @@ require ( google.golang.org/grpc v1.56.3 // indirect gopkg.in/ini.v1 v1.57.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect - gopkg.in/square/go-jose.v2 v2.3.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect xorm.io/builder v0.3.7 // indirect ) -replace github.com/minio/minio v0.0.0-20210206053228-97fe57bba92c => github.com/juicedata/minio v0.0.0-20231213085529-c243663574ba +replace github.com/minio/minio v0.0.0-20210206053228-97fe57bba92c => github.com/juicedata/minio v0.0.0-20240402044808-f3358f0455a9 replace github.com/hanwen/go-fuse/v2 v2.1.1-0.20210611132105-24a1dfe6b4f8 => github.com/juicedata/go-fuse/v2 v2.1.1-0.20240202080323-002ef792942e diff --git a/go.sum b/go.sum index 9034eccd08ca..312494e490e4 100644 --- a/go.sum +++ b/go.sum @@ -218,8 +218,6 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= -github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -259,7 +257,6 @@ github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-ldap/ldap/v3 v3.2.4 h1:PFavAq2xTgzo/loE8qNXcQaofAaqIpI4WgaLdv+1l3E= github.com/go-ldap/ldap/v3 v3.2.4/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -278,7 +275,6 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB 
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -326,7 +322,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -414,8 +409,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= @@ -430,11 +423,7 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= -github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -446,14 +435,11 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid 
v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= @@ -461,12 +447,6 @@ github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1 github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.9.7 h1:hkdgbqizGQHuU5IPqYM1JdSMV8nKfpuOnZYXssk9muY= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault/api v1.0.4 h1:j08Or/wryXT4AcHj1oCbMd7IijXcKzYUGw59LGu9onU= -github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= -github.com/hashicorp/vault/sdk v0.1.13 h1:mOEPeOhT7jl0J4AMl1E705+BcmeRs1VmKNb9F0sMLy8= -github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hungys/go-lz4 v0.0.0-20170805124057-19ff7f07f099 h1:heHZCso/ytvpYr+hp2cDxlZfA/jTw46aHSvT9kZnJ7o= github.com/hungys/go-lz4 v0.0.0-20170805124057-19ff7f07f099/go.mod h1:h44tqw4M3GN0Woo9KBStxJxm8huNi+9+tOHoeqSvhaY= @@ -527,8 +507,8 @@ github.com/juicedata/gogfapi v0.0.0-20230626071140-fc28e5537825 h1:7KrwI4HPqvNLK github.com/juicedata/gogfapi v0.0.0-20230626071140-fc28e5537825/go.mod h1:Ho5G4KgrgbMKW0buAJdOmYoJcOImkzznJQaLiATrsx4= github.com/juicedata/huaweicloud-sdk-go-obs v3.22.12-0.20230228031208-386e87b5c091+incompatible h1:2/ttSmYoX+QMegpNyAJR0Y6aHcVk57F7RJit5xN2T/s= github.com/juicedata/huaweicloud-sdk-go-obs v3.22.12-0.20230228031208-386e87b5c091+incompatible/go.mod h1:Ukwa8ffRQLV6QRwpqGioPjn2Wnf7TBDA4DbennDOqHE= -github.com/juicedata/minio v0.0.0-20231213085529-c243663574ba h1:cdGhPBD5bFjeFMHUeN58ljWqh2OSK0IN/CaPyPFERss= -github.com/juicedata/minio v0.0.0-20231213085529-c243663574ba/go.mod h1:v8gav+TZKKXpR1MoNASW28g3ezdOpPSlPMs0zuC3D74= +github.com/juicedata/minio v0.0.0-20240402044808-f3358f0455a9 h1:4aNcV/q+zoKDH9CbtDgiQXzKEtUCB8Dv89FZAVCKr9k= +github.com/juicedata/minio v0.0.0-20240402044808-f3358f0455a9/go.mod h1:UOWyfa3ls1tnpJrNw2yzGqfrwM4nzsZq/qz+zd6H+/Q= github.com/juicedata/mpb/v7 v7.0.4-0.20231024073412-2b8d31be510b 
h1:0/6suPNZnrOlRlBaU/Bnitu8HiKkkLSzQhHbwQ9AysM= github.com/juicedata/mpb/v7 v7.0.4-0.20231024073412-2b8d31be510b/go.mod h1:NXGsfPGx6G2JssqvEcULtDqUrxuuYs4llpv8W6ZUpzk= github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= @@ -546,8 +526,9 @@ github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/klauspost/cpuid/v2 v2.0.2/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.3 h1:DNljyrHyxlkk8139OXIAAauCwV8eQGDD6Z8YqnDXdZw= github.com/klauspost/cpuid/v2 v2.0.3/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/readahead v1.3.1 h1:QqXNYvm+VvqYcbrRT4LojUciM0XrznFRIDrbHiJtu/0= @@ -583,6 +564,7 @@ github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVL github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/marten-seemann/chacha20 v0.2.0/go.mod h1:HSdjFau7GzYRj+ahFNwsO3ouVJr1HFkWoEwNDb4TMtE= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= github.com/marten-seemann/qtls v0.4.1/go.mod h1:pxVXcHHw1pNIt8Qo0pwSYQEoZ8yYOOPXTCZLQQunvRc= @@ -627,23 +609,21 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/md5-simd v1.1.1 h1:9ojcLbuZ4gXbB2sX53MKn8JUZ0sB/2wfwsEcRw+I08U= github.com/minio/md5-simd v1.1.1/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.10 h1:1oUKe4EOPUEhw2qnPQaPsJ0lmVTYLFu03SiItauXs94= -github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= +github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78 h1:v7OMbUnWkyRlO2MZ5AuYioELhwXF/BgZEznrQ1drBEM= +github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78/go.mod h1:mTh2uJuAbEqdhMVl6CMIIZLUeiMiWtJR4JB8/5g2skw= github.com/minio/selfupdate v0.3.1 h1:BWEFSNnrZVMUWXbXIgLDNDjbejkmpAmZvy/nCz1HlEs= github.com/minio/selfupdate v0.3.1/go.mod h1:b8ThJzzH7u2MkF6PcIra7KaXO9Khf6alWPvMSyTDCFM= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/simdjson-go v0.2.1 h1:nxYlp4Qd0w2pwLlif00l5vTFL6PcNAKpyHq27/pageg= github.com/minio/simdjson-go v0.2.1/go.mod h1:JPUSkRykfSPS+AhO0YPA1h0l5vY7NqrF4zel2b12wxc= github.com/minio/sio v0.2.1 
h1:NjzKiIMSMcHediVQR0AFVx2tp7Wxh9tKPfDI3kH7aHQ= github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -651,7 +631,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmcloughlin/avo v0.0.0-20201105074841-5d2f697d268f/go.mod h1:6aKT4zZIrpGqB3RpFU14ByCSSyKY6LfJz4J/JJChHfI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -680,7 +659,6 @@ github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2 github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/oliverisaac/shellescape v0.0.0-20220131224704-1b6c6b87b668 h1:WUilXdVrxYH+fFkmstviAOj1o9CfoW5O/Sd0LWPIVUA= @@ -811,8 +789,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sacloud/libsacloud v1.26.1/go.mod h1:79ZwATmHLIFZIMd7sxA3LwzVy/B77uj3LDoToVTxDoQ= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= 
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -830,6 +806,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= @@ -1088,7 +1065,6 @@ golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1222,7 +1198,6 @@ google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1233,13 +1208,11 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= @@ -1266,7 +1239,6 @@ google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cn google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1290,7 +1262,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw= gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= diff --git a/pkg/chunk/cached_store.go b/pkg/chunk/cached_store.go index c7c9ef72cf76..a65c84e1133f 100644 --- a/pkg/chunk/cached_store.go +++ b/pkg/chunk/cached_store.go @@ -149,17 +149,20 @@ func (s *rSlice) ReadAt(ctx context.Context, page *Page, off int) (n int, err er s.store.cacheMiss.Add(1) s.store.cacheMissBytes.Add(float64(len(p))) - var in io.ReadCloser if s.store.seekable && boff > 0 && len(p) <= blockSize/4 { if s.store.downLimit != nil { s.store.downLimit.Wait(int64(len(p))) } // partial read st := time.Now() + var ( + reqID string + sc = object.DefaultStorageClass + ) page.Acquire() err := utils.WithTimeout(func() error { defer page.Release() - in, err = s.store.storage.Get(key, int64(boff), int64(len(p))) + in, err := s.store.storage.Get(key, int64(boff), int64(len(p)), object.WithRequestID(&reqID), object.WithStorageClass(&sc)) if err == nil { n, err = io.ReadFull(in, p) _ = in.Close() @@ -167,8 +170,7 @@ func (s *rSlice) ReadAt(ctx context.Context, page *Page, off int) (n int, err er return err }, s.store.conf.GetTimeout) used := time.Since(st) - sc := object.GetStorageClassOrDefault(in) - logRequest("GET", key, fmt.Sprintf("RANGE(%d,%d) ", boff, len(p)), err, used) + logRequest("GET", key, fmt.Sprintf("RANGE(%d,%d) ", boff, len(p)), reqID, err, used) s.store.objectDataBytes.WithLabelValues("GET", sc).Add(float64(n)) s.store.objectReqsHistogram.WithLabelValues("GET", sc).Observe(used.Seconds()) s.store.fetcher.fetch(key) @@ -202,8 +204,9 @@ func (s *rSlice) ReadAt(ctx context.Context, page *Page, off int) (n int, err er func (s *rSlice) 
delete(indx int) error { key := s.key(indx) st := time.Now() + var reqID string err := utils.WithTimeout(func() error { - return s.store.storage.Delete(key) + return s.store.storage.Delete(key, object.WithRequestID(&reqID)) }, s.store.conf.PutTimeout) used := time.Since(st) if err != nil && (strings.Contains(err.Error(), "NoSuchKey") || @@ -211,7 +214,7 @@ func (s *rSlice) delete(indx int) error { strings.Contains(err.Error(), "No such file")) { err = nil } - logRequest("DELETE", key, "", err, used) + logRequest("DELETE", key, "", reqID, err, used) s.store.objectReqsHistogram.WithLabelValues("DELETE", "").Observe(used.Seconds()) if err != nil { s.store.objectReqErrors.Add(1) @@ -341,13 +344,16 @@ func (store *cachedStore) put(key string, p *Page) error { store.upLimit.Wait(int64(len(p.Data))) } p.Acquire() + var ( + reqID string + sc = object.DefaultStorageClass + ) return utils.WithTimeout(func() error { defer p.Release() st := time.Now() - err := store.storage.Put(key, bytes.NewReader(p.Data)) + err := store.storage.Put(key, bytes.NewReader(p.Data), object.WithRequestID(&reqID), object.WithStorageClass(&sc)) used := time.Since(st) - sc := object.GetStorageClassOrDefault(store.storage) - logRequest("PUT", key, "", err, used) + logRequest("PUT", key, "", reqID, err, used) store.objectDataBytes.WithLabelValues("PUT", sc).Add(float64(len(p.Data))) store.objectReqsHistogram.WithLabelValues("PUT", sc).Observe(used.Seconds()) if err != nil { @@ -427,6 +433,7 @@ func (s *wSlice) upload(indx int) { if s.store.conf.Writeback { stagingPath, err := s.store.bcache.stage(key, block.Data, s.store.shouldCache(blen)) if err != nil { + s.store.stageBlockErrors.Add(1) logger.Warnf("write %s to disk: %s, upload it directly", stagingPath, err) } else { s.errors <- nil @@ -638,19 +645,14 @@ type cachedStore struct { objectReqErrors prometheus.Counter objectDataBytes *prometheus.CounterVec stageBlockDelay prometheus.Counter + stageBlockErrors prometheus.Counter } -func logRequest(typeStr string, key string, param string, err error, used time.Duration) { - var info string - if id := object.ReqIDCache.Get(key); id != "" { - info += fmt.Sprintf("RequestID: %s ", id) - } - if err != nil { - info += err.Error() - } - logger.Debugf("%s %s %s(%v, %.3fs)", typeStr, key, param, info, used.Seconds()) +func logRequest(typeStr, key, param, reqID string, err error, used time.Duration) { if used > SlowRequest { - logger.Infof("slow request: %s %s %s(%v, %.3fs)", typeStr, key, param, info, used.Seconds()) + logger.Warnf("slow request: %s %s %s(req_id: %q, err: %v, cost: %s)", typeStr, key, param, reqID, err, used) + } else { + logger.Debugf("%s %s %s(req_id: %q, err: %v, cost: %s)", typeStr, key, param, reqID, err, used) } } @@ -681,6 +683,10 @@ func (store *cachedStore) load(key string, page *Page, cache bool, forceCache bo } p.Acquire() var n int + var ( + reqID string + sc = object.DefaultStorageClass + ) err = utils.WithTimeout(func() error { defer p.Release() // it will be retried outside @@ -691,7 +697,7 @@ func (store *cachedStore) load(key string, page *Page, cache bool, forceCache bo store.objectReqErrors.Add(1) start = time.Now() } - in, err = store.storage.Get(key, 0, -1) + in, err = store.storage.Get(key, 0, -1, object.WithRequestID(&reqID), object.WithStorageClass(&sc)) tried++ } if err == nil { @@ -704,11 +710,10 @@ func (store *cachedStore) load(key string, page *Page, cache bool, forceCache bo return err }, store.conf.GetTimeout) used := time.Since(start) - logRequest("GET", key, "", err, used) + 
logRequest("GET", key, "", err, used) + logRequest("GET", key, "", reqID, err, used) if store.downLimit != nil && compressed { store.downLimit.Wait(int64(n)) } - sc := object.GetStorageClassOrDefault(in) store.objectDataBytes.WithLabelValues("GET", sc).Add(float64(n)) store.objectReqsHistogram.WithLabelValues("GET", sc).Observe(used.Seconds()) if err != nil { @@ -864,6 +869,10 @@ func (store *cachedStore) initMetrics() { Name: "staging_block_delay_seconds", Help: "Total seconds of delay for staging blocks", }) + store.stageBlockErrors = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "staging_block_errors", + Help: "Total errors when staging blocks", + }) } func (store *cachedStore) regMetrics(reg prometheus.Registerer) { @@ -879,6 +888,7 @@ func (store *cachedStore) regMetrics(reg prometheus.Registerer) { reg.MustRegister(store.objectReqErrors) reg.MustRegister(store.objectDataBytes) reg.MustRegister(store.stageBlockDelay) + reg.MustRegister(store.stageBlockErrors) reg.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Name: "blockcache_blocks", diff --git a/pkg/chunk/disk_cache.go b/pkg/chunk/disk_cache.go index c1745d520a42..c8e9fd09a761 100644 --- a/pkg/chunk/disk_cache.go +++ b/pkg/chunk/disk_cache.go @@ -719,7 +719,7 @@ func (cache *cacheStore) cleanupFull() { freed += int64(lastValue.size + 4096) cache.used -= int64(lastValue.size + 4096) todel = append(todel, lastK) - logger.Debugf("remove %s from cache, age: %d", lastK, now-lastValue.atime) + logger.Debugf("remove %s from cache, age: %ds", lastK, now-lastValue.atime) cache.m.cacheEvicts.Add(1) cnt = 0 if len(cache.keys) < num && cache.used < goal { @@ -854,7 +854,7 @@ func (cache *cacheStore) scanStaging() { var start = time.Now() var oneMinAgo = start.Add(-time.Minute) - var count int + var count, usage uint64 stagingPrefix := filepath.Join(cache.dir, stagingDir) logger.Debugf("Scan %s to find staging blocks", stagingPrefix) _ = filepath.WalkDir(stagingPrefix, func(path string, d fs.DirEntry, err error) error { @@ -889,12 +889,13 @@ func (cache *cacheStore) scanStaging() { cache.m.stageBlockBytes.Add(float64(origSize)) cache.uploader(key, path, false) count++ + usage += uint64(origSize) } } return nil }) if count > 0 { - logger.Infof("Found %d staging blocks (%d bytes) in %s with %s", count, cache.used, cache.dir, time.Since(start)) + logger.Infof("Found %d staging blocks (%s) in %s with %s", count, humanize.IBytes(usage), cache.dir, time.Since(start)) } } diff --git a/pkg/fuse/fuse.go b/pkg/fuse/fuse.go index 3f33a8a1c93f..7ef1054f7931 100644 --- a/pkg/fuse/fuse.go +++ b/pkg/fuse/fuse.go @@ -473,7 +473,7 @@ func Serve(v *vfs.VFS, options string, xattrs, ioctl bool) error { } else if n == "nonempty" || n == "ro" { } else if n == "debug" { opt.Debug = true - } else if n == "writeback_cache" || n == "writeback" { + } else if n == "writeback_cache" { opt.EnableWriteback = true } else if strings.TrimSpace(n) != "" { opt.Options = append(opt.Options, strings.TrimSpace(n)) diff --git a/pkg/gateway/gateway.go b/pkg/gateway/gateway.go index b6d9d6a6353c..86c50aa48ea4 100644 --- a/pkg/gateway/gateway.go +++ b/pkg/gateway/gateway.go @@ -18,6 +18,7 @@ package gateway import ( "context" + "errors" "fmt" "io" "net/http" @@ -76,6 +77,14 @@ type jfsObjects struct { gConf *Config } +func (n *jfsObjects) PutObjectMetadata(ctx context.Context, s string, s2 string, options minio.ObjectOptions) (minio.ObjectInfo, error) { + return minio.ObjectInfo{}, minio.NotImplemented{} +} + +func (n *jfsObjects) NSScanner(ctx context.Context, bf *minio.BloomFilter, updates chan<-
madmin.DataUsageInfo) error { + return nil +} + func (n *jfsObjects) IsCompressionSupported() bool { return false } @@ -95,7 +104,7 @@ func (n *jfsObjects) Shutdown(ctx context.Context) error { func (n *jfsObjects) StorageInfo(ctx context.Context) (info minio.StorageInfo, errors []error) { sinfo := minio.StorageInfo{} - sinfo.Backend.Type = minio.BackendFS + sinfo.Backend.Type = madmin.FS return sinfo, nil } @@ -1148,12 +1157,92 @@ func (n *jfsObjects) cleanup() { } } +type jfsFLock struct { + inode meta.Ino + owner uint64 + meta meta.Meta +} + +func (j *jfsFLock) GetLock(ctx context.Context, timeout *minio.DynamicTimeout) (newCtx context.Context, timedOutErr error) { + return j.getFlockWithTimeOut(ctx, meta.F_WRLCK, timeout) +} + +func (j *jfsFLock) getFlockWithTimeOut(ctx context.Context, ltype uint32, timeout *minio.DynamicTimeout) (context.Context, error) { + if j.inode == 0 { + logger.Warnf("no lock file inode, skip locking") + return ctx, nil + } + start := time.Now() + deadline := start.Add(timeout.Timeout()) + lockStr := "write" + if ltype == meta.F_RDLCK { + lockStr = "read" + } + for { + if errno := j.meta.Flock(mctx, j.inode, j.owner, ltype, false); errno == 0 { + timeout.LogSuccess(time.Since(start)) + return ctx, nil + } else if !errors.Is(errno, syscall.EAGAIN) { + logger.Errorf("failed to get %s lock for inode %d by owner %d, error: %s", lockStr, j.inode, j.owner, errno) + } + if time.Now().After(deadline) { + timeout.LogFailure() + logger.Errorf("get %s lock timed out, inode: %d", lockStr, j.inode) + return ctx, minio.OperationTimedOut{} + } + time.Sleep(5 * time.Millisecond) + } +} + +func (j *jfsFLock) Unlock() { + if j.inode == 0 { + return + } + if errno := j.meta.Flock(mctx, j.inode, j.owner, meta.F_UNLCK, true); errno != 0 { + logger.Errorf("failed to release lock for inode %d by owner %d, error: %s", j.inode, j.owner, errno) + } +} + +func (j *jfsFLock) GetRLock(ctx context.Context, timeout *minio.DynamicTimeout) (newCtx context.Context, timedOutErr error) { + return j.getFlockWithTimeOut(ctx, meta.F_RDLCK, timeout) +} + +func (j *jfsFLock) RUnlock() { + j.Unlock() +} + func (n *jfsObjects) NewNSLock(bucket string, objects ...string) minio.RWLocker { - return n.nsMutex.NewNSLock(nil, bucket, objects...)
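The jfsFLock implementation above replaces MinIO's in-process nsMutex with a file lock kept in the metadata engine, so multiple gateway instances can serialize access to the same namespace. Its acquisition loop polls a non-blocking lock until it succeeds or the deadline passes. Below is a minimal, runnable sketch of that acquire-with-deadline pattern; tryLock and errAgain here are stand-ins for meta.Flock and syscall.EAGAIN, not the real API:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errAgain = errors.New("lock is busy") // stands in for syscall.EAGAIN

// tryLock is a placeholder for a non-blocking lock attempt such as meta.Flock
// with block=false: it either acquires the lock or reports "busy" immediately.
func tryLock(mu *sync.Mutex) error {
	if mu.TryLock() {
		return nil
	}
	return errAgain
}

// lockWithDeadline polls tryLock every 5ms until it succeeds or the deadline
// passes, mirroring the retry loop in getFlockWithTimeOut (the real loop logs
// unexpected errors and keeps retrying instead of returning).
func lockWithDeadline(mu *sync.Mutex, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if err := tryLock(mu); err == nil {
			return nil
		} else if !errors.Is(err, errAgain) {
			return err // unexpected failure, report it
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("lock timed out after %s", timeout)
		}
		time.Sleep(5 * time.Millisecond)
	}
}

func main() {
	var mu sync.Mutex
	if err := lockWithDeadline(&mu, time.Second); err == nil {
		fmt.Println("locked")
		mu.Unlock()
	}
}
```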
+ if len(objects) != 1 { + panic(fmt.Errorf("jfsObjects.NewNSLock: the length of the objects parameter must be 1, got %v", objects)) + } + + lockfile := path.Join(minio.MinioMetaBucket, minio.MinioMetaLockFile) + var file *fs.File + var errno syscall.Errno + file, errno = n.fs.Open(mctx, lockfile, vfs.MODE_MASK_W) + if errno != 0 && !errors.Is(errno, syscall.ENOENT) { + logger.Errorf("failed to open the file to be locked: %s, error: %s", lockfile, errno) + return &jfsFLock{} + } + if errors.Is(errno, syscall.ENOENT) { + if file, errno = n.fs.Create(mctx, lockfile, 0666, n.gConf.Umask); errno != 0 { + if errors.Is(errno, syscall.EEXIST) { + if file, errno = n.fs.Open(mctx, lockfile, vfs.MODE_MASK_W); errno != 0 { + logger.Errorf("failed to open the file to be locked: %s, error: %s", lockfile, errno) + return &jfsFLock{} + } + } else { + logger.Errorf("failed to create gateway lock file, error: %s", errno) + return &jfsFLock{} + } + } + } + defer file.Close(mctx) + return &jfsFLock{owner: n.conf.Meta.Sid, inode: file.Inode(), meta: n.fs.Meta()} } -func (n *jfsObjects) BackendInfo() minio.BackendInfo { - return minio.BackendInfo{Type: minio.BackendFS} +func (n *jfsObjects) BackendInfo() madmin.BackendInfo { + return madmin.BackendInfo{Type: madmin.FS} } func (n *jfsObjects) LocalStorageInfo(ctx context.Context) (minio.StorageInfo, []error) { @@ -1278,10 +1367,6 @@ func (n *jfsObjects) DeleteObjectTags(ctx context.Context, bucket, object string return n.GetObjectInfo(ctx, bucket, object, opts) } -func (n *jfsObjects) CrawlAndGetDataUsage(ctx context.Context, bf *minio.BloomFilter, updates chan<- minio.DataUsageInfo) error { - return nil -} - func (n *jfsObjects) IsNotificationSupported() bool { return true } diff --git a/pkg/meta/base.go b/pkg/meta/base.go index 8617a9bf1ec4..6cf5c4db88d0 100644 --- a/pkg/meta/base.go +++ b/pkg/meta/base.go @@ -67,7 +67,6 @@ type engine interface { doInit(format *Format, force bool) error scanAllChunks(ctx Context, ch chan<- cchunk, bar *utils.Bar) error - compactChunk(inode Ino, indx uint32, once, force bool) doDeleteSustainedInode(sid uint64, inode Ino) error doFindDeletedFiles(ts int64, limit int) (map[Ino]uint64, error) // limit < 0 means all doDeleteFileData(inode Ino, length uint64) @@ -101,9 +100,11 @@ type engine interface { doRemoveXattr(ctx Context, inode Ino, name string) syscall.Errno doRepair(ctx Context, inode Ino, attr *Attr) syscall.Errno doTouchAtime(ctx Context, inode Ino, attr *Attr, ts time.Time) (bool, error) + doRead(ctx Context, inode Ino, indx uint32) ([]*slice, syscall.Errno) doWrite(ctx Context, inode Ino, indx uint32, off uint32, slice Slice, mtime time.Time, numSlices *int, delta *dirStat, attr *Attr) syscall.Errno doTruncate(ctx Context, inode Ino, flags uint8, length uint64, delta *dirStat, attr *Attr, skipPermCheck bool) syscall.Errno doFallocate(ctx Context, inode Ino, mode uint8, off uint64, size uint64, delta *dirStat, attr *Attr) syscall.Errno + doCompactChunk(inode Ino, indx uint32, origin []byte, ss []*slice, skipped int, pos uint32, id uint64, size uint32, delayed []byte) syscall.Errno doGetParents(ctx Context, inode Ino) map[Ino]int doUpdateDirStat(ctx Context, batch map[Ino]dirStat) error @@ -1058,6 +1059,9 @@ func (m *baseMeta) Link(ctx Context, inode, parent Ino, name string, attr *Attr) if name == "" { return syscall.ENOENT } + if name == "." || name == ".."
{ + return syscall.EEXIST + } defer m.timeit("Link", time.Now()) if attr == nil { @@ -1367,6 +1371,51 @@ func (m *baseMeta) InvalidateChunkCache(ctx Context, inode Ino, indx uint32) sys return 0 } +func (m *baseMeta) Read(ctx Context, inode Ino, indx uint32, slices *[]Slice) (st syscall.Errno) { + defer func() { + if st == 0 { + m.touchAtime(ctx, inode, nil) + } + }() + + f := m.of.find(inode) + if f != nil { + f.RLock() + defer f.RUnlock() + } + if ss, ok := m.of.ReadChunk(inode, indx); ok { + *slices = ss + return 0 + } + + *slices = nil + defer m.timeit("Read", time.Now()) + ss, st := m.en.doRead(ctx, inode, indx) + if st != 0 { + return st + } + if ss == nil { + return syscall.EIO + } + if len(ss) == 0 { + var attr Attr + if st = m.en.doGetAttr(ctx, inode, &attr); st != 0 { + return st + } + if attr.Typ != TypeFile { + return syscall.EPERM + } + return 0 + } + + *slices = buildSlice(ss) + m.of.CacheChunk(inode, indx, *slices) + if !m.conf.ReadOnly && (len(ss) >= 5 || len(*slices) >= 5) { + go m.compactChunk(inode, indx, false, false) + } + return 0 +} + func (m *baseMeta) NewSlice(ctx Context, id *uint64) syscall.Errno { m.freeMu.Lock() defer m.freeMu.Unlock() @@ -1414,9 +1463,9 @@ func (m *baseMeta) Write(ctx Context, inode Ino, indx uint32, off uint32, slice m.updateParentStat(ctx, inode, attr.Parent, delta.length, delta.space) if numSlices%100 == 99 || numSlices > 350 { if numSlices < maxSlices { - go m.en.compactChunk(inode, indx, false, false) + go m.compactChunk(inode, indx, false, false) } else { - m.en.compactChunk(inode, indx, true, false) + m.compactChunk(inode, indx, true, false) } } } @@ -1925,7 +1974,7 @@ func (m *baseMeta) CompactAll(ctx Context, threads int, bar *utils.Bar) syscall. go func() { for c := range ch { logger.Debugf("Compacting chunk %d:%d (%d slices)", c.inode, c.indx, c.slices) - m.en.compactChunk(c.inode, c.indx, false, true) + m.compactChunk(c.inode, c.indx, false, true) bar.Increment() } wg.Done() @@ -1942,6 +1991,101 @@ func (m *baseMeta) CompactAll(ctx Context, threads int, bar *utils.Bar) syscall. 
return 0 } +func (m *baseMeta) compactChunk(inode Ino, indx uint32, once, force bool) { + // avoid too many or duplicated compaction + k := uint64(inode) + (uint64(indx) << 40) + m.Lock() + if once || force { + for m.compacting[k] { + m.Unlock() + time.Sleep(time.Millisecond * 10) + m.Lock() + } + } else if len(m.compacting) > 10 || m.compacting[k] { + m.Unlock() + return + } + m.compacting[k] = true + m.Unlock() + defer func() { + m.Lock() + delete(m.compacting, k) + m.Unlock() + }() + + ss, st := m.en.doRead(Background, inode, indx) + if st != 0 { + return + } + if ss == nil { + logger.Errorf("Corrupt value for inode %d chunk indx %d", inode, indx) + return + } + if once && len(ss) < maxSlices { + return + } + if len(ss) > maxCompactSlices { + ss = ss[:maxCompactSlices] + } + skipped := skipSome(ss) + var first, last *slice + if skipped > 0 { + first, last = ss[0], ss[skipped-1] + } + compacted := ss[skipped:] + pos, size, slices := compactChunk(compacted) + if len(compacted) < 2 || size == 0 { + return + } + if first != nil && last != nil && pos+size > first.pos && last.pos+last.len > pos { + panic(fmt.Sprintf("invalid compaction: skipped slices [%+v, %+v], pos %d, size %d", *first, *last, pos, size)) + } + + var id uint64 + if st = m.NewSlice(Background, &id); st != 0 { + return + } + logger.Debugf("compact %d:%d: skipped %d slices (%d bytes) %d slices (%d bytes)", inode, indx, skipped, pos, len(compacted), size) + err := m.newMsg(CompactChunk, slices, id) + if err != nil { + if !strings.Contains(err.Error(), "not exist") && !strings.Contains(err.Error(), "not found") { + logger.Warnf("compact %d %d with %d slices: %s", inode, indx, len(compacted), err) + } + return + } + + var dsbuf []byte + trash := m.toTrash(0) + if trash { + dsbuf = make([]byte, 0, len(compacted)*12) + for _, s := range compacted { + if s.id > 0 { + dsbuf = append(dsbuf, m.encodeDelayedSlice(s.id, s.size)...) + } + } + } + origin := make([]byte, 0, len(ss)*sliceBytes) + for _, s := range ss { + origin = append(origin, marshalSlice(s.pos, s.id, s.size, s.off, s.len)...) 
+ } + st = m.en.doCompactChunk(inode, indx, origin, compacted, skipped, pos, id, size, dsbuf) + if st == syscall.EINVAL { + logger.Infof("compaction for %d:%d is wasted, delete slice %d (%d bytes)", inode, indx, id, size) + m.deleteSlice(id, size) + } else if st == 0 { + m.of.InvalidateChunk(inode, indx) + } else { + logger.Warnf("compact %d %d: %s", inode, indx, st) + } + + if force { + m.Lock() + delete(m.compacting, k) + m.Unlock() + m.compactChunk(inode, indx, once, force) + } +} + func (m *baseMeta) Compact(ctx Context, inode Ino, concurrency int, preFunc, postFunc func()) syscall.Errno { var attr Attr if st := m.GetAttr(ctx, inode, &attr); st != 0 { @@ -1957,7 +2101,7 @@ func (m *baseMeta) Compact(ctx Context, inode Ino, concurrency int, preFunc, pos go func() { defer wg.Done() for c := range chunkChan { - m.en.compactChunk(c.inode, c.indx, false, true) + m.compactChunk(c.inode, c.indx, false, true) postFunc() } }() diff --git a/pkg/meta/redis.go b/pkg/meta/redis.go index 03f9e509ef2a..f7cd2569246b 100644 --- a/pkg/meta/redis.go +++ b/pkg/meta/redis.go @@ -1892,7 +1892,7 @@ func (m *redisMeta) doLink(ctx Context, inode, parent Ino, name string, attr *At if pattr.Parent > TrashInode { return syscall.ENOENT } - if st := m.Access(ctx, parent, MODE_MASK_W, &pattr); st != 0 { + if st := m.Access(ctx, parent, MODE_MASK_W|MODE_MASK_X, &pattr); st != 0 { return st } if pattr.Flags&FlagImmutable != 0 { @@ -2186,51 +2186,12 @@ func (m *redisMeta) doDeleteSustainedInode(sid uint64, inode Ino) error { return err } -func (m *redisMeta) Read(ctx Context, inode Ino, indx uint32, slices *[]Slice) (rerr syscall.Errno) { - defer func() { - if rerr == 0 { - m.touchAtime(ctx, inode, nil) - } - }() - - if slices != nil { - *slices = nil - } - f := m.of.find(inode) - if f != nil { - f.RLock() - defer f.RUnlock() - } - if ss, ok := m.of.ReadChunk(inode, indx); ok { - *slices = ss - return 0 - } - defer m.timeit("Read", time.Now()) +func (m *redisMeta) doRead(ctx Context, inode Ino, indx uint32) ([]*slice, syscall.Errno) { vals, err := m.rdb.LRange(ctx, m.chunkKey(inode, indx), 0, -1).Result() if err != nil { - return errno(err) + return nil, errno(err) } - if len(vals) == 0 { - var attr Attr - eno := m.doGetAttr(ctx, inode, &attr) - if eno != 0 { - return eno - } - if attr.Typ != TypeFile { - return syscall.EPERM - } - return 0 - } - ss := readSlices(vals) - if ss == nil { - return syscall.EIO - } - *slices = buildSlice(ss) - m.of.CacheChunk(inode, indx, *slices) - if !m.conf.ReadOnly && (len(vals) >= 5 || len(*slices) >= 5) { - go m.compactChunk(inode, indx, false, false) - } - return 0 + return readSlices(vals), 0 } func (m *redisMeta) doWrite(ctx Context, inode Ino, indx uint32, off uint32, slice Slice, mtime time.Time, numSlices *int, delta *dirStat, attr *Attr) syscall.Errno { @@ -2868,106 +2829,38 @@ func (r *redisMeta) doCleanupDelayedSlices(edge int64) (int, error) { return count, err } -func (m *redisMeta) compactChunk(inode Ino, indx uint32, once, force bool) { - // avoid too many or duplicated compaction - k := uint64(inode) + (uint64(indx) << 40) - m.Lock() - if once || force { - for m.compacting[k] { - m.Unlock() - time.Sleep(time.Millisecond * 10) - m.Lock() - } - } else if len(m.compacting) > 10 || m.compacting[k] { - m.Unlock() - return - } - m.compacting[k] = true - defer func() { - m.Lock() - delete(m.compacting, k) - m.Unlock() - }() - m.Unlock() - - var ctx = Background - if once && m.rdb.LLen(ctx, m.chunkKey(inode, indx)).Val() < int64(maxSlices) { - return - } - vals, err :=
m.rdb.LRange(ctx, m.chunkKey(inode, indx), 0, int64(maxCompactSlices)).Result() - if err != nil { - return - } - - ss := readSlices(vals) - if ss == nil { - logger.Errorf("Corrupt value for inode %d chunk indx %d", inode, indx) - return - } - skipped := skipSome(ss) - var first, last *slice - if skipped > 0 { - first, last = ss[0], ss[skipped-1] - } - ss = ss[skipped:] - pos, size, slices := compactChunk(ss) - if len(ss) < 2 || size == 0 { - return - } - if first != nil && last != nil && pos+size > first.pos && last.pos+last.len > pos { - panic(fmt.Sprintf("invalid compaction: skipped slices [%+v, %+v], pos %d, size %d", *first, *last, pos, size)) - } - - var id uint64 - st := m.NewSlice(ctx, &id) - if st != 0 { - return - } - logger.Debugf("compact %d:%d: skipped %d slices (%d bytes) %d slices (%d bytes)", inode, indx, skipped, pos, len(ss), size) - err = m.newMsg(CompactChunk, slices, id) - if err != nil { - if !strings.Contains(err.Error(), "not exist") && !strings.Contains(err.Error(), "not found") { - logger.Warnf("compact %d %d with %d slices: %s", inode, indx, len(ss), err) - } - return - } - var buf []byte // trash enabled: track delayed slices +func (m *redisMeta) doCompactChunk(inode Ino, indx uint32, origin []byte, ss []*slice, skipped int, pos uint32, id uint64, size uint32, delayed []byte) syscall.Errno { var rs []*redis.IntCmd // trash disabled: check reference of slices - trash := m.toTrash(0) - if trash { - for _, s := range ss { - if s.id > 0 { - buf = append(buf, m.encodeDelayedSlice(s.id, s.size)...) - } - } - } else { + if delayed == nil { rs = make([]*redis.IntCmd, len(ss)) } key := m.chunkKey(inode, indx) - errno := errno(m.txn(ctx, func(tx *redis.Tx) error { - vals2, err := tx.LRange(ctx, key, 0, int64(len(vals)-1)).Result() + ctx := Background + st := errno(m.txn(ctx, func(tx *redis.Tx) error { + n := len(origin) / sliceBytes + vals2, err := tx.LRange(ctx, key, 0, int64(n-1)).Result() if err != nil { return err } - if len(vals2) != len(vals) { + if len(vals2) != n { return syscall.EINVAL } for i, val := range vals2 { - if val != vals[i] { + if val != string(origin[i*sliceBytes:(i+1)*sliceBytes]) { return syscall.EINVAL } } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.LTrim(ctx, key, int64(len(vals)), -1) + pipe.LTrim(ctx, key, int64(n), -1) pipe.LPush(ctx, key, marshalSlice(pos, id, size, 0, size)) for i := skipped; i > 0; i-- { - pipe.LPush(ctx, key, vals[i-1]) + pipe.LPush(ctx, key, origin[(i-1)*sliceBytes:i*sliceBytes]) } pipe.HSet(ctx, m.sliceRefs(), m.sliceKey(id, size), "0") // create the key to tracking it - if trash { - if len(buf) > 0 { - pipe.HSet(ctx, m.delSlices(), fmt.Sprintf("%d_%d", id, time.Now().Unix()), buf) + if delayed != nil { + if len(delayed) > 0 { + pipe.HSet(ctx, m.delSlices(), fmt.Sprintf("%d_%d", id, time.Now().Unix()), delayed) } } else { for i, s := range ss { @@ -2981,38 +2874,28 @@ func (m *redisMeta) compactChunk(inode Ino, indx uint32, once, force bool) { return err }, key)) // there could be false-negative that the compaction is successful, double-check - if errno != 0 && errno != syscall.EINVAL { - if e := m.rdb.HGet(ctx, m.sliceRefs(), m.sliceKey(id, size)).Err(); e == redis.Nil { - errno = syscall.EINVAL // failed - } else if e == nil { - errno = 0 // successful + if st != 0 && st != syscall.EINVAL { + if e := m.rdb.HGet(ctx, m.sliceRefs(), m.sliceKey(id, size)).Err(); e == nil { + st = 0 // successful + } else if e == redis.Nil { + logger.Infof("compacted chunk %d was not used", id) + st = 
syscall.EINVAL // failed } } - if errno == syscall.EINVAL { + if st == syscall.EINVAL { m.rdb.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(id, size), -1) - logger.Infof("compaction for %d:%d is wasted, delete slice %d (%d bytes)", inode, indx, id, size) - m.deleteSlice(id, size) - } else if errno == 0 { - m.of.InvalidateChunk(inode, indx) + } else if st == 0 { m.cleanupZeroRef(m.sliceKey(id, size)) - if !trash { + if delayed == nil { for i, s := range ss { if s.id > 0 && rs[i].Err() == nil && rs[i].Val() < 0 { m.deleteSlice(s.id, s.size) } } } - } else { - logger.Warnf("compact %s: %s", key, errno) - } - - if force { - m.Lock() - delete(m.compacting, k) - m.Unlock() - m.compactChunk(inode, indx, once, force) } + return st } func (m *redisMeta) scanAllChunks(ctx Context, ch chan<- cchunk, bar *utils.Bar) error { diff --git a/pkg/meta/sql.go b/pkg/meta/sql.go index 55cde1e93046..b3735ec52c72 100644 --- a/pkg/meta/sql.go +++ b/pkg/meta/sql.go @@ -2028,7 +2028,7 @@ func (m *dbMeta) doLink(ctx Context, inode, parent Ino, name string, attr *Attr) } var pattr Attr m.parseAttr(&pn, &pattr) - if st := m.Access(ctx, parent, MODE_MASK_W, &pattr); st != 0 { + if st := m.Access(ctx, parent, MODE_MASK_W|MODE_MASK_X, &pattr); st != 0 { return st } if pn.Flags&FlagImmutable != 0 { @@ -2257,55 +2257,15 @@ func (m *dbMeta) doDeleteSustainedInode(sid uint64, inode Ino) error { return err } -func (m *dbMeta) Read(ctx Context, inode Ino, indx uint32, slices *[]Slice) (rerr syscall.Errno) { - defer func() { - if rerr == 0 { - m.touchAtime(ctx, inode, nil) - } - }() - - if slices != nil { - *slices = nil - } - f := m.of.find(inode) - if f != nil { - f.RLock() - defer f.RUnlock() - } - if ss, ok := m.of.ReadChunk(inode, indx); ok { - *slices = ss - return 0 - } - defer m.timeit("Read", time.Now()) +func (m *dbMeta) doRead(ctx Context, inode Ino, indx uint32) ([]*slice, syscall.Errno) { var c = chunk{Inode: inode, Indx: indx} - err := m.roTxn(func(s *xorm.Session) error { + if err := m.roTxn(func(s *xorm.Session) error { _, err := s.MustCols("indx").Get(&c) return err - }) - if err != nil { - return errno(err) - } - if len(c.Slices) == 0 { - var attr Attr - eno := m.doGetAttr(ctx, inode, &attr) - if eno != 0 { - return eno - } - if attr.Typ != TypeFile { - return syscall.EPERM - } - return 0 - } - ss := readSliceBuf(c.Slices) - if ss == nil { - return syscall.EIO - } - *slices = buildSlice(ss) - m.of.CacheChunk(inode, indx, *slices) - if !m.conf.ReadOnly && (len(c.Slices)/sliceBytes >= 5 || len(*slices) >= 5) { - go m.compactChunk(inode, indx, false, false) + }); err != nil { + return nil, errno(err) } - return 0 + return readSliceBuf(c.Slices), 0 } func (m *dbMeta) doWrite(ctx Context, inode Ino, indx uint32, off uint32, slice Slice, mtime time.Time, numSlices *int, delta *dirStat, attr *Attr) syscall.Errno { @@ -2332,10 +2292,10 @@ func (m *dbMeta) doWrite(ctx Context, inode Ino, indx uint32, off uint32, slice return err } nodeAttr.Mtime = mtime.UnixNano() / 1e3 - nodeAttr.Mtimensec = int16(mtime.Nanosecond()) + nodeAttr.Mtimensec = int16(mtime.Nanosecond() % 1e3) ctime := time.Now() nodeAttr.Ctime = ctime.UnixNano() / 1e3 - nodeAttr.Ctimensec = int16(ctime.Nanosecond()) + nodeAttr.Ctimensec = int16(ctime.Nanosecond() % 1e3) m.parseAttr(&nodeAttr, attr) buf := marshalSlice(off, slice.Id, slice.Size, slice.Off, slice.Len) @@ -2797,96 +2757,19 @@ func (m *dbMeta) doCleanupDelayedSlices(edge int64) (int, error) { return count, nil } -func (m *dbMeta) compactChunk(inode Ino, indx uint32, once, force bool) { - // avoid too 
many or duplicated compaction - k := uint64(inode) + (uint64(indx) << 40) - m.Lock() - if once || force { - for m.compacting[k] { - m.Unlock() - time.Sleep(time.Millisecond * 10) - m.Lock() - } - } else if len(m.compacting) > 10 || m.compacting[k] { - m.Unlock() - return - } - m.compacting[k] = true - defer func() { - m.Lock() - delete(m.compacting, k) - m.Unlock() - }() - m.Unlock() - - var c = chunk{Inode: inode, Indx: indx} - err := m.roTxn(func(s *xorm.Session) error { - _, err := s.MustCols("indx").Get(&c) - return err - }) - if err != nil { - return - } - if once && len(c.Slices) < sliceBytes*maxSlices { - return - } - if len(c.Slices) > sliceBytes*maxCompactSlices { - c.Slices = c.Slices[:sliceBytes*maxCompactSlices] - } - - ss := readSliceBuf(c.Slices) - if ss == nil { - logger.Errorf("Corrupt value for inode %d chunk indx %d", inode, indx) - return - } - skipped := skipSome(ss) - var first, last *slice - if skipped > 0 { - first, last = ss[0], ss[skipped-1] - } - ss = ss[skipped:] - pos, size, slices := compactChunk(ss) - if len(ss) < 2 || size == 0 { - return - } - if first != nil && last != nil && pos+size > first.pos && last.pos+last.len > pos { - panic(fmt.Sprintf("invalid compaction: skipped slices [%+v, %+v], pos %d, size %d", *first, *last, pos, size)) - } - - var id uint64 - st := m.NewSlice(Background, &id) - if st != 0 { - return - } - logger.Debugf("compact %d:%d: skipped %d slices (%d bytes) %d slices (%d bytes)", inode, indx, skipped, pos, len(ss), size) - err = m.newMsg(CompactChunk, slices, id) - if err != nil { - if !strings.Contains(err.Error(), "not exist") && !strings.Contains(err.Error(), "not found") { - logger.Warnf("compact %d %d with %d slices: %s", inode, indx, len(ss), err) - } - return - } - var buf []byte - trash := m.toTrash(0) - if trash { - for _, s := range ss { - if s.id > 0 { - buf = append(buf, m.encodeDelayedSlice(s.id, s.size)...) - } - } - } - err = m.txn(func(s *xorm.Session) error { +func (m *dbMeta) doCompactChunk(inode Ino, indx uint32, origin []byte, ss []*slice, skipped int, pos uint32, id uint64, size uint32, delayed []byte) syscall.Errno { + st := errno(m.txn(func(s *xorm.Session) error { var c2 = chunk{Inode: inode, Indx: indx} _, err := s.ForUpdate().MustCols("indx").Get(&c2) if err != nil { return err } - if len(c2.Slices) < len(c.Slices) || !bytes.Equal(c.Slices, c2.Slices[:len(c.Slices)]) { - logger.Infof("chunk %d:%d was changed %d -> %d", inode, indx, len(c.Slices), len(c2.Slices)) + if len(c2.Slices) < len(origin) || !bytes.Equal(origin, c2.Slices[:len(origin)]) { + logger.Infof("chunk %d:%d was changed %d -> %d", inode, indx, len(origin), len(c2.Slices)) return syscall.EINVAL } - c2.Slices = append(append(c2.Slices[:skipped*sliceBytes], marshalSlice(pos, id, size, 0, size)...), c2.Slices[len(c.Slices):]...) + c2.Slices = append(append(c2.Slices[:skipped*sliceBytes], marshalSlice(pos, id, size, 0, size)...), c2.Slices[len(origin):]...) if _, err := s.Where("Inode = ? 
AND indx = ?", inode, indx).Update(c2); err != nil { return err } @@ -2894,9 +2777,9 @@ func (m *dbMeta) compactChunk(inode Ino, indx uint32, once, force bool) { if err = mustInsert(s, sliceRef{id, size, 1}); err != nil { return err } - if trash { - if len(buf) > 0 { - if err = mustInsert(s, &delslices{id, time.Now().Unix(), buf}); err != nil { + if delayed != nil { + if len(delayed) > 0 { + if err = mustInsert(s, &delslices{id, time.Now().Unix(), delayed}); err != nil { return err } } @@ -2911,58 +2794,46 @@ func (m *dbMeta) compactChunk(inode Ino, indx uint32, once, force bool) { } } return nil - }) + })) // there could be false-negative that the compaction is successful, double-check - if err != nil { - var c = sliceRef{Id: id} + if st != 0 && st != syscall.EINVAL { var ok bool - e := m.roTxn(func(s *xorm.Session) error { + if err := m.roTxn(func(s *xorm.Session) error { var e error - ok, e = s.Get(&c) + ok, e = s.Get(&sliceRef{Id: id}) return e - }) - if e == nil { + }); err == nil { if ok { - err = nil + st = 0 } else { logger.Infof("compacted chunk %d was not used", id) - err = syscall.EINVAL + st = syscall.EINVAL } } } - if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL { - logger.Infof("compaction for %d:%d is wasted, delete slice %d (%d bytes)", inode, indx, id, size) - m.deleteSlice(id, size) - } else if err == nil { - m.of.InvalidateChunk(inode, indx) - if !trash { - for _, s := range ss { - if s.id == 0 { - continue - } - var ref = sliceRef{Id: s.id} - var ok bool - err := m.roTxn(func(s *xorm.Session) error { - var e error - ok, e = s.Get(&ref) - return e - }) - if err == nil && ok && ref.Refs <= 0 { - m.deleteSlice(s.id, s.size) - } + if st == syscall.EINVAL { + _ = m.txn(func(s *xorm.Session) error { + return mustInsert(s, &sliceRef{id, size, 0}) + }) + } else if st == 0 && delayed == nil { + for _, s := range ss { + if s.id == 0 { + continue + } + var ref = sliceRef{Id: s.id} + var ok bool + err := m.roTxn(func(s *xorm.Session) error { + var e error + ok, e = s.Get(&ref) + return e + }) + if err == nil && ok && ref.Refs <= 0 { + m.deleteSlice(s.id, s.size) } } - } else { - logger.Warnf("compact %d %d: %s", inode, indx, err) - } - - if force { - m.Lock() - delete(m.compacting, k) - m.Unlock() - m.compactChunk(inode, indx, once, force) } + return st } func dup(b []byte) []byte { diff --git a/pkg/meta/tkv.go b/pkg/meta/tkv.go index 11f83033d19e..613d6569f9f4 100644 --- a/pkg/meta/tkv.go +++ b/pkg/meta/tkv.go @@ -1730,7 +1730,7 @@ func (m *kvMeta) doLink(ctx Context, inode, parent Ino, name string, attr *Attr) if pattr.Parent > TrashInode { return syscall.ENOENT } - if st := m.Access(ctx, parent, MODE_MASK_W, &pattr); st != 0 { + if st := m.Access(ctx, parent, MODE_MASK_W|MODE_MASK_X, &pattr); st != 0 { return st } if pattr.Flags&FlagImmutable != 0 { @@ -1879,51 +1879,12 @@ func (m *kvMeta) doDeleteSustainedInode(sid uint64, inode Ino) error { return err } -func (m *kvMeta) Read(ctx Context, inode Ino, indx uint32, slices *[]Slice) (rerr syscall.Errno) { - defer func() { - if rerr == 0 { - m.touchAtime(ctx, inode, nil) - } - }() - - if slices != nil { - *slices = nil - } - f := m.of.find(inode) - if f != nil { - f.RLock() - defer f.RUnlock() - } - if ss, ok := m.of.ReadChunk(inode, indx); ok { - *slices = ss - return 0 - } - defer m.timeit("Read", time.Now()) +func (m *kvMeta) doRead(ctx Context, inode Ino, indx uint32) ([]*slice, syscall.Errno) { val, err := m.get(m.chunkKey(inode, indx)) if err != nil { - return errno(err) - } - if len(val) == 0 { - var attr 
Attr - eno := m.doGetAttr(ctx, inode, &attr) - if eno != 0 { - return eno - } - if attr.Typ != TypeFile { - return syscall.EPERM - } - return 0 - } - ss := readSliceBuf(val) - if ss == nil { - return syscall.EIO - } - *slices = buildSlice(ss) - m.of.CacheChunk(inode, indx, *slices) - if !m.conf.ReadOnly && (len(val)/sliceBytes >= 5 || len(*slices) >= 5) { - go m.compactChunk(inode, indx, false, false) + return nil, errno(err) } - return 0 + return readSliceBuf(val), 0 } func (m *kvMeta) doWrite(ctx Context, inode Ino, indx uint32, off uint32, slice Slice, mtime time.Time, numSlices *int, delta *dirStat, attr *Attr) syscall.Errno { @@ -2360,81 +2321,8 @@ func (m *kvMeta) doCleanupDelayedSlices(edge int64) (int, error) { return count, nil } -func (m *kvMeta) compactChunk(inode Ino, indx uint32, once, force bool) { - // avoid too many or duplicated compaction - k := uint64(inode) + (uint64(indx) << 40) - m.Lock() - if once || force { - for m.compacting[k] { - m.Unlock() - time.Sleep(time.Millisecond * 10) - m.Lock() - } - } else if len(m.compacting) > 10 || m.compacting[k] { - m.Unlock() - return - } - m.compacting[k] = true - defer func() { - m.Lock() - delete(m.compacting, k) - m.Unlock() - }() - m.Unlock() - - buf, err := m.get(m.chunkKey(inode, indx)) - if err != nil { - return - } - if once && len(buf) < sliceBytes*maxSlices { - return - } - if len(buf) > sliceBytes*maxCompactSlices { - buf = buf[:sliceBytes*maxCompactSlices] - } - - ss := readSliceBuf(buf) - if ss == nil { - logger.Errorf("Corrupt value for inode %d chunk indx %d", inode, indx) - return - } - skipped := skipSome(ss) - var first, last *slice - if skipped > 0 { - first, last = ss[0], ss[skipped-1] - } - ss = ss[skipped:] - pos, size, slices := compactChunk(ss) - if len(ss) < 2 || size == 0 { - return - } - if first != nil && last != nil && pos+size > first.pos && last.pos+last.len > pos { - panic(fmt.Sprintf("invalid compaction: skipped slices [%+v, %+v], pos %d, size %d", *first, *last, pos, size)) - } - - var id uint64 - st := m.NewSlice(Background, &id) - if st != 0 { - return - } - logger.Debugf("compact %d:%d: skipped %d slices (%d bytes) %d slices (%d bytes)", inode, indx, skipped, pos, len(ss), size) - err = m.newMsg(CompactChunk, slices, id) - if err != nil { - if !strings.Contains(err.Error(), "not exist") && !strings.Contains(err.Error(), "not found") { - logger.Warnf("compact %d %d with %d slices: %s", inode, indx, len(ss), err) - } - return - } - var dsbuf []byte - trash := m.toTrash(0) - if trash { - for _, s := range ss { - if s.id > 0 { - dsbuf = append(dsbuf, m.encodeDelayedSlice(s.id, s.size)...) 
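The deletions running through redis.go, sql.go, and tkv.go here are the point of the compaction refactor: the three engine-specific compactChunk bodies shared all of their deduplication and validation logic, and only the final compare-and-swap against the store differed. That shared logic now lives once in baseMeta.compactChunk, which calls into the engine through doCompactChunk. A toy sketch of this template-method split, with invented names (engine, base, doSwap) standing in for the real interfaces:

```go
package main

import "fmt"

// engine keeps only the storage-specific step: atomically replace the old
// slice list with the compacted one, failing if it changed underneath us.
type engine interface {
	doSwap(key, old, compacted string) error
}

// base holds the engine-independent driver: dedup of in-flight compactions,
// validation, and bookkeeping.
type base struct {
	en         engine
	compacting map[string]bool
}

func (b *base) compact(key, old, compacted string) error {
	if b.compacting[key] { // skip duplicated compactions
		return nil
	}
	b.compacting[key] = true
	defer delete(b.compacting, key)
	return b.en.doSwap(key, old, compacted)
}

// memEngine is a trivial in-memory store used only to exercise the pattern.
type memEngine struct{ data map[string]string }

func (m *memEngine) doSwap(key, old, compacted string) error {
	if m.data[key] != old {
		return fmt.Errorf("chunk %s was changed, abort", key)
	}
	m.data[key] = compacted
	return nil
}

func main() {
	en := &memEngine{data: map[string]string{"c1": "s1,s2,s3"}}
	b := &base{en: en, compacting: map[string]bool{}}
	fmt.Println(b.compact("c1", "s1,s2,s3", "s123"), en.data["c1"])
}
```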
- } - } - } - err = m.txn(func(tx *kvTxn) error { +func (m *kvMeta) doCompactChunk(inode Ino, indx uint32, buf []byte, ss []*slice, skipped int, pos uint32, id uint64, size uint32, delayed []byte) syscall.Errno { + st := errno(m.txn(func(tx *kvTxn) error { buf2 := tx.get(m.chunkKey(inode, indx)) if len(buf2) < len(buf) || !bytes.Equal(buf, buf2[:len(buf)]) { logger.Infof("chunk %d:%d was changed %d -> %d", inode, indx, len(buf), len(buf2)) @@ -2445,9 +2333,9 @@ func (m *kvMeta) compactChunk(inode Ino, indx uint32, once, force bool) { tx.set(m.chunkKey(inode, indx), buf2) // create the key to tracking it tx.set(m.sliceKey(id, size), make([]byte, 8)) - if trash { - if len(dsbuf) > 0 { - tx.set(m.delSliceKey(time.Now().Unix(), id), dsbuf) + if delayed != nil { + if len(delayed) > 0 { + tx.set(m.delSliceKey(time.Now().Unix(), id), delayed) } } else { for _, s := range ss { @@ -2457,28 +2345,28 @@ func (m *kvMeta) compactChunk(inode Ino, indx uint32, once, force bool) { } } return nil - }) + })) // there could be false-negative that the compaction is successful, double-check - if err != nil { - logger.Warnf("compact %d:%d failed: %s", inode, indx, err) + if st != 0 && st != syscall.EINVAL { refs, e := m.get(m.sliceKey(id, size)) if e == nil { if len(refs) > 0 { - err = nil + st = 0 } else { logger.Infof("compacted chunk %d was not used", id) - err = syscall.EINVAL + st = syscall.EINVAL } } } - if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL { - logger.Infof("compaction for %d:%d is wasted, delete slice %d (%d bytes)", inode, indx, id, size) - m.deleteSlice(id, size) - } else if err == nil { - m.of.InvalidateChunk(inode, indx) + if st == syscall.EINVAL { + _ = m.txn(func(tx *kvTxn) error { + tx.incrBy(m.sliceKey(id, size), -1) + return nil + }) + } else if st == 0 { m.cleanupZeroRef(id, size) - if !trash { + if delayed == nil { var refs int64 for _, s := range ss { if s.id > 0 && m.client.txn(func(tx *kvTxn) error { @@ -2489,16 +2377,8 @@ func (m *kvMeta) compactChunk(inode Ino, indx uint32, once, force bool) { } } } - } else { - logger.Warnf("compact %d %d: %s", inode, indx, err) - } - - if force { - m.Lock() - delete(m.compacting, k) - m.Unlock() - m.compactChunk(inode, indx, once, force) } + return st } func (m *kvMeta) scanAllChunks(ctx Context, ch chan<- cchunk, bar *utils.Bar) error { diff --git a/pkg/object/azure.go b/pkg/object/azure.go index 25b555852a51..abe77141018a 100644 --- a/pkg/object/azure.go +++ b/pkg/object/azure.go @@ -78,14 +78,15 @@ func (b *wasb) Head(key string) (Object, error) { }, nil } -func (b *wasb) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (b *wasb) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { download, err := b.container.NewBlobClient(key).DownloadStream(ctx, &azblob.DownloadStreamOptions{Range: blob2.HTTPRange{Offset: off, Count: limit}}) if err != nil { return nil, err } - ReqIDCache.put(key, aws.StringValue(download.RequestID)) + attrs := applyGetters(getters...) 
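applyGetters, which appears throughout pkg/object from here on, is a variadic option pattern run in reverse: instead of configuring the call, each AttrGetter carries a caller-owned destination pointer, and the backend writes response attributes (request ID, storage class) back through it. This replaces the removed global ReqIDCache keyed by object name. A self-contained sketch of the mechanism, with hypothetical names standing in for the real object package types:

```go
package main

import "fmt"

// attrs collects the response attributes a backend may report.
type attrs struct {
	requestID    *string
	storageClass *string
}

// getter is the option type; each one wires up a caller-owned destination.
type getter func(*attrs)

func withRequestID(dst *string) getter    { return func(a *attrs) { a.requestID = dst } }
func withStorageClass(dst *string) getter { return func(a *attrs) { a.storageClass = dst } }

// apply folds the variadic options into one attribute sink.
func apply(gs ...getter) *attrs {
	a := &attrs{}
	for _, g := range gs {
		g(a)
	}
	return a
}

// The setters only write when the caller asked for the value.
func (a *attrs) setRequestID(v string) *attrs {
	if a.requestID != nil {
		*a.requestID = v
	}
	return a
}

func (a *attrs) setStorageClass(v string) *attrs {
	if a.storageClass != nil {
		*a.storageClass = v
	}
	return a
}

// get simulates a backend call that reports attributes out of the response.
func get(key string, gs ...getter) {
	apply(gs...).setRequestID("req-123").setStorageClass("STANDARD")
}

func main() {
	var reqID, sc string
	get("chunks/0/0/123", withRequestID(&reqID), withStorageClass(&sc))
	fmt.Println(reqID, sc) // req-123 STANDARD
}
```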
// TODO fire another property request to get the actual storage class - return scReadCloser{download.Body, b.sc}, err + attrs.SetRequestID(aws.StringValue(download.RequestID)).SetStorageClass(b.sc) + return download.Body, err } func str2Tier(tier string) *blob2.AccessTier { @@ -97,13 +98,14 @@ func str2Tier(tier string) *blob2.AccessTier { return nil } -func (b *wasb) Put(key string, data io.Reader) error { +func (b *wasb) Put(key string, data io.Reader, getters ...AttrGetter) error { options := azblob.UploadStreamOptions{} if b.sc != "" { options.AccessTier = str2Tier(b.sc) } resp, err := b.azblobCli.UploadStream(ctx, b.cName, key, data, &options) - ReqIDCache.put(key, aws.StringValue(resp.RequestID)) + attrs := applyGetters(getters...) + attrs.SetRequestID(aws.StringValue(resp.RequestID)).SetStorageClass(b.sc) return err } @@ -122,14 +124,15 @@ func (b *wasb) Copy(dst, src string) error { return err } -func (b *wasb) Delete(key string) error { +func (b *wasb) Delete(key string, getters ...AttrGetter) error { resp, err := b.container.NewBlobClient(key).Delete(ctx, nil) if err != nil { if e, ok := err.(*azcore.ResponseError); ok && e.ErrorCode == string(bloberror.BlobNotFound) { err = nil } } - ReqIDCache.put(key, aws.StringValue(resp.RequestID)) + attrs := applyGetters(getters...) + attrs.SetRequestID(aws.StringValue(resp.RequestID)) return err } @@ -177,12 +180,9 @@ func (b *wasb) List(prefix, marker, delimiter string, limit int64, followLink bo return objs, nil } -func (b *wasb) SetStorageClass(sc string) { +func (b *wasb) SetStorageClass(sc string) error { b.sc = sc -} - -func (b *wasb) StorageClass() string { - return b.sc + return nil } func autoWasbEndpoint(containerName, accountName, scheme string, credential *azblob.SharedKeyCredential) (string, error) { diff --git a/pkg/object/b2.go b/pkg/object/b2.go index 0e8e8dd20626..56ed8ae071a3 100644 --- a/pkg/object/b2.go +++ b/pkg/object/b2.go @@ -82,7 +82,7 @@ func (c *b2client) Head(key string) (Object, error) { }, nil } -func (c *b2client) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (c *b2client) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { if off == 0 && limit == -1 { _, r, err := c.bucket.DownloadFileByName(key) return r, err @@ -95,7 +95,7 @@ func (c *b2client) Get(key string, off, limit int64) (io.ReadCloser, error) { return r, err } -func (c *b2client) Put(key string, data io.Reader) error { +func (c *b2client) Put(key string, data io.Reader, getters ...AttrGetter) error { _, err := c.bucket.UploadFile(key, nil, data) return err } @@ -110,7 +110,7 @@ func (c *b2client) Copy(dst, src string) error { return err } -func (c *b2client) Delete(key string) error { +func (c *b2client) Delete(key string, getters ...AttrGetter) error { f, err := c.getFileInfo(key) if err != nil { if strings.HasPrefix(err.Error(), "not_found") { diff --git a/pkg/object/bos.go b/pkg/object/bos.go index 1c87b37f7e93..f46a6dd9e2ee 100644 --- a/pkg/object/bos.go +++ b/pkg/object/bos.go @@ -57,6 +57,11 @@ func (q *bosclient) Limits() Limits { } } +func (q *bosclient) SetStorageClass(sc string) error { + q.sc = sc + return nil +} + func (q *bosclient) Create() error { _, err := q.c.PutBucket(q.bucket) if err == nil && q.sc != "" { @@ -88,7 +93,7 @@ func (q *bosclient) Head(key string) (Object, error) { }, nil } -func (q *bosclient) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (q *bosclient) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { var r 
*api.GetObjectResult var err error if limit > 0 { @@ -101,10 +106,12 @@ func (q *bosclient) Get(key string, off, limit int64) (io.ReadCloser, error) { if err != nil { return nil, err } - return scReadCloser{r.Body, r.StorageClass}, nil + attrs := applyGetters(getters...) + attrs.SetStorageClass(r.StorageClass) + return r.Body, nil } -func (q *bosclient) Put(key string, in io.Reader) error { +func (q *bosclient) Put(key string, in io.Reader, getters ...AttrGetter) error { b, vlen, err := findLen(in) if err != nil { return err @@ -118,6 +125,8 @@ func (q *bosclient) Put(key string, in io.Reader) error { args.StorageClass = q.sc } _, err = q.c.PutObject(q.bucket, key, body, args) + attrs := applyGetters(getters...) + attrs.SetStorageClass(q.sc) return err } @@ -130,7 +139,7 @@ func (q *bosclient) Copy(dst, src string) error { return err } -func (q *bosclient) Delete(key string) error { +func (q *bosclient) Delete(key string, getters ...AttrGetter) error { err := q.c.DeleteObject(q.bucket, key) if err != nil && strings.Contains(err.Error(), "NoSuchKey") { err = nil diff --git a/pkg/object/ceph.go b/pkg/object/ceph.go index 930a65f7fcce..c048ac60dd6a 100644 --- a/pkg/object/ceph.go +++ b/pkg/object/ceph.go @@ -127,7 +127,7 @@ func (r *cephReader) Close() error { return nil } -func (c *ceph) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (c *ceph) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { if _, err := c.Head(key); err != nil { return nil, err } @@ -144,7 +144,7 @@ var cephPool = sync.Pool{ }, } -func (c *ceph) Put(key string, in io.Reader) error { +func (c *ceph) Put(key string, in io.Reader, getters ...AttrGetter) error { // ceph default osd_max_object_size = 128M return c.do(func(ctx *rados.IOContext) error { if b, ok := in.(*bytes.Reader); ok { @@ -181,7 +181,7 @@ func (c *ceph) Put(key string, in io.Reader) error { }) } -func (c *ceph) Delete(key string) error { +func (c *ceph) Delete(key string, getters ...AttrGetter) error { err := c.do(func(ctx *rados.IOContext) error { return ctx.Delete(key) }) diff --git a/pkg/object/cos.go b/pkg/object/cos.go index e3908883d2a3..caf0d0cab2e5 100644 --- a/pkg/object/cos.go +++ b/pkg/object/cos.go @@ -100,7 +100,7 @@ func (c *COS) Head(key string) (Object, error) { return &obj{key, size, mtime, strings.HasSuffix(key, "/"), sc}, nil } -func (c *COS) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (c *COS) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { params := &cos.ObjectGetOptions{Range: getRange(off, limit)} resp, err := c.c.Object.Get(ctx, key, params) if err != nil { @@ -119,15 +119,13 @@ func (c *COS) Get(key string, off, limit int64) (io.ReadCloser, error) { resp.Body = verifyChecksum(resp.Body, resp.Header.Get(cosChecksumKey), length) } if resp != nil { - ReqIDCache.put(key, resp.Header.Get(cosRequestIDKey)) - } - if sc := resp.Header.Get(cosStorageClassHeader); sc != "" { - return scReadCloser{resp.Body, sc}, nil + attrs := applyGetters(getters...) 
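The storage class is now captured per response because headers like the one behind cosStorageClassHeader describe what the object is actually stored as, while the old GetStorageClassOrDefault could only report the client's configured default. The per-request value feeds the method and storage-class labels on the request metrics. A sketch of that labeling, assuming the Prometheus Go client and an illustrative metric name:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Bytes transferred per (method, storage class), mirroring objectDataBytes.
	dataBytes := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "object_request_data_bytes", // illustrative name
		Help: "Bytes transferred, labeled by method and storage class",
	}, []string{"method", "storageClass"})

	// The class is taken from each response, so objects in one bucket can
	// legitimately land in different label sets.
	dataBytes.WithLabelValues("GET", "STANDARD_IA").Add(4 << 20)

	// Read the counter back to show the label set took effect.
	c, _ := dataBytes.GetMetricWithLabelValues("GET", "STANDARD_IA")
	m := &dto.Metric{}
	_ = c.Write(m)
	fmt.Println(m.GetCounter().GetValue()) // 4.194304e+06
}
```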
+ attrs.SetRequestID(resp.Header.Get(cosRequestIDKey)).SetStorageClass(resp.Header.Get(cosStorageClassHeader)) } return resp.Body, nil } -func (c *COS) Put(key string, in io.Reader) error { +func (c *COS) Put(key string, in io.Reader, getters ...AttrGetter) error { var options cos.ObjectPutOptions if ins, ok := in.(io.ReadSeeker); ok { header := http.Header(map[string][]string{ @@ -143,7 +141,8 @@ func (c *COS) Put(key string, in io.Reader) error { } resp, err := c.c.Object.Put(ctx, key, in, &options) if resp != nil { - ReqIDCache.put(key, resp.Header.Get(cosRequestIDKey)) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.Header.Get(cosRequestIDKey)).SetStorageClass(c.sc) } return err } @@ -158,10 +157,11 @@ func (c *COS) Copy(dst, src string) error { return err } -func (c *COS) Delete(key string) error { +func (c *COS) Delete(key string, getters ...AttrGetter) error { resp, err := c.c.Object.Delete(ctx, key) if resp != nil { - ReqIDCache.put(key, resp.Header.Get(cosRequestIDKey)) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.Header.Get(cosRequestIDKey)) } return err } @@ -271,12 +271,9 @@ func (c *COS) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, result.NextKeyMarker, nil } -func (c *COS) SetStorageClass(sc string) { +func (c *COS) SetStorageClass(sc string) error { c.sc = sc -} - -func (c *COS) StorageClass() string { - return c.sc + return nil } func autoCOSEndpoint(bucketName, accessKey, secretKey, token string) (string, error) { diff --git a/pkg/object/dragonfly.go b/pkg/object/dragonfly.go index 47795d606f07..7c91ef06756a 100644 --- a/pkg/object/dragonfly.go +++ b/pkg/object/dragonfly.go @@ -260,7 +260,7 @@ func (d *dragonfly) Head(key string) (Object, error) { } // Get returns the object if it exists. -func (d *dragonfly) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (d *dragonfly) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { u, err := url.Parse(d.endpoint) if err != nil { return nil, err @@ -291,15 +291,14 @@ func (d *dragonfly) Get(key string, off, limit int64) (io.ReadCloser, error) { if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("bad response status %s", resp.Status) } + attrs := applyGetters(getters...) + attrs.SetStorageClass(resp.Header.Get(HeaderDragonflyObjectMetaStorageClass)) - if sc := resp.Header.Get(HeaderDragonflyObjectMetaStorageClass); sc != "" { - return scReadCloser{resp.Body, sc}, nil - } return resp.Body, nil } // Put creates or replaces the object. -func (d *dragonfly) Put(key string, data io.Reader) error { +func (d *dragonfly) Put(key string, data io.Reader, getters ...AttrGetter) error { body := &bytes.Buffer{} writer := multipart.NewWriter(body) @@ -407,7 +406,7 @@ func (d *dragonfly) Copy(dst, src string) error { } // Delete deletes the object if it exists. -func (d *dragonfly) Delete(key string) error { +func (d *dragonfly) Delete(key string, getters ...AttrGetter) error { // get delete object request. 
u, err := url.Parse(d.endpoint) if err != nil { diff --git a/pkg/object/encrypt.go b/pkg/object/encrypt.go index bbff660b28a6..fed48e3f5ebe 100644 --- a/pkg/object/encrypt.go +++ b/pkg/object/encrypt.go @@ -229,8 +229,8 @@ func (e *encrypted) String() string { return fmt.Sprintf("%s(encrypted)", e.ObjectStorage) } -func (e *encrypted) Get(key string, off, limit int64) (io.ReadCloser, error) { - r, err := e.ObjectStorage.Get(key, 0, -1) +func (e *encrypted) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { + r, err := e.ObjectStorage.Get(key, 0, -1, getters...) if err != nil { return nil, err } @@ -254,7 +254,7 @@ func (e *encrypted) Get(key string, off, limit int64) (io.ReadCloser, error) { return io.NopCloser(bytes.NewBuffer(data)), nil } -func (e *encrypted) Put(key string, in io.Reader) error { +func (e *encrypted) Put(key string, in io.Reader, getters ...AttrGetter) error { plain, err := io.ReadAll(in) if err != nil { return err @@ -263,7 +263,7 @@ func (e *encrypted) Put(key string, in io.Reader) error { if err != nil { return err } - return e.ObjectStorage.Put(key, bytes.NewReader(ciphertext)) + return e.ObjectStorage.Put(key, bytes.NewReader(ciphertext), getters...) } var _ ObjectStorage = &encrypted{} diff --git a/pkg/object/etcd.go b/pkg/object/etcd.go index 58d82ed400c7..efaf20e0a735 100644 --- a/pkg/object/etcd.go +++ b/pkg/object/etcd.go @@ -46,7 +46,7 @@ func (c *etcdClient) String() string { return fmt.Sprintf("etcd://%s/", c.addr) } -func (c *etcdClient) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (c *etcdClient) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { resp, err := c.kv.Get(context.TODO(), key, etcd.WithLimit(1)) if err != nil { return nil, err @@ -66,7 +66,7 @@ func (c *etcdClient) Get(key string, off, limit int64) (io.ReadCloser, error) { return nil, os.ErrNotExist } -func (c *etcdClient) Put(key string, in io.Reader) error { +func (c *etcdClient) Put(key string, in io.Reader, getters ...AttrGetter) error { d, err := io.ReadAll(in) if err != nil { return err @@ -94,7 +94,7 @@ func (c *etcdClient) Head(key string) (Object, error) { return nil, os.ErrNotExist } -func (c *etcdClient) Delete(key string) error { +func (c *etcdClient) Delete(key string, getters ...AttrGetter) error { _, err := c.kv.Delete(context.TODO(), key) return err } diff --git a/pkg/object/file.go b/pkg/object/file.go index e4f5f6dc53c8..64f144af7d43 100644 --- a/pkg/object/file.go +++ b/pkg/object/file.go @@ -109,7 +109,7 @@ type SectionReaderCloser struct { io.Closer } -func (d *filestore) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (d *filestore) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { p := d.path(key) f, err := os.Open(p) @@ -136,7 +136,7 @@ func (d *filestore) Get(key string, off, limit int64) (io.ReadCloser, error) { return f, nil } -func (d *filestore) Put(key string, in io.Reader) (err error) { +func (d *filestore) Put(key string, in io.Reader, getters ...AttrGetter) (err error) { p := d.path(key) if strings.HasSuffix(key, dirSuffix) || key == "" && strings.HasSuffix(d.root, dirSuffix) { @@ -199,7 +199,7 @@ func (d *filestore) Copy(dst, src string) error { return d.Put(dst, r) } -func (d *filestore) Delete(key string) error { +func (d *filestore) Delete(key string, getters ...AttrGetter) error { err := os.Remove(d.path(key)) if err != nil && os.IsNotExist(err) { err = nil diff --git a/pkg/object/gluster.go b/pkg/object/gluster.go index 
81d68ec0551f..3b388adb9001 100644 --- a/pkg/object/gluster.go +++ b/pkg/object/gluster.go @@ -85,7 +85,7 @@ func (g *gluster) toFile(key string, fi fs.FileInfo, isSymlink bool) *file { } } -func (g *gluster) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (g *gluster) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { f, err := g.vol().Open(key) if err != nil { return nil, err @@ -110,7 +110,7 @@ func (g *gluster) Get(key string, off, limit int64) (io.ReadCloser, error) { return f, nil } -func (g *gluster) Put(key string, in io.Reader) error { +func (g *gluster) Put(key string, in io.Reader, getters ...AttrGetter) error { v := g.vol() if strings.HasSuffix(key, dirSuffix) { return v.MkdirAll(key, os.FileMode(0777)) @@ -146,7 +146,7 @@ func (g *gluster) Put(key string, in io.Reader) error { return err } -func (g *gluster) Delete(key string) error { +func (g *gluster) Delete(key string, getters ...AttrGetter) error { v := g.vol() err := v.Unlink(key) if err != nil && strings.Contains(err.Error(), "is a directory") { diff --git a/pkg/object/gs.go b/pkg/object/gs.go index 53f4ac39752a..bb080477012b 100644 --- a/pkg/object/gs.go +++ b/pkg/object/gs.go @@ -105,22 +105,26 @@ func (g *gs) Head(key string) (Object, error) { }, nil } -func (g *gs) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (g *gs) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { reader, err := g.client.Bucket(g.bucket).Object(key).NewRangeReader(ctx, off, limit) if err != nil { return nil, err } // TODO fire another attr request to get the actual storage class - return scReadCloser{reader, g.sc}, nil + attrs := applyGetters(getters...) + attrs.SetStorageClass(g.sc) + return reader, nil } -func (g *gs) Put(key string, data io.Reader) error { +func (g *gs) Put(key string, data io.Reader, getters ...AttrGetter) error { writer := g.client.Bucket(g.bucket).Object(key).NewWriter(ctx) writer.StorageClass = g.sc _, err := io.Copy(writer, data) if err != nil { return err } + attrs := applyGetters(getters...) 
+ attrs.SetStorageClass(g.sc) return writer.Close() } @@ -135,7 +139,7 @@ func (g *gs) Copy(dst, src string) error { return err } -func (g *gs) Delete(key string) error { +func (g *gs) Delete(key string, getters ...AttrGetter) error { if err := g.client.Bucket(g.bucket).Object(key).Delete(ctx); err != storage.ErrObjectNotExist { return err } @@ -171,12 +175,9 @@ func (g *gs) List(prefix, marker, delimiter string, limit int64, followLink bool return objs, nil } -func (g *gs) SetStorageClass(sc string) { +func (g *gs) SetStorageClass(sc string) error { g.sc = sc -} - -func (g *gs) StorageClass() string { - return g.sc + return nil } func newGS(endpoint, accessKey, secretKey, token string) (ObjectStorage, error) { diff --git a/pkg/object/hdfs.go b/pkg/object/hdfs.go index aaab874dfa10..1500fbec8adc 100644 --- a/pkg/object/hdfs.go +++ b/pkg/object/hdfs.go @@ -102,7 +102,7 @@ func (h *hdfsclient) toFile(key string, info os.FileInfo) *file { return f } -func (h *hdfsclient) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (h *hdfsclient) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { f, err := h.c.Open(h.path(key)) if err != nil { return nil, err @@ -122,7 +122,7 @@ func (h *hdfsclient) Get(key string, off, limit int64) (io.ReadCloser, error) { return f, nil } -func (h *hdfsclient) Put(key string, in io.Reader) (err error) { +func (h *hdfsclient) Put(key string, in io.Reader, getters ...AttrGetter) (err error) { p := h.path(key) if strings.HasSuffix(p, dirSuffix) { return h.c.MkdirAll(p, 0777&^h.umask) @@ -178,7 +178,7 @@ func IsErrReplicating(err error) bool { return ok && pe.Err == hdfs.ErrReplicating } -func (h *hdfsclient) Delete(key string) error { +func (h *hdfsclient) Delete(key string, getters ...AttrGetter) error { err := h.c.Remove(h.path(key)) if err != nil && os.IsNotExist(err) { err = nil diff --git a/pkg/object/ibmcos.go b/pkg/object/ibmcos.go index f9768dbdf284..78c2aa162e6f 100644 --- a/pkg/object/ibmcos.go +++ b/pkg/object/ibmcos.go @@ -71,7 +71,7 @@ func (s *ibmcos) Limits() Limits { } } -func (s *ibmcos) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *ibmcos) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { params := &s3.GetObjectInput{Bucket: &s.bucket, Key: &key} if off > 0 || limit > 0 { var r string @@ -84,17 +84,18 @@ func (s *ibmcos) Get(key string, off, limit int64) (io.ReadCloser, error) { } var reqID string resp, err := s.s3.GetObjectWithContext(ctx, params, request.WithGetResponseHeader(s3RequestIDKey, &reqID)) - ReqIDCache.put(key, reqID) + attrs := applyGetters(getters...) + attrs.SetRequestID(reqID) if err != nil { return nil, err } if resp.StorageClass != nil { - return scReadCloser{resp.Body, *resp.StorageClass}, nil + attrs.SetStorageClass(*resp.StorageClass) } return resp.Body, nil } -func (s *ibmcos) Put(key string, in io.Reader) error { +func (s *ibmcos) Put(key string, in io.Reader, getters ...AttrGetter) error { var body io.ReadSeeker if b, ok := in.(io.ReadSeeker); ok { body = b @@ -117,7 +118,8 @@ func (s *ibmcos) Put(key string, in io.Reader) error { } var reqID string _, err := s.s3.PutObjectWithContext(ctx, params, request.WithGetResponseHeader(s3RequestIDKey, &reqID)) - ReqIDCache.put(key, reqID) + attrs := applyGetters(getters...) 
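SetStorageClass returning an error (and the removal of the StorageClass() getter) runs through every backend in this patch: the configured class is now only a write-side default, and the authoritative per-request value comes back via AttrGetter. Callers can probe for the capability with a type assertion; a sketch of that probing, where supportStorageClass is an invented name for the optional interface:

```go
package main

import "fmt"

type objectStorage interface {
	Put(key string, data []byte) error
}

// supportStorageClass is an optional capability interface, discovered with a
// type assertion rather than being part of the core objectStorage contract.
type supportStorageClass interface {
	SetStorageClass(sc string) error
}

type memStore struct{ sc string }

func (m *memStore) Put(key string, data []byte) error { return nil }
func (m *memStore) SetStorageClass(sc string) error   { m.sc = sc; return nil }

// configure applies a storage class only when the backend supports one.
func configure(store objectStorage, sc string) error {
	if sc == "" {
		return nil
	}
	if s, ok := store.(supportStorageClass); ok {
		return s.SetStorageClass(sc)
	}
	return fmt.Errorf("storage class is not supported by %T", store)
}

func main() {
	fmt.Println(configure(&memStore{}, "STANDARD_IA")) // <nil>
}
```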
+ attrs.SetRequestID(reqID).SetStorageClass(s.sc) return err } @@ -156,14 +158,15 @@ func (s *ibmcos) Head(key string) (Object, error) { }, nil } -func (s *ibmcos) Delete(key string) error { +func (s *ibmcos) Delete(key string, getters ...AttrGetter) error { param := s3.DeleteObjectInput{ Bucket: &s.bucket, Key: &key, } var reqID string _, err := s.s3.DeleteObjectWithContext(ctx, ¶m, request.WithGetResponseHeader(s3RequestIDKey, &reqID)) - ReqIDCache.put(key, reqID) + attrs := applyGetters(getters...) + attrs.SetRequestID(reqID) return err } @@ -291,12 +294,9 @@ func (s *ibmcos) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, nextMarker, nil } -func (s *ibmcos) SetStorageClass(sc string) { +func (s *ibmcos) SetStorageClass(sc string) error { s.sc = sc -} - -func (s *ibmcos) StorageClass() string { - return s.sc + return nil } func newIBMCOS(endpoint, apiKey, serviceInstanceID, token string) (ObjectStorage, error) { diff --git a/pkg/object/interface.go b/pkg/object/interface.go index 4f2f129cba94..e882edbb0764 100644 --- a/pkg/object/interface.go +++ b/pkg/object/interface.go @@ -81,13 +81,13 @@ type ObjectStorage interface { // Create the bucket if not existed. Create() error // Get the data for the given object specified by key. - Get(key string, off, limit int64) (io.ReadCloser, error) + Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) // Put data read from a reader to an object specified by key. - Put(key string, in io.Reader) error + Put(key string, in io.Reader, getters ...AttrGetter) error // Copy an object from src to dst. Copy(dst, src string) error // Delete a object. - Delete(key string) error + Delete(key string, getters ...AttrGetter) error // Head returns some information about the object or an error if not found. Head(key string) (Object, error) diff --git a/pkg/object/ks3.go b/pkg/object/ks3.go index 65b106c193f7..8890942c9d82 100644 --- a/pkg/object/ks3.go +++ b/pkg/object/ks3.go @@ -99,7 +99,7 @@ func (s *ks3) Head(key string) (Object, error) { }, nil } -func (s *ks3) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *ks3) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { params := &s3.GetObjectInput{Bucket: &s.bucket, Key: &key} if off > 0 || limit > 0 { var r string @@ -112,18 +112,17 @@ func (s *ks3) Get(key string, off, limit int64) (io.ReadCloser, error) { } resp, err := s.s3.GetObject(params) if resp != nil { - ReqIDCache.put(key, aws2.StringValue(resp.Metadata[s3RequestIDKey])) + attrs := applyGetters(getters...) + attrs.SetRequestID(aws2.StringValue(resp.Metadata[s3RequestIDKey])) + attrs.SetStorageClass(aws2.StringValue(resp.Metadata[s3StorageClassHdr])) } if err != nil { return nil, err } - if sc, ok := resp.Metadata[s3StorageClassHdr]; ok && sc != nil { - return scReadCloser{resp.Body, *sc}, nil - } return resp.Body, nil } -func (s *ks3) Put(key string, in io.Reader) error { +func (s *ks3) Put(key string, in io.Reader, getters ...AttrGetter) error { var body io.ReadSeeker if b, ok := in.(io.ReadSeeker); ok { body = b @@ -146,7 +145,8 @@ func (s *ks3) Put(key string, in io.Reader) error { } resp, err := s.s3.PutObject(params) if resp != nil { - ReqIDCache.put(key, aws2.StringValue(resp.Metadata[s3RequestIDKey])) + attrs := applyGetters(getters...) 
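+ // KS3 echoes the request ID in the response metadata; the storage class is the configured one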
+ attrs.SetRequestID(aws2.StringValue(resp.Metadata[s3RequestIDKey])).SetStorageClass(s.sc) } return err } @@ -164,14 +164,15 @@ func (s *ks3) Copy(dst, src string) error { return err } -func (s *ks3) Delete(key string) error { +func (s *ks3) Delete(key string, getters ...AttrGetter) error { param := s3.DeleteObjectInput{ Bucket: &s.bucket, Key: &key, } resp, err := s.s3.DeleteObject(¶m) if resp != nil { - ReqIDCache.put(key, aws2.StringValue(resp.Metadata[s3RequestIDKey])) + attrs := applyGetters(getters...) + attrs.SetRequestID(aws2.StringValue(resp.Metadata[s3RequestIDKey])) } if e, ok := err.(awserr.RequestFailure); ok && e.StatusCode() == http.StatusNotFound { return nil @@ -314,12 +315,9 @@ func (s *ks3) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, nextMarker, nil } -func (s *ks3) SetStorageClass(sc string) { +func (s *ks3) SetStorageClass(sc string) error { s.sc = sc -} - -func (s *ks3) StorageClass() string { - return s.sc + return nil } var ks3Regions = map[string]string{ diff --git a/pkg/object/mem.go b/pkg/object/mem.go index 4ca9f52e273e..1a9051217abd 100644 --- a/pkg/object/mem.go +++ b/pkg/object/mem.go @@ -74,7 +74,7 @@ func (m *memStore) Head(key string) (Object, error) { return f, nil } -func (m *memStore) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (m *memStore) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { m.Lock() defer m.Unlock() // Minimum length is 1. @@ -95,7 +95,7 @@ func (m *memStore) Get(key string, off, limit int64) (io.ReadCloser, error) { return io.NopCloser(bytes.NewBuffer(data)), nil } -func (m *memStore) Put(key string, in io.Reader) error { +func (m *memStore) Put(key string, in io.Reader, getters ...AttrGetter) error { m.Lock() defer m.Unlock() // Minimum length is 1. 
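The same mechanical change repeats across every backend in this patch: Get/Put/Delete gain optional AttrGetter arguments, and the global ReqIDCache keyed by object key (removed from pkg/object/object_storage.go below) is replaced by per-call pointer binding via the helpers in pkg/object/response_attrs.go later in this diff. A minimal caller-side sketch of the new contract; the readWithAttrs helper and its package are illustrative, not part of the change:

```go
package example

import (
	"fmt"
	"io"

	"github.com/juicedata/juicefs/pkg/object"
)

// readWithAttrs is an illustrative helper for the caller side of the new
// API: bind local variables by pointer and the backend fills them for this
// call only, so no shared cache or scReadCloser type assertion is needed.
func readWithAttrs(store object.ObjectStorage, key string) ([]byte, error) {
	var reqID, sc string
	r, err := store.Get(key, 0, -1,
		object.WithRequestID(&reqID),  // filled by backends that expose a request ID
		object.WithStorageClass(&sc)) // stays "" for stores without storage classes
	if err != nil {
		// most backends set the request ID before checking the error,
		// so it is still usable when debugging failed requests
		return nil, fmt.Errorf("get %s (request ID %q): %w", key, reqID, err)
	}
	defer r.Close()
	fmt.Printf("key=%s requestID=%q storageClass=%q\n", key, reqID, sc)
	return io.ReadAll(r)
}
```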
@@ -122,7 +122,7 @@ func (m *memStore) Copy(dst, src string) error { return m.Put(dst, d) } -func (m *memStore) Delete(key string) error { +func (m *memStore) Delete(key string, getters ...AttrGetter) error { m.Lock() defer m.Unlock() delete(m.objects, key) diff --git a/pkg/object/minio.go b/pkg/object/minio.go index 2ae0d13aed58..bf5355291fc6 100644 --- a/pkg/object/minio.go +++ b/pkg/object/minio.go @@ -39,7 +39,9 @@ func (m *minio) String() string { return fmt.Sprintf("minio://%s/%s/", *m.s3client.ses.Config.Endpoint, m.s3client.bucket) } -func (m *minio) SetStorageClass(_ string) {} +func (m *minio) SetStorageClass(_ string) error { + return notSupported +} func (m *minio) Limits() Limits { return Limits{ diff --git a/pkg/object/nfs.go b/pkg/object/nfs.go index c4eba6f08567..71c0acb8edfa 100644 --- a/pkg/object/nfs.go +++ b/pkg/object/nfs.go @@ -110,7 +110,7 @@ func (n *nfsStore) Head(key string) (Object, error) { return n.fileInfo(key, fi), nil } -func (n *nfsStore) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (n *nfsStore) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { p := n.path(key) if strings.HasSuffix(p, "/") { return io.NopCloser(bytes.NewBuffer([]byte{})), nil @@ -154,7 +154,7 @@ func (n *nfsStore) mkdirAll(p string, perm fs.FileMode) error { return err } -func (n *nfsStore) Put(key string, in io.Reader) (err error) { +func (n *nfsStore) Put(key string, in io.Reader, getters ...AttrGetter) (err error) { p := n.path(key) if strings.HasSuffix(p, dirSuffix) { return n.mkdirAll(p, 0777) @@ -209,7 +209,7 @@ func (n *nfsStore) Put(key string, in io.Reader) (err error) { return err } -func (n *nfsStore) Delete(key string) error { +func (n *nfsStore) Delete(key string, getters ...AttrGetter) error { path := n.path(key) if path == "./" { return nil diff --git a/pkg/object/object_storage.go b/pkg/object/object_storage.go index 6e9a895ac253..ad1dd8d088f7 100644 --- a/pkg/object/object_storage.go +++ b/pkg/object/object_storage.go @@ -277,52 +277,3 @@ func ListAllWithDelimiter(store ObjectStorage, prefix, start, end string, follow }() return listed, nil } - -func init() { - ReqIDCache = reqIDCache{cache: make(map[string]reqItem)} - go ReqIDCache.clean() -} - -type reqItem struct { - reqID string - time time.Time -} - -var ReqIDCache reqIDCache - -type reqIDCache struct { - sync.Mutex - cache map[string]reqItem -} - -func (*reqIDCache) clean() { - for range time.Tick(time.Second) { - ReqIDCache.Lock() - for k, v := range ReqIDCache.cache { - if time.Since(v.time) > time.Second { - delete(ReqIDCache.cache, k) - } - } - ReqIDCache.Unlock() - } -} - -func (*reqIDCache) put(key, reqID string) { - if reqID == "" { - return - } - if part := strings.Split(key, "chunks"); len(part) == 2 { - ReqIDCache.Lock() - defer ReqIDCache.Unlock() - ReqIDCache.cache[part[1]] = reqItem{reqID: reqID, time: time.Now()} - } -} - -func (*reqIDCache) Get(key string) string { - if part := strings.Split(key, "chunks"); len(part) == 2 { - ReqIDCache.Lock() - defer ReqIDCache.Unlock() - return ReqIDCache.cache[part[1]].reqID - } - return "" -} diff --git a/pkg/object/object_storage_test.go b/pkg/object/object_storage_test.go index fb9f865bb8c9..42a065fa47e3 100644 --- a/pkg/object/object_storage_test.go +++ b/pkg/object/object_storage_test.go @@ -46,8 +46,8 @@ import ( "github.com/redis/go-redis/v9" ) -func get(s ObjectStorage, k string, off, limit int64) (string, error) { - r, err := s.Get(k, off, limit) +func get(s ObjectStorage, k string, off, limit int64, 
getters ...AttrGetter) (string, error) { + r, err := s.Get(k, off, limit, getters...) if err != nil { return "", err } @@ -73,52 +73,32 @@ func listAll(s ObjectStorage, prefix, marker string, limit int64, followLink boo } func setStorageClass(o ObjectStorage) string { - switch s := o.(type) { - case *wasb: - s.sc = string(blob2.AccessTierCool) - return s.sc - case *bosclient: - s.sc = "STANDARD_IA" - return s.sc - case *COS: - s.sc = "STANDARD_IA" - return s.sc - case *ks3: - s.sc = "STANDARD_IA" - return s.sc - case *gs: - s.sc = "NEARLINE" - return s.sc - case *obsClient: - s.sc = "STANDARD_IA" - return s.sc - case *ossClient: - s.sc = string(oss.StorageIA) - return s.sc - case *qingstor: - s.sc = "STANDARD_IA" - return s.sc - case *s3client: - s.sc = "STANDARD_IA" - return s.sc - case *tosClient: - s.sc = string(enum.StorageClassIa) - return s.sc - default: - return "" + if osc, ok := o.(SupportStorageClass); ok { + var sc = "STANDARD_IA" + switch o.(type) { + case *wasb: + sc = string(blob2.AccessTierCool) + case *gs: + sc = "NEARLINE" + case *ossClient: + sc = string(oss.StorageIA) + case *tosClient: + sc = string(enum.StorageClassIa) + } + err := osc.SetStorageClass(sc) + if err != nil { + sc = "" + } + return sc } + return "" } // nolint:errcheck func testStorage(t *testing.T, s ObjectStorage) { sc := setStorageClass(s) - scSupported := sc != "" // StorageClass supported if err := s.Create(); err != nil { t.Fatalf("Can't create bucket %s: %s", s, err) } - if scSupported && GetStorageClassOrDefault(s) != sc { - t.Fatalf("Storage class should be %q, got %q", sc, GetStorageClassOrDefault(s)) - } if err := s.Create(); err != nil { t.Fatalf("err should be nil when creating a bucket with the same name") } @@ -130,10 +110,14 @@ func testStorage(t *testing.T, s ObjectStorage) { } }() + var scPut string key := "测试编码文件" + `{"name":"juicefs"}` + string('\u001F') + "%uFF081%uFF09.jpg" - if err := s.Put(key, bytes.NewReader(nil)); err != nil { + if err := s.Put(key, bytes.NewReader(nil), WithStorageClass(&scPut)); err != nil { t.Logf("PUT testEncodeFile failed: %s", err.Error()) } else { + if scPut != sc { + t.Fatalf("Storage class should be %q, got %q", sc, scPut) + } if resp, err := s.List("", "测试编码文件", "", 1, true); err != nil && err != notSupported { t.Logf("List testEncodeFile Failed: %s", err) } else if len(resp) == 1 && resp[0].Key() != key { @@ -142,26 +126,25 @@ func testStorage(t *testing.T, s ObjectStorage) { } _ = s.Delete(key) - resp, err := s.Get("not_exists", 0, -1) + _, err := s.Get("not_exists", 0, -1) if err == nil { t.Fatalf("Get should failed: %s", err) } - if scSupported { - // assert resp is an instance of scReadCloser - if _, ok := resp.(scReadCloser); !ok { - t.Fatalf("Storage class is supported by %s, but not returned", s) - } - } br := []byte("hello") if err := s.Put("test", bytes.NewReader(br)); err != nil { t.Fatalf("PUT failed: %s", err.Error()) } + var scGet string // get all - if d, e := get(s, "test", 0, -1); e != nil || d != "hello" { + if d, e := get(s, "test", 0, -1, WithStorageClass(&scGet)); e != nil || d != "hello" { t.Fatalf("expect hello, but got %v, error: %s", d, e) } + if scGet != sc { // Relax this check when testing against a storage that does not honor the specified storage class + t.Fatalf("Storage class should be %q, got %q", sc, scGet) + } + if d, e := get(s, "test", 0, 5); e != nil || d != "hello" { t.Fatalf("expect hello, but got %v, error: %s", d, e) } diff --git a/pkg/object/obs.go b/pkg/object/obs.go index acca0544d438..9f9a8a5efdaf 100644 --- a/pkg/object/obs.go +++
b/pkg/object/obs.go @@ -106,7 +106,7 @@ func (s *obsClient) Head(key string) (Object, error) { }, nil } -func (s *obsClient) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *obsClient) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { params := &obs.GetObjectInput{} params.Bucket = s.bucket params.Key = key @@ -119,7 +119,8 @@ func (s *obsClient) Get(key string, off, limit int64) (io.ReadCloser, error) { resp, err = s.c.GetObject(params) } if resp != nil { - ReqIDCache.put(key, resp.RequestId) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.RequestId).SetStorageClass(string(resp.StorageClass)) } if err != nil { return nil, err @@ -128,10 +129,10 @@ func (s *obsClient) Get(key string, off, limit int64) (io.ReadCloser, error) { _ = resp.Body.Close() return nil, err } - return scReadCloser{resp.Body, string(resp.StorageClass)}, nil + return resp.Body, nil } -func (s *obsClient) Put(key string, in io.Reader) error { +func (s *obsClient) Put(key string, in io.Reader, getters ...AttrGetter) error { var body io.ReadSeeker var vlen int64 var sum []byte @@ -174,7 +175,8 @@ func (s *obsClient) Put(key string, in io.Reader) error { err = fmt.Errorf("unexpected ETag: %s != %s", strings.Trim(resp.ETag, "\""), obs.Hex(sum)) } if resp != nil { - ReqIDCache.put(key, resp.RequestId) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.RequestId).SetStorageClass(s.sc) } return err } @@ -190,13 +192,14 @@ func (s *obsClient) Copy(dst, src string) error { return err } -func (s *obsClient) Delete(key string) error { +func (s *obsClient) Delete(key string, getters ...AttrGetter) error { params := obs.DeleteObjectInput{} params.Bucket = s.bucket params.Key = key resp, err := s.c.DeleteObject(¶ms) if resp != nil { - ReqIDCache.put(key, resp.RequestId) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.RequestId) } return err } @@ -328,12 +331,9 @@ func (s *obsClient) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, nextMarker, nil } -func (s *obsClient) SetStorageClass(sc string) { +func (s *obsClient) SetStorageClass(sc string) error { s.sc = sc -} - -func (s *obsClient) StorageClass() string { - return s.sc + return nil } func autoOBSEndpoint(bucketName, accessKey, secretKey, token string) (string, error) { diff --git a/pkg/object/oss.go b/pkg/object/oss.go index cf59c7543607..f221771b73bc 100644 --- a/pkg/object/oss.go +++ b/pkg/object/oss.go @@ -113,7 +113,7 @@ func (o *ossClient) Head(key string) (Object, error) { }, nil } -func (o *ossClient) Get(key string, off, limit int64) (resp io.ReadCloser, err error) { +func (o *ossClient) Get(key string, off, limit int64, getters ...AttrGetter) (resp io.ReadCloser, err error) { var respHeader http.Header if off > 0 || limit > 0 { var r string @@ -136,16 +136,14 @@ func (o *ossClient) Get(key string, off, limit int64) (resp io.ReadCloser, err e length) } } - ReqIDCache.put(key, respHeader.Get(oss.HTTPHeaderOssRequestID)) + attrs := applyGetters(getters...) 
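+ // OSS exposes both attributes as response headers collected by GetResponseHeader above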
+ attrs.SetRequestID(respHeader.Get(oss.HTTPHeaderOssRequestID)) + attrs.SetStorageClass(respHeader.Get(oss.HTTPHeaderOssStorageClass)) err = o.checkError(err) - if err == nil { - sc := respHeader.Get(oss.HTTPHeaderOssStorageClass) - resp = scReadCloser{resp, sc} - } return } -func (o *ossClient) Put(key string, in io.Reader) error { +func (o *ossClient) Put(key string, in io.Reader, getters ...AttrGetter) error { var option []oss.Option if ins, ok := in.(io.ReadSeeker); ok { option = append(option, oss.Meta(checksumAlgr, generateChecksum(ins))) @@ -156,7 +154,8 @@ func (o *ossClient) Put(key string, in io.Reader) error { var respHeader http.Header option = append(option, oss.GetResponseHeader(&respHeader)) err := o.bucket.PutObject(key, in, option...) - ReqIDCache.put(key, respHeader.Get(oss.HTTPHeaderOssRequestID)) + attrs := applyGetters(getters...) + attrs.SetRequestID(respHeader.Get(oss.HTTPHeaderOssRequestID)).SetStorageClass(o.sc) return o.checkError(err) } @@ -169,10 +168,11 @@ func (o *ossClient) Copy(dst, src string) error { return o.checkError(err) } -func (o *ossClient) Delete(key string) error { +func (o *ossClient) Delete(key string, getters ...AttrGetter) error { var respHeader http.Header err := o.bucket.DeleteObject(key, oss.GetResponseHeader(&respHeader)) - ReqIDCache.put(key, respHeader.Get(oss.HTTPHeaderOssRequestID)) + attrs := applyGetters(getters...) + attrs.SetRequestID(respHeader.Get(oss.HTTPHeaderOssRequestID)) return o.checkError(err) } @@ -271,12 +271,9 @@ func (o *ossClient) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, result.NextKeyMarker, nil } -func (o *ossClient) SetStorageClass(sc string) { +func (o *ossClient) SetStorageClass(sc string) error { o.sc = sc -} - -func (o *ossClient) StorageClass() string { - return o.sc + return nil } type stsCred struct { diff --git a/pkg/object/prefix.go b/pkg/object/prefix.go index 629fc387bebd..eb2c2bae77cb 100644 --- a/pkg/object/prefix.go +++ b/pkg/object/prefix.go @@ -33,17 +33,11 @@ func WithPrefix(os ObjectStorage, prefix string) ObjectStorage { return &withPrefix{os, prefix} } -func (s *withPrefix) SetStorageClass(sc string) { +func (s *withPrefix) SetStorageClass(sc string) error { if o, ok := s.os.(SupportStorageClass); ok { - o.SetStorageClass(sc) + return o.SetStorageClass(sc) } -} - -func (s *withPrefix) StorageClass() string { - if o, ok := s.os.(StorageClassGetter); ok { - return o.StorageClass() - } - return "" + return notSupported } func (s *withPrefix) Symlink(oldName, newName string) error { @@ -113,23 +107,23 @@ func (p *withPrefix) Head(key string) (Object, error) { return p.updateKey(o), nil } -func (p *withPrefix) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (p *withPrefix) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { if off > 0 && limit < 0 { return nil, fmt.Errorf("invalid range: %d-%d", off, limit) } - return p.os.Get(p.prefix+key, off, limit) + return p.os.Get(p.prefix+key, off, limit, getters...) } -func (p *withPrefix) Put(key string, in io.Reader) error { - return p.os.Put(p.prefix+key, in) +func (p *withPrefix) Put(key string, in io.Reader, getters ...AttrGetter) error { + return p.os.Put(p.prefix+key, in, getters...) } func (p *withPrefix) Copy(dst, src string) error { return p.os.Copy(dst, src) } -func (p *withPrefix) Delete(key string) error { - return p.os.Delete(p.prefix + key) +func (p *withPrefix) Delete(key string, getters ...AttrGetter) error { + return p.os.Delete(p.prefix+key, getters...) 
} func (p *withPrefix) List(prefix, marker, delimiter string, limit int64, followLink bool) ([]Object, error) { diff --git a/pkg/object/qingstor.go b/pkg/object/qingstor.go index 78af88a5a08e..d30272ded7bd 100644 --- a/pkg/object/qingstor.go +++ b/pkg/object/qingstor.go @@ -81,7 +81,7 @@ func (q *qingstor) Head(key string) (Object, error) { }, nil } -func (q *qingstor) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (q *qingstor) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { input := &qs.GetObjectInput{} rangeStr := getRange(off, limit) if rangeStr != "" { @@ -89,7 +89,11 @@ func (q *qingstor) Get(key string, off, limit int64) (io.ReadCloser, error) { } output, err := q.bucket.GetObject(key, input) if output != nil { - ReqIDCache.put(key, aws.StringValue(output.RequestID)) + attrs := applyGetters(getters...) + attrs.SetRequestID(aws.StringValue(output.RequestID)) + if output.XQSStorageClass != nil { + attrs.SetStorageClass(*output.XQSStorageClass) + } } if err != nil { return nil, err @@ -98,9 +102,6 @@ func (q *qingstor) Get(key string, off, limit int64) (io.ReadCloser, error) { _ = output.Body.Close() return nil, err } - if output.XQSStorageClass != nil { - return scReadCloser{output.Body, *output.XQSStorageClass}, nil - } return output.Body, nil } @@ -139,7 +140,7 @@ func findLen(in io.Reader) (io.Reader, int64, error) { return in, vlen, nil } -func (q *qingstor) Put(key string, in io.Reader) error { +func (q *qingstor) Put(key string, in io.Reader, getters ...AttrGetter) error { body, vlen, err := findLen(in) if err != nil { return err @@ -155,7 +156,8 @@ func (q *qingstor) Put(key string, in io.Reader) error { } out, err := q.bucket.PutObject(key, input) if out != nil { - ReqIDCache.put(key, aws.StringValue(out.RequestID)) + attrs := applyGetters(getters...) + attrs.SetRequestID(aws.StringValue(out.RequestID)).SetStorageClass(q.sc) } if err != nil { return err @@ -184,10 +186,11 @@ func (q *qingstor) Copy(dst, src string) error { return nil } -func (q *qingstor) Delete(key string) error { +func (q *qingstor) Delete(key string, getters ...AttrGetter) error { output, err := q.bucket.DeleteObject(key) if output != nil { - ReqIDCache.put(key, aws.StringValue(output.RequestID)) + attrs := applyGetters(getters...) 
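+ // a delete response carries no storage class, so only the request ID is recorded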
+ attrs.SetRequestID(aws.StringValue(output.RequestID)) } return err } @@ -315,12 +318,9 @@ func (q *qingstor) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, nextMarker, nil } -func (q *qingstor) SetStorageClass(sc string) { +func (q *qingstor) SetStorageClass(sc string) error { q.sc = sc -} - -func (q *qingstor) StorageClass() string { - return q.sc + return nil } func newQingStor(endpoint, accessKey, secretKey, token string) (ObjectStorage, error) { diff --git a/pkg/object/qiniu.go b/pkg/object/qiniu.go index 214d06708535..eb6d67fc073f 100644 --- a/pkg/object/qiniu.go +++ b/pkg/object/qiniu.go @@ -49,7 +49,9 @@ func (q *qiniu) String() string { return fmt.Sprintf("qiniu://%s/", q.bucket) } -func (q *qiniu) SetStorageClass(_ string) {} +func (q *qiniu) SetStorageClass(_ string) error { + return notSupported +} func (q *qiniu) Limits() Limits { return Limits{} @@ -102,7 +104,7 @@ func (q *qiniu) Head(key string) (Object, error) { }, nil } -func (q *qiniu) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (q *qiniu) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { // S3 SDK cannot get objects with prefix "/" in the key if strings.HasPrefix(key, "/") && os.Getenv("QINIU_DOMAIN") != "" { return q.download(key, off, limit) @@ -111,10 +113,10 @@ func (q *qiniu) Get(key string, off, limit int64) (io.ReadCloser, error) { key = key[1:] } // S3ForcePathStyle = true - return q.s3client.Get("/"+key, off, limit) + return q.s3client.Get("/"+key, off, limit, getters...) } -func (q *qiniu) Put(key string, in io.Reader) error { +func (q *qiniu) Put(key string, in io.Reader, getters ...AttrGetter) error { body, vlen, err := findLen(in) if err != nil { return err @@ -134,7 +136,7 @@ func (q *qiniu) CreateMultipartUpload(key string) (*MultipartUpload, error) { return nil, notSupported } -func (q *qiniu) Delete(key string) error { +func (q *qiniu) Delete(key string, getters ...AttrGetter) error { err := q.bm.Delete(q.bucket, key) if err != nil && strings.Contains(err.Error(), notexist) { return nil diff --git a/pkg/object/redis.go b/pkg/object/redis.go index ad936829da2f..516947741eb4 100644 --- a/pkg/object/redis.go +++ b/pkg/object/redis.go @@ -50,7 +50,7 @@ func (r *redisStore) Create() error { return nil } -func (r *redisStore) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (r *redisStore) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { data, err := r.rdb.Get(ctx, key).Bytes() if err != nil { return nil, err @@ -65,7 +65,7 @@ func (r *redisStore) Get(key string, off, limit int64) (io.ReadCloser, error) { return io.NopCloser(bytes.NewBuffer(data)), nil } -func (r *redisStore) Put(key string, in io.Reader) error { +func (r *redisStore) Put(key string, in io.Reader, getters ...AttrGetter) error { data, err := io.ReadAll(in) if err != nil { return err @@ -73,7 +73,7 @@ func (r *redisStore) Put(key string, in io.Reader) error { return r.rdb.Set(ctx, key, data, 0).Err() } -func (r *redisStore) Delete(key string) error { +func (r *redisStore) Delete(key string, getters ...AttrGetter) error { return r.rdb.Del(ctx, key).Err() } diff --git a/pkg/object/response_attrs.go b/pkg/object/response_attrs.go new file mode 100644 index 000000000000..f9d1c1b3df47 --- /dev/null +++ b/pkg/object/response_attrs.go @@ -0,0 +1,66 @@ +/* + * JuiceFS, Copyright 2024 Juicedata, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package object + +const DefaultStorageClass = "STANDARD" + +type SupportStorageClass interface { + SetStorageClass(sc string) error +} + +// A generic way to get attributes from different object storage clients +type ResponseAttrs struct { + storageClass *string + requestID *string + // other attributes of interest can be added here +} + +func (r *ResponseAttrs) SetRequestID(id string) *ResponseAttrs { + if r.requestID != nil { // Will be nil if the caller is not interested in this attribute + *r.requestID = id + } + return r +} + +func (r *ResponseAttrs) SetStorageClass(sc string) *ResponseAttrs { + if r.storageClass != nil && sc != "" { // Do not overwrite the default storage class + *r.storageClass = sc + } + return r +} + +type AttrGetter func(attrs *ResponseAttrs) + +func WithRequestID(id *string) AttrGetter { + return func(attrs *ResponseAttrs) { + attrs.requestID = id + } +} + +func WithStorageClass(sc *string) AttrGetter { + return func(attrs *ResponseAttrs) { + attrs.storageClass = sc + } +} + +func applyGetters(getters ...AttrGetter) ResponseAttrs { + var attrs ResponseAttrs + for _, getter := range getters { + getter(&attrs) + } + return attrs +} diff --git a/pkg/object/response_attrs_test.go b/pkg/object/response_attrs_test.go new file mode 100644 index 000000000000..39a3c8b5a704 --- /dev/null +++ b/pkg/object/response_attrs_test.go @@ -0,0 +1,44 @@ +/* + * JuiceFS, Copyright 2024 Juicedata, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package object + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const reqIDExample = "c30c0107cd3a073f6607cd3a-ac103aa8-1rqU4w-PuO-cs-tos-front-azc-2" + +func apiCall(getters ...AttrGetter) { + attrs := applyGetters(getters...)
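+ // behave like a storage client: write back only the attributes the caller registered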
+ attrs.SetStorageClass("STANDARD") + attrs.SetRequestID(reqIDExample) + return +} + +func Test_api_call(t *testing.T) { + var reqID, sc string + + apiCall(WithRequestID(&reqID), WithStorageClass(&sc)) + assert.Equalf(t, reqIDExample, reqID, "expected %q, got %q", reqIDExample, reqID) + assert.Equalf(t, "STANDARD", sc, "expected %q, got %q", "STANDARD", sc) + + attrs := applyGetters(WithStorageClass(&sc)) + attrs.SetStorageClass("") // Won't overwrite by empty string + assert.Equalf(t, "STANDARD", sc, "expected %q, got %q", "STANDARD", sc) +} diff --git a/pkg/object/restful.go b/pkg/object/restful.go index eeb76a9d7d0f..dfa0b6d36762 100644 --- a/pkg/object/restful.go +++ b/pkg/object/restful.go @@ -183,7 +183,7 @@ func checkGetStatus(statusCode int, partial bool) error { return nil } -func (s *RestfulStorage) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *RestfulStorage) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { headers := make(map[string]string) if off > 0 || limit > 0 { headers["Range"] = getRange(off, limit) @@ -202,7 +202,7 @@ func (s *RestfulStorage) Get(key string, off, limit int64) (io.ReadCloser, error return resp.Body, nil } -func (u *RestfulStorage) Put(key string, body io.Reader) error { +func (u *RestfulStorage) Put(key string, body io.Reader, getters ...AttrGetter) error { resp, err := u.request("PUT", key, body, nil) if err != nil { return err @@ -227,7 +227,7 @@ func (s *RestfulStorage) Copy(dst, src string) error { return s.Put(dst, bytes.NewReader(d)) } -func (s *RestfulStorage) Delete(key string) error { +func (s *RestfulStorage) Delete(key string, getters ...AttrGetter) error { resp, err := s.request("DELETE", key, nil, nil) if err != nil { return err diff --git a/pkg/object/s3.go b/pkg/object/s3.go index e1b84053237b..8d0649a4839d 100644 --- a/pkg/object/s3.go +++ b/pkg/object/s3.go @@ -105,7 +105,7 @@ func (s *s3client) Head(key string) (Object, error) { } return nil, err } - var sc = defaultStorageClass + var sc = DefaultStorageClass if r.StorageClass != nil { sc = *r.StorageClass } @@ -118,7 +118,7 @@ func (s *s3client) Head(key string) (Object, error) { }, nil } -func (s *s3client) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *s3client) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { params := &s3.GetObjectInput{Bucket: &s.bucket, Key: &key} if off > 0 || limit > 0 { var r string @@ -131,7 +131,8 @@ func (s *s3client) Get(key string, off, limit int64) (io.ReadCloser, error) { } var reqID string resp, err := s.s3.GetObjectWithContext(ctx, params, request.WithGetResponseHeader(s3RequestIDKey, &reqID)) - ReqIDCache.put(key, reqID) // TODO: set reqID to the io.ReadCloser? + attrs := applyGetters(getters...) 
+ attrs.SetRequestID(reqID) if err != nil { return nil, err } @@ -146,12 +147,12 @@ func (s *s3client) Get(key string, off, limit int64) (io.ReadCloser, error) { } } if resp.StorageClass != nil { - return scReadCloser{resp.Body, *resp.StorageClass}, nil + attrs.SetStorageClass(*resp.StorageClass) } return resp.Body, nil } -func (s *s3client) Put(key string, in io.Reader) error { +func (s *s3client) Put(key string, in io.Reader, getters ...AttrGetter) error { var body io.ReadSeeker if b, ok := in.(io.ReadSeeker); ok { body = b @@ -178,7 +179,8 @@ func (s *s3client) Put(key string, in io.Reader) error { } var reqID string _, err := s.s3.PutObjectWithContext(ctx, params, request.WithGetResponseHeader(s3RequestIDKey, &reqID)) - ReqIDCache.put(key, reqID) + attrs := applyGetters(getters...) + attrs.SetRequestID(reqID).SetStorageClass(s.sc) return err } @@ -196,7 +198,7 @@ func (s *s3client) Copy(dst, src string) error { return err } -func (s *s3client) Delete(key string) error { +func (s *s3client) Delete(key string, getters ...AttrGetter) error { param := s3.DeleteObjectInput{ Bucket: &s.bucket, Key: &key, @@ -206,7 +208,8 @@ func (s *s3client) Delete(key string) error { if err != nil && strings.Contains(err.Error(), "NoSuchKey") { err = nil } - ReqIDCache.put(key, reqID) + attrs := applyGetters(getters...) + attrs.SetRequestID(reqID) return err } @@ -236,7 +239,7 @@ func (s *s3client) List(prefix, marker, delimiter string, limit int64, followLin if !strings.HasPrefix(oKey, prefix) || oKey < marker { return nil, fmt.Errorf("found invalid key %s from List, prefix: %s, marker: %s", oKey, prefix, marker) } - var sc = defaultStorageClass + var sc = DefaultStorageClass if o.StorageClass != nil { sc = *o.StorageClass } @@ -358,12 +361,9 @@ func (s *s3client) ListUploads(marker string) ([]*PendingPart, string, error) { return parts, nextMarker, nil } -func (s *s3client) SetStorageClass(sc string) { +func (s *s3client) SetStorageClass(sc string) error { s.sc = sc -} - -func (s *s3client) StorageClass() string { - return s.sc + return nil } func autoS3Region(bucketName, accessKey, secretKey string) (string, error) { diff --git a/pkg/object/scs.go b/pkg/object/scs.go index e625a2892620..a9e43259d8eb 100644 --- a/pkg/object/scs.go +++ b/pkg/object/scs.go @@ -75,7 +75,7 @@ func (s *scsClient) Head(key string) (Object, error) { return &obj{key: key, size: om.ContentLength, mtime: mtime, isDir: strings.HasSuffix(key, "/")}, nil } -func (s *scsClient) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *scsClient) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { if off > 0 || limit > 0 { var r string if limit > 0 { @@ -88,11 +88,11 @@ func (s *scsClient) Get(key string, off, limit int64) (io.ReadCloser, error) { return s.b.Get(key, "") } -func (s *scsClient) Put(key string, in io.Reader) error { +func (s *scsClient) Put(key string, in io.Reader, getters ...AttrGetter) error { return s.b.Put(key, map[string]string{}, in) } -func (s *scsClient) Delete(key string) error { +func (s *scsClient) Delete(key string, getters ...AttrGetter) error { return s.b.Delete(key) } diff --git a/pkg/object/sftp.go b/pkg/object/sftp.go index fc9886838752..b2f3dd8b97b9 100644 --- a/pkg/object/sftp.go +++ b/pkg/object/sftp.go @@ -175,7 +175,7 @@ func (f *sftpStore) Head(key string) (Object, error) { return f.fileInfo(nil, key, info, true), nil } -func (f *sftpStore) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (f *sftpStore) Get(key string, off, limit int64, 
getters ...AttrGetter) (io.ReadCloser, error) { c, err := f.getSftpConnection() if err != nil { return nil, err @@ -204,7 +204,7 @@ func (f *sftpStore) Get(key string, off, limit int64) (io.ReadCloser, error) { return ff, err } -func (f *sftpStore) Put(key string, in io.Reader) (err error) { +func (f *sftpStore) Put(key string, in io.Reader, getters ...AttrGetter) (err error) { c, err := f.getSftpConnection() if err != nil { return err @@ -309,7 +309,7 @@ func (f *sftpStore) Readlink(name string) (string, error) { return c.sftpClient.ReadLink(f.path(name)) } -func (f *sftpStore) Delete(key string) error { +func (f *sftpStore) Delete(key string, getters ...AttrGetter) error { c, err := f.getSftpConnection() if err != nil { return err diff --git a/pkg/object/sharding.go b/pkg/object/sharding.go index 5bfebf0c43f0..c7fb0dde7552 100644 --- a/pkg/object/sharding.go +++ b/pkg/object/sharding.go @@ -61,37 +61,30 @@ func (s *sharded) Head(key string) (Object, error) { return s.pick(key).Head(key) } -func (s *sharded) Get(key string, off, limit int64) (io.ReadCloser, error) { - return s.pick(key).Get(key, off, limit) +func (s *sharded) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { + return s.pick(key).Get(key, off, limit, getters...) } -func (s *sharded) Put(key string, body io.Reader) error { - return s.pick(key).Put(key, body) +func (s *sharded) Put(key string, body io.Reader, getters ...AttrGetter) error { + return s.pick(key).Put(key, body, getters...) } func (s *sharded) Copy(dst, src string) error { return notSupported } -func (s *sharded) Delete(key string) error { - return s.pick(key).Delete(key) +func (s *sharded) Delete(key string, getters ...AttrGetter) error { + return s.pick(key).Delete(key, getters...) } -func (s *sharded) SetStorageClass(sc string) { +func (s *sharded) SetStorageClass(sc string) error { + var err = notSupported for _, o := range s.stores { if os, ok := o.(SupportStorageClass); ok { - os.SetStorageClass(sc) + err = os.SetStorageClass(sc) } } -} - -func (s *sharded) StorageClass() string { - for _, o := range s.stores { - if os, ok := o.(StorageClassGetter); ok { - return os.StorageClass() - } - } - return "" + return err } const maxResults = 10000 diff --git a/pkg/object/sql.go b/pkg/object/sql.go index 21b7b1df1a3d..cdd51723dbf2 100644 --- a/pkg/object/sql.go +++ b/pkg/object/sql.go @@ -58,7 +58,7 @@ func (s *sqlStore) String() string { return fmt.Sprintf("%s://%s/", driver, s.addr) } -func (s *sqlStore) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *sqlStore) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { var b = blob{Key: []byte(key)} // TODO: range ok, err := s.db.Get(&b) @@ -78,7 +78,7 @@ func (s *sqlStore) Get(key string, off, limit int64) (io.ReadCloser, error) { return io.NopCloser(bytes.NewBuffer(data)), nil } -func (s *sqlStore) Put(key string, in io.Reader) error { +func (s *sqlStore) Put(key string, in io.Reader, getters ...AttrGetter) error { d, err := io.ReadAll(in) if err != nil { return err @@ -123,7 +123,7 @@ func (s *sqlStore) Head(key string) (Object, error) { }, nil } -func (s *sqlStore) Delete(key string) error { +func (s *sqlStore) Delete(key string, getters ...AttrGetter) error { _, err := s.db.Delete(&blob{Key: []byte(key)}) return err } diff --git a/pkg/object/storage_class.go b/pkg/object/storage_class.go deleted file mode 100644 index e39202ce8f4a..000000000000 --- a/pkg/object/storage_class.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * JuiceFS, 
Copyright 2024 Juicedata, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package object - -import "io" - -const defaultStorageClass = "STANDARD" - -type SupportStorageClass interface { - SetStorageClass(sc string) -} - -type StorageClassGetter interface { - StorageClass() string -} - -type scReadCloser struct { - io.ReadCloser - sc string -} - -func (s scReadCloser) StorageClass() string { - return s.sc -} - -func GetStorageClassOrDefault(o interface{}) (sc string) { - if o, ok := o.(StorageClassGetter); ok { - sc = o.StorageClass() - } - if sc == "" { - sc = defaultStorageClass - } - return sc -} - -var ( - _ StorageClassGetter = &s3client{} - _ StorageClassGetter = scReadCloser{} // All `Get` return a struct, not a pointer -) diff --git a/pkg/object/swift.go b/pkg/object/swift.go index 15000e34b8ec..bbe280d4cd38 100644 --- a/pkg/object/swift.go +++ b/pkg/object/swift.go @@ -51,7 +51,7 @@ func (s *swiftOSS) Create() error { return s.conn.ContainerCreate(context.Background(), s.container, nil) } -func (s *swiftOSS) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (s *swiftOSS) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { headers := make(map[string]string) if off > 0 || limit > 0 { if limit > 0 { @@ -64,13 +64,13 @@ func (s *swiftOSS) Get(key string, off, limit int64) (io.ReadCloser, error) { return f, err } -func (s *swiftOSS) Put(key string, in io.Reader) error { +func (s *swiftOSS) Put(key string, in io.Reader, getters ...AttrGetter) error { mimeType := utils.GuessMimeType(key) _, err := s.conn.ObjectPut(context.Background(), s.container, key, in, true, "", mimeType, nil) return err } -func (s *swiftOSS) Delete(key string) error { +func (s *swiftOSS) Delete(key string, getters ...AttrGetter) error { err := s.conn.ObjectDelete(context.Background(), s.container, key) if err != nil && errors.Is(err, swift.ObjectNotFound) { err = nil diff --git a/pkg/object/tikv.go b/pkg/object/tikv.go index 463327ff0c54..23d7912be196 100644 --- a/pkg/object/tikv.go +++ b/pkg/object/tikv.go @@ -45,7 +45,7 @@ func (t *tikv) String() string { return fmt.Sprintf("tikv://%s/", t.addr) } -func (t *tikv) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (t *tikv) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { d, err := t.c.Get(context.TODO(), []byte(key)) if len(d) == 0 { err = os.ErrNotExist @@ -63,7 +63,7 @@ func (t *tikv) Get(key string, off, limit int64) (io.ReadCloser, error) { return io.NopCloser(bytes.NewBuffer(data)), nil } -func (t *tikv) Put(key string, in io.Reader) error { +func (t *tikv) Put(key string, in io.Reader, getters ...AttrGetter) error { d, err := io.ReadAll(in) if err != nil { return err @@ -85,7 +85,7 @@ func (t *tikv) Head(key string) (Object, error) { }, err } -func (t *tikv) Delete(key string) error { +func (t *tikv) Delete(key string, getters ...AttrGetter) error { return t.c.Delete(context.TODO(), []byte(key)) } diff --git a/pkg/object/tos.go b/pkg/object/tos.go 
index 695adcabf731..3cdc8404b6e5 100644 --- a/pkg/object/tos.go +++ b/pkg/object/tos.go @@ -65,7 +65,7 @@ func (t *tosClient) Create() error { return err } -func (t *tosClient) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (t *tosClient) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { rangeStr := getRange(off, limit) resp, err := t.client.GetObjectV2(context.Background(), &tos.GetObjectV2Input{ Bucket: t.bucket, @@ -73,7 +73,8 @@ func (t *tosClient) Get(key string, off, limit int64) (io.ReadCloser, error) { Range: rangeStr, // When Range and RangeStart & RangeEnd appear together, range is preferred }) if resp != nil { - ReqIDCache.put(key, resp.RequestID) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.RequestID).SetStorageClass(string(resp.StorageClass)) } if err != nil { return nil, err @@ -82,10 +83,10 @@ func (t *tosClient) Get(key string, off, limit int64) (io.ReadCloser, error) { _ = resp.Content.Close() return nil, err } - return scReadCloser{resp.Content, string(resp.StorageClass)}, nil + return resp.Content, nil } -func (t *tosClient) Put(key string, in io.Reader) error { +func (t *tosClient) Put(key string, in io.Reader, getters ...AttrGetter) error { resp, err := t.client.PutObjectV2(context.Background(), &tos.PutObjectV2Input{ PutObjectBasicInput: tos.PutObjectBasicInput{ Bucket: t.bucket, @@ -95,18 +96,20 @@ func (t *tosClient) Put(key string, in io.Reader) error { Content: in, }) if resp != nil { - ReqIDCache.put(key, resp.RequestID) + attrs := applyGetters(getters...) + attrs.SetRequestID(resp.RequestID).SetStorageClass(t.sc) } return err } -func (t *tosClient) Delete(key string) error { +func (t *tosClient) Delete(key string, getters ...AttrGetter) error { resp, err := t.client.DeleteObjectV2(context.Background(), &tos.DeleteObjectV2Input{ Bucket: t.bucket, Key: key, }) if resp != nil { - ReqIDCache.put(key, resp.RequestID) + attrs := applyGetters(getters...) 
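+ // the TOS SDK surfaces the request ID directly on the response struct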
+ attrs.SetRequestID(resp.RequestID) } return err } @@ -267,12 +270,9 @@ func (t *tosClient) Copy(dst, src string) error { return err } -func (t *tosClient) SetStorageClass(sc string) { +func (t *tosClient) SetStorageClass(sc string) error { t.sc = sc -} - -func (t *tosClient) StorageClass() string { - return t.sc + return nil } func newTOS(endpoint, accessKey, secretKey, token string) (ObjectStorage, error) { diff --git a/pkg/object/upyun.go b/pkg/object/upyun.go index 0e43d87e6562..87c9793754af 100644 --- a/pkg/object/upyun.go +++ b/pkg/object/upyun.go @@ -62,7 +62,7 @@ func (u *up) Head(key string) (Object, error) { }, nil } -func (u *up) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (u *up) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { w := bytes.NewBuffer(nil) _, err := u.c.Get(&upyun.GetObjectConfig{ Path: "/" + key, @@ -78,14 +78,14 @@ func (u *up) Get(key string, off, limit int64) (io.ReadCloser, error) { return io.NopCloser(bytes.NewBuffer(data)), nil } -func (u *up) Put(key string, in io.Reader) error { +func (u *up) Put(key string, in io.Reader, getters ...AttrGetter) error { return u.c.Put(&upyun.PutObjectConfig{ Path: "/" + key, Reader: in, }) } -func (u *up) Delete(key string) error { +func (u *up) Delete(key string, getters ...AttrGetter) error { return u.c.Delete(&upyun.DeleteObjectConfig{ Path: "/" + key, }) diff --git a/pkg/object/wasabi.go b/pkg/object/wasabi.go index 9f53fa9689cd..75fabb5781cd 100644 --- a/pkg/object/wasabi.go +++ b/pkg/object/wasabi.go @@ -38,8 +38,9 @@ func (s *wasabi) String() string { return fmt.Sprintf("wasabi://%s/", s.s3client.bucket) } -// SetStorageClass Wasabi only provides a single storage class which is most like the standard AWS S3 storage class -func (s *wasabi) SetStorageClass(_ string) {} +func (s *wasabi) SetStorageClass(_ string) error { + return notSupported +} func newWasabi(endpoint, accessKey, secretKey, token string) (ObjectStorage, error) { if !strings.Contains(endpoint, "://") { diff --git a/pkg/object/webdav.go b/pkg/object/webdav.go index 8636cfdde862..8528c30ef213 100644 --- a/pkg/object/webdav.go +++ b/pkg/object/webdav.go @@ -88,7 +88,7 @@ func (l *limitedReadCloser) Close() error { return l.rc.Close() } -func (w *webdav) Get(key string, off, limit int64) (io.ReadCloser, error) { +func (w *webdav) Get(key string, off, limit int64, getters ...AttrGetter) (io.ReadCloser, error) { if off == 0 && limit <= 0 { return w.c.ReadStream(key) } @@ -129,7 +129,7 @@ func (w *webdav) Get(key string, off, limit int64) (io.ReadCloser, error) { return nil, &os.PathError{Op: "ReadStreamRange", Path: key, Err: err} } -func (w *webdav) Put(key string, in io.Reader) error { +func (w *webdav) Put(key string, in io.Reader, getters ...AttrGetter) error { if key == "" { return nil } @@ -139,7 +139,7 @@ func (w *webdav) Put(key string, in io.Reader) error { return w.c.WriteStream(key, in, 0) } -func (w *webdav) Delete(key string) error { +func (w *webdav) Delete(key string, getters ...AttrGetter) error { info, err := w.c.Stat(key) if gowebdav.IsErrNotFound(err) { return nil diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go index 5aaaf46584b7..80aa46431a96 100644 --- a/pkg/sync/sync.go +++ b/pkg/sync/sync.go @@ -1109,6 +1109,29 @@ func listCommonPrefix(store object.ObjectStorage, prefix string, cp chan object. 
} func startProducer(tasks chan<- object.Object, src, dst object.ObjectStorage, prefix string, config *Config) error { + if prefix == "" && config.Limit == 1 && len(config.rules) == 0 { + // fast path for single key + obj, err := src.Head(config.Start) + if err == nil && (!obj.IsDir() || config.Dirs) { + var srckeys = make(chan object.Object, 1) + srckeys <- obj + close(srckeys) + var dstkeys = make(chan object.Object, 1) + if dobj, err := dst.Head(config.Start); err == nil || os.IsNotExist(err) { + if dobj != nil { + dstkeys <- dobj + } + close(dstkeys) + logger.Debugf("produce single key %s", config.Start) + produce(tasks, srckeys, dstkeys, config) + return nil + } else { + logger.Warnf("head %s from %s: %s", config.Start, dst, err) + } + } else if err != nil && !os.IsNotExist(err) { + logger.Warnf("head %s from %s: %s", config.Start, src, err) + } + } if config.ListThreads <= 1 || strings.Count(prefix, "/") >= config.ListDepth { return startSingleProducer(tasks, src, dst, prefix, config) } diff --git a/pkg/utils/humanize.go b/pkg/utils/humanize.go index 28d89d90a11c..3d0b92218c1d 100644 --- a/pkg/utils/humanize.go +++ b/pkg/utils/humanize.go @@ -28,7 +28,10 @@ func ParseBytes(ctx *cli.Context, key string, unit byte) uint64 { if len(str) == 0 { return 0 } + return ParseBytesStr(key, str, unit) +} +func ParseBytesStr(key, str string, unit byte) uint64 { s := str if c := s[len(s)-1]; c < '0' || c > '9' { unit = c @@ -56,9 +59,8 @@ func ParseBytes(ctx *cli.Context, key string, unit byte) uint64 { val *= float64(uint64(1) << shift) } if err != nil { - logger.Fatalf("Invalid value \"%s\" for option \"--%s\"", str, key) + logger.Fatalf("Invalid value \"%s\" for \"%s\"", str, key) } - return uint64(val) } @@ -68,6 +70,10 @@ func ParseMbps(ctx *cli.Context, key string) int64 { return 0 } + return ParseMbpsStr(key, str) +} + +func ParseMbpsStr(key, str string) int64 { s := str var unit byte = 'M' if c := s[len(s)-1]; c < '0' || c > '9' { @@ -89,9 +95,8 @@ func ParseMbps(ctx *cli.Context, key string) int64 { } } if err != nil { - logger.Fatalf("Invalid value \"%s\" for option \"--%s\"", str, key) + logger.Fatalf("Invalid value \"%s\" for \"%s\"", str, key) } - return int64(val) } diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index c288b8a5e5b7..ee4a3e5cd8bf 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -247,3 +247,29 @@ func LookupGroup(name string) int { groups[name] = gid return gid } + +func Duration(s string) time.Duration { + if s == "" { + return 0 + } + v, err := strconv.ParseFloat(s, 64) + if err == nil { + return time.Microsecond * time.Duration(v*1e6) + } + + err = nil + var d time.Duration + p := strings.Index(s, "d") + if p >= 0 { + v, err = strconv.ParseFloat(s[:p], 64) + } + if err == nil && s[p+1:] != "" { + d, err = time.ParseDuration(s[p+1:]) + } + + if err != nil { + logger.Warnf("Invalid duration value: %s, setting it to 0", s) + return 0 + } + return d + time.Hour*time.Duration(v*24) +} diff --git a/sdk/java/libjfs/main.go b/sdk/java/libjfs/main.go index 8f31a5c1062f..ab7b4ba0ff40 100644 --- a/sdk/java/libjfs/main.go +++ b/sdk/java/libjfs/main.go @@ -272,49 +272,50 @@ func freeHandle(fd int) { } type javaConf struct { - MetaURL string `json:"meta"` - Bucket string `json:"bucket"` - StorageClass string `json:"storageClass"` - ReadOnly bool `json:"readOnly"` - NoSession bool `json:"noSession"` - NoBGJob bool `json:"noBGJob"` - OpenCache float64 `json:"openCache"` - BackupMeta int64 `json:"backupMeta"` - BackupSkipTrash bool `json:"backupSkipTrash"` - 
Heartbeat int `json:"heartbeat"` - CacheDir string `json:"cacheDir"` - CacheSize int64 `json:"cacheSize"` - FreeSpace string `json:"freeSpace"` - AutoCreate bool `json:"autoCreate"` - CacheFullBlock bool `json:"cacheFullBlock"` - CacheChecksum string `json:"cacheChecksum"` - CacheEviction string `json:"cacheEviction"` - CacheScanInterval int `json:"cacheScanInterval"` - CacheExpire int64 `json:"cacheExpire"` - Writeback bool `json:"writeback"` - MemorySize int `json:"memorySize"` - Prefetch int `json:"prefetch"` - Readahead int `json:"readahead"` - UploadLimit int `json:"uploadLimit"` - DownloadLimit int `json:"downloadLimit"` - MaxUploads int `json:"maxUploads"` - MaxDeletes int `json:"maxDeletes"` - SkipDirNlink int `json:"skipDirNlink"` - IORetries int `json:"ioRetries"` - GetTimeout int `json:"getTimeout"` - PutTimeout int `json:"putTimeout"` - FastResolve bool `json:"fastResolve"` - AttrTimeout float64 `json:"attrTimeout"` - EntryTimeout float64 `json:"entryTimeout"` - DirEntryTimeout float64 `json:"dirEntryTimeout"` - Debug bool `json:"debug"` - NoUsageReport bool `json:"noUsageReport"` - AccessLog string `json:"accessLog"` - PushGateway string `json:"pushGateway"` - PushInterval int `json:"pushInterval"` - PushAuth string `json:"pushAuth"` - PushLabels string `json:"pushLabels"` - PushGraphite string `json:"pushGraphite"` + MetaURL string `json:"meta"` + Bucket string `json:"bucket"` + StorageClass string `json:"storageClass"` + ReadOnly bool `json:"readOnly"` + NoSession bool `json:"noSession"` + NoBGJob bool `json:"noBGJob"` + OpenCache string `json:"openCache"` + BackupMeta string `json:"backupMeta"` + BackupSkipTrash bool `json:"backupSkipTrash"` + Heartbeat string `json:"heartbeat"` + CacheDir string `json:"cacheDir"` + CacheSize string `json:"cacheSize"` + FreeSpace string `json:"freeSpace"` + AutoCreate bool `json:"autoCreate"` + CacheFullBlock bool `json:"cacheFullBlock"` + CacheChecksum string `json:"cacheChecksum"` + CacheEviction string `json:"cacheEviction"` + CacheScanInterval string `json:"cacheScanInterval"` + CacheExpire string `json:"cacheExpire"` + Writeback bool `json:"writeback"` + MemorySize string `json:"memorySize"` + Prefetch int `json:"prefetch"` + Readahead string `json:"readahead"` + UploadLimit string `json:"uploadLimit"` + DownloadLimit string `json:"downloadLimit"` + MaxUploads int `json:"maxUploads"` + MaxDeletes int `json:"maxDeletes"` + SkipDirNlink int `json:"skipDirNlink"` + SkipDirMtime string `json:"skipDirMtime"` + IORetries int `json:"ioRetries"` + GetTimeout string `json:"getTimeout"` + PutTimeout string `json:"putTimeout"` + FastResolve bool `json:"fastResolve"` + AttrTimeout string `json:"attrTimeout"` + EntryTimeout string `json:"entryTimeout"` + DirEntryTimeout string `json:"dirEntryTimeout"` + Debug bool `json:"debug"` + NoUsageReport bool `json:"noUsageReport"` + AccessLog string `json:"accessLog"` + PushGateway string `json:"pushGateway"` + PushInterval string `json:"pushInterval"` + PushAuth string `json:"pushAuth"` + PushLabels string `json:"pushLabels"` + PushGraphite string `json:"pushGraphite"` } func getOrCreate(name, user, group, superuser, supergroup string, f func() *fs.FileSystem) int64 { @@ -441,10 +442,11 @@ func jfs_init(cname, jsonConf, user, group, superuser, supergroup *C.char) int64 metaConf.Retries = jConf.IORetries metaConf.MaxDeletes = jConf.MaxDeletes metaConf.SkipDirNlink = jConf.SkipDirNlink + metaConf.SkipDirMtime = utils.Duration(jConf.SkipDirMtime) metaConf.ReadOnly = jConf.ReadOnly metaConf.NoBGJob = 
jConf.NoBGJob || jConf.NoSession - metaConf.OpenCache = time.Duration(jConf.OpenCache * 1e9) - metaConf.Heartbeat = time.Second * time.Duration(jConf.Heartbeat) + metaConf.OpenCache = utils.Duration(jConf.OpenCache) + metaConf.Heartbeat = utils.Duration(jConf.Heartbeat) m := meta.NewClient(jConf.MetaURL, metaConf) format, err := m.Load(true) if err != nil { @@ -477,10 +479,7 @@ func jfs_init(cname, jsonConf, user, group, superuser, supergroup *C.char) int64 registerer.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) registerer.MustRegister(collectors.NewGoCollector()) - var interval time.Duration - if jConf.PushInterval > 0 { - interval = time.Second * time.Duration(jConf.PushInterval) - } + var interval = utils.Duration(jConf.PushInterval) if jConf.PushGraphite != "" { push2Graphite(jConf.PushGraphite, interval, registry, commonLabels) } @@ -515,25 +514,25 @@ func jfs_init(cname, jsonConf, user, group, superuser, supergroup *C.char) int64 Compress: format.Compression, CacheDir: jConf.CacheDir, CacheMode: 0644, // all user can read cache - CacheSize: uint64(jConf.CacheSize << 20), + CacheSize: utils.ParseBytesStr("cache-size", jConf.CacheSize, 'M'), FreeSpace: float32(freeSpaceRatio), AutoCreate: jConf.AutoCreate, CacheFullBlock: jConf.CacheFullBlock, CacheChecksum: jConf.CacheChecksum, CacheEviction: jConf.CacheEviction, - CacheScanInterval: time.Second * time.Duration(jConf.CacheScanInterval), - CacheExpire: time.Second * time.Duration(jConf.CacheExpire), + CacheScanInterval: utils.Duration(jConf.CacheScanInterval), + CacheExpire: utils.Duration(jConf.CacheExpire), MaxUpload: jConf.MaxUploads, MaxRetries: jConf.IORetries, - UploadLimit: int64(jConf.UploadLimit) * 1e6 / 8, - DownloadLimit: int64(jConf.DownloadLimit) * 1e6 / 8, + UploadLimit: utils.ParseMbpsStr("upload-limit", jConf.UploadLimit), + DownloadLimit: utils.ParseMbpsStr("download-limit", jConf.DownloadLimit), Prefetch: jConf.Prefetch, Writeback: jConf.Writeback, HashPrefix: format.HashPrefix, - GetTimeout: time.Second * time.Duration(jConf.GetTimeout), - PutTimeout: time.Second * time.Duration(jConf.PutTimeout), - BufferSize: uint64(jConf.MemorySize << 20), - Readahead: jConf.Readahead << 20, + GetTimeout: utils.Duration(jConf.GetTimeout), + PutTimeout: utils.Duration(jConf.PutTimeout), + BufferSize: utils.ParseBytesStr("memory-size", jConf.MemorySize, 'M'), + Readahead: int(utils.ParseBytesStr("max-readahead", jConf.Readahead, 'M')), } if chunkConf.UploadLimit == 0 { chunkConf.UploadLimit = format.UploadLimit * 1e6 / 8 @@ -559,11 +558,11 @@ func jfs_init(cname, jsonConf, user, group, superuser, supergroup *C.char) int64 return nil } m.OnReload(func(fmt *meta.Format) { - if jConf.UploadLimit > 0 { - fmt.UploadLimit = int64(jConf.UploadLimit) + if chunkConf.UploadLimit > 0 { + fmt.UploadLimit = chunkConf.UploadLimit } - if jConf.DownloadLimit > 0 { - fmt.DownloadLimit = int64(jConf.DownloadLimit) + if chunkConf.DownloadLimit > 0 { + fmt.DownloadLimit = chunkConf.DownloadLimit } store.UpdateLimit(fmt.UploadLimit, fmt.DownloadLimit) }) @@ -572,12 +571,12 @@ func jfs_init(cname, jsonConf, user, group, superuser, supergroup *C.char) int64 Meta: metaConf, Format: *format, Chunk: &chunkConf, - AttrTimeout: time.Millisecond * time.Duration(jConf.AttrTimeout*1000), - EntryTimeout: time.Millisecond * time.Duration(jConf.EntryTimeout*1000), - DirEntryTimeout: time.Millisecond * time.Duration(jConf.DirEntryTimeout*1000), + AttrTimeout: utils.Duration(jConf.AttrTimeout), + EntryTimeout: 
utils.Duration(jConf.EntryTimeout), + DirEntryTimeout: utils.Duration(jConf.DirEntryTimeout), AccessLog: jConf.AccessLog, FastResolve: jConf.FastResolve, - BackupMeta: time.Second * time.Duration(jConf.BackupMeta), + BackupMeta: utils.Duration(jConf.BackupMeta), BackupSkipTrash: jConf.BackupSkipTrash, } if !jConf.ReadOnly && !jConf.NoSession && !jConf.NoBGJob && conf.BackupMeta > 0 { diff --git a/sdk/java/src/main/java/io/juicefs/JuiceFileSystemImpl.java b/sdk/java/src/main/java/io/juicefs/JuiceFileSystemImpl.java index 374cb8f79ae7..af6b9eb7091a 100644 --- a/sdk/java/src/main/java/io/juicefs/JuiceFileSystemImpl.java +++ b/sdk/java/src/main/java/io/juicefs/JuiceFileSystemImpl.java @@ -372,34 +372,34 @@ public void initialize(URI uri, Configuration conf) throws IOException { obj.put("noSession", Boolean.valueOf(getConf(conf, "no-session", "false"))); obj.put("noBGJob", Boolean.valueOf(getConf(conf, "no-bgjob", "false"))); obj.put("cacheDir", getConf(conf, "cache-dir", "memory")); - obj.put("cacheSize", Integer.valueOf(getConf(conf, "cache-size", "100"))); - obj.put("openCache", Float.valueOf(getConf(conf, "open-cache", "0.0"))); - obj.put("backupMeta", Integer.valueOf(getConf(conf, "backup-meta", "3600"))); + obj.put("cacheSize", getConf(conf, "cache-size", "100")); + obj.put("openCache", getConf(conf, "open-cache", "0.0")); + obj.put("backupMeta", getConf(conf, "backup-meta", "3600")); obj.put("backupSkipTrash", Boolean.valueOf(getConf(conf, "backup-skip-trash", "false"))); - obj.put("heartbeat", Integer.valueOf(getConf(conf, "heartbeat", "12"))); - obj.put("attrTimeout", Float.valueOf(getConf(conf, "attr-cache", "0.0"))); - obj.put("entryTimeout", Float.valueOf(getConf(conf, "entry-cache", "0.0"))); - obj.put("dirEntryTimeout", Float.valueOf(getConf(conf, "dir-entry-cache", "0.0"))); + obj.put("heartbeat", getConf(conf, "heartbeat", "12")); + obj.put("attrTimeout", getConf(conf, "attr-cache", "0.0")); + obj.put("entryTimeout", getConf(conf, "entry-cache", "0.0")); + obj.put("dirEntryTimeout", getConf(conf, "dir-entry-cache", "0.0")); obj.put("cacheFullBlock", Boolean.valueOf(getConf(conf, "cache-full-block", "true"))); obj.put("cacheChecksum", getConf(conf, "verify-cache-checksum", "full")); obj.put("cacheEviction", getConf(conf, "cache-eviction", "2-random")); - obj.put("cacheScanInterval", Integer.valueOf(getConf(conf, "cache-scan-interval", "300"))); - obj.put("cacheExpire", Integer.valueOf(getConf(conf, "cache-expire", "0"))); - obj.put("metacache", Boolean.valueOf(getConf(conf, "metacache", "true"))); + obj.put("cacheScanInterval", getConf(conf, "cache-scan-interval", "300")); + obj.put("cacheExpire", getConf(conf, "cache-expire", "0")); obj.put("autoCreate", Boolean.valueOf(getConf(conf, "auto-create-cache-dir", "true"))); obj.put("maxUploads", Integer.valueOf(getConf(conf, "max-uploads", "20"))); obj.put("maxDeletes", Integer.valueOf(getConf(conf, "max-deletes", "10"))); obj.put("skipDirNlink", Integer.valueOf(getConf(conf, "skip-dir-nlink", "20"))); - obj.put("uploadLimit", Integer.valueOf(getConf(conf, "upload-limit", "0"))); - obj.put("downloadLimit", Integer.valueOf(getConf(conf, "download-limit", "0"))); + obj.put("skipDirMtime", getConf(conf, "skip-dir-mtime", "100ms")); + obj.put("uploadLimit", getConf(conf, "upload-limit", "0")); + obj.put("downloadLimit", getConf(conf, "download-limit", "0")); obj.put("ioRetries", Integer.valueOf(getConf(conf, "io-retries", "10"))); - obj.put("getTimeout", Integer.valueOf(getConf(conf, "get-timeout", getConf(conf, "object-timeout", 
"5")))); - obj.put("putTimeout", Integer.valueOf(getConf(conf, "put-timeout", getConf(conf, "object-timeout", "60")))); - obj.put("memorySize", Integer.valueOf(getConf(conf, "memory-size", "300"))); + obj.put("getTimeout", getConf(conf, "get-timeout", getConf(conf, "object-timeout", "5"))); + obj.put("putTimeout", getConf(conf, "put-timeout", getConf(conf, "object-timeout", "60"))); + obj.put("memorySize", getConf(conf, "memory-size", "300")); obj.put("prefetch", Integer.valueOf(getConf(conf, "prefetch", "1"))); - obj.put("readahead", Integer.valueOf(getConf(conf, "max-readahead", "0"))); + obj.put("readahead", getConf(conf, "max-readahead", "0")); obj.put("pushGateway", getConf(conf, "push-gateway", "")); - obj.put("pushInterval", Integer.valueOf(getConf(conf, "push-interval", "10"))); + obj.put("pushInterval", getConf(conf, "push-interval", "10")); obj.put("pushAuth", getConf(conf, "push-auth", "")); obj.put("pushLabels", getConf(conf, "push-labels", "")); obj.put("pushGraphite", getConf(conf, "push-graphite", "")); diff --git a/sdk/java/src/main/java/io/juicefs/bench/TestDFSIO.java b/sdk/java/src/main/java/io/juicefs/bench/TestDFSIO.java index f8385b12cac3..752fb9b92d48 100644 --- a/sdk/java/src/main/java/io/juicefs/bench/TestDFSIO.java +++ b/sdk/java/src/main/java/io/juicefs/bench/TestDFSIO.java @@ -584,6 +584,7 @@ private void runIOTest( Class> mapperClass, Path outputDir) throws IOException { JobConf job = new JobConf(config, TestDFSIO.class); + job.setBoolean("mapreduce.output.fileoutputformat.compress", false); FileInputFormat.setInputPaths(job, getControlDir(config)); job.setInputFormat(SequenceFileInputFormat.class);