From bb58fef431fbdafb839c5279f589590e5e0e07d4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 10 Apr 2023 20:25:47 +0000
Subject: [PATCH] Bump github.com/pkg/sftp in /src/code.cloudfoundry.org

Bumps [github.com/pkg/sftp](https://github.com/pkg/sftp) from 1.13.0 to 1.13.5.
- [Release notes](https://github.com/pkg/sftp/releases)
- [Commits](https://github.com/pkg/sftp/compare/v1.13.0...v1.13.5)

---
updated-dependencies:
- dependency-name: github.com/pkg/sftp
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
---
 src/code.cloudfoundry.org/go.mod              |   2 +-
 src/code.cloudfoundry.org/go.sum              |  10 +-
 .../vendor/github.com/pkg/sftp/.gitignore     |   3 +
 .../vendor/github.com/pkg/sftp/Makefile       |  10 +
 .../vendor/github.com/pkg/sftp/attrs.go       | 113 +----
 .../vendor/github.com/pkg/sftp/attrs_stubs.go |   2 +-
 .../vendor/github.com/pkg/sftp/attrs_unix.go  |   3 +-
 .../vendor/github.com/pkg/sftp/client.go      | 451 ++++++++++++------
 .../vendor/github.com/pkg/sftp/conn.go        |  16 +-
 .../vendor/github.com/pkg/sftp/fuzz.go        |   8 +-
 .../internal/encoding/ssh/filexfer/attrs.go   | 325 +++++++++++++
 .../internal/encoding/ssh/filexfer/buffer.go  | 293 ++++++++++++
 .../encoding/ssh/filexfer/extended_packets.go | 142 ++++++
 .../encoding/ssh/filexfer/extensions.go       |  46 ++
 .../encoding/ssh/filexfer/filexfer.go         |  54 +++
 .../sftp/internal/encoding/ssh/filexfer/fx.go | 147 ++++++
 .../internal/encoding/ssh/filexfer/fxp.go     | 124 +++++
 .../encoding/ssh/filexfer/handle_packets.go   | 249 ++++++++++
 .../encoding/ssh/filexfer/init_packets.go     |  99 ++++
 .../encoding/ssh/filexfer/open_packets.go     |  89 ++++
 .../internal/encoding/ssh/filexfer/packets.go | 323 +++++++++++++
 .../encoding/ssh/filexfer/path_packets.go     | 368 ++++++++++++++
 .../encoding/ssh/filexfer/permissions.go      | 114 +++++
 .../encoding/ssh/filexfer/response_packets.go | 243 ++++++++++
 .../github.com/pkg/sftp/ls_formatting.go      |  81 ++++
 .../vendor/github.com/pkg/sftp/ls_plan9.go    |  21 +
 .../vendor/github.com/pkg/sftp/ls_stub.go     |  11 +
 .../vendor/github.com/pkg/sftp/ls_unix.go     |  23 +
 .../vendor/github.com/pkg/sftp/match.go       | 200 +-------
 .../github.com/pkg/sftp/packet-typing.go      |   5 +-
 .../vendor/github.com/pkg/sftp/packet.go      | 124 ++++-
 .../vendor/github.com/pkg/sftp/pool.go        |  79 +++
 .../github.com/pkg/sftp/request-attrs.go      |   2 +-
 .../github.com/pkg/sftp/request-errors.go     |  14 +-
 .../github.com/pkg/sftp/request-example.go    |  13 +-
 .../github.com/pkg/sftp/request-interfaces.go |  19 +
 .../github.com/pkg/sftp/request-plan9.go      |  23 +-
 .../github.com/pkg/sftp/request-server.go     | 127 +++--
 .../github.com/pkg/sftp/request-unix.go       |   4 +
 .../vendor/github.com/pkg/sftp/request.go     | 341 ++++++++-----
 .../github.com/pkg/sftp/request_windows.go    |  35 +-
 .../vendor/github.com/pkg/sftp/server.go      | 134 +----
 .../pkg/sftp/server_statvfs_impl.go           |   1 +
 .../github.com/pkg/sftp/server_stubs.go       |  32 --
 .../vendor/github.com/pkg/sftp/server_unix.go |  54 ---
 .../vendor/github.com/pkg/sftp/sftp.go        |  16 +-
 .../vendor/github.com/pkg/sftp/stat_plan9.go  |  49 +-
 .../vendor/github.com/pkg/sftp/stat_posix.go  |  64 +-
 src/code.cloudfoundry.org/vendor/modules.txt  |   5 +-
 49 files changed, 3817 insertions(+), 894 deletions(-)
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/ls_formatting.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/ls_plan9.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/ls_stub.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/ls_unix.go
 create mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/pool.go
 delete mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_stubs.go
 delete mode 100644 src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_unix.go

diff --git a/src/code.cloudfoundry.org/go.mod b/src/code.cloudfoundry.org/go.mod
index 1777ac1db3..d346263263 100644
--- a/src/code.cloudfoundry.org/go.mod
+++ b/src/code.cloudfoundry.org/go.mod
@@ -79,7 +79,7 @@ require (
 	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
 	github.com/pborman/getopt v1.1.0
 	github.com/pkg/errors v0.9.1
-	github.com/pkg/sftp v1.13.0
+	github.com/pkg/sftp v1.13.5
 	github.com/spf13/cobra v1.4.0
 	github.com/square/certstrap v1.3.0
 	github.com/tedsuo/ifrit v0.0.0-20230330192023-5cba443a66c4
diff --git a/src/code.cloudfoundry.org/go.sum b/src/code.cloudfoundry.org/go.sum
index 7aa4171746..0c2242cc5d 100644
--- a/src/code.cloudfoundry.org/go.sum
+++ b/src/code.cloudfoundry.org/go.sum
@@ -369,8 +369,8 @@ github.com/pborman/getopt v1.1.0 h1:eJ3aFZroQqq0bWmraivjQNt6Dmm5M0h2JcDW38/Azb0=
 github.com/pborman/getopt v1.1.0/go.mod h1:FxXoW1Re00sQG/+KIkuSqRL/LwQgSkv7uyac+STFsbk=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.0 h1:Riw6pgOKK41foc1I1Uu03CjvbLZDXeGpInycM4shXoI=
-github.com/pkg/sftp v1.13.0/go.mod h1:41g+FIPlQUTDCveupEmEA65IoiQFrtgCeDopC4ajGIM=
+github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
+github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE= @@ -449,9 +449,9 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -516,6 +516,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -578,7 +579,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -588,6 +588,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -597,7 +598,6 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/.gitignore b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/.gitignore index e1ec837c3f..caf2dca22e 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/.gitignore +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/.gitignore @@ -5,3 +5,6 @@ server_standalone/server_standalone examples/*/id_rsa examples/*/id_rsa.pub + +memprofile.out +memprofile.svg diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/Makefile b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/Makefile index 0afad58491..4d3a00799d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/Makefile +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/Makefile @@ -1,3 +1,5 @@ +.PHONY: integration integration_w_race benchmark + integration: go test -integration -v ./... go test -testserver -v ./... @@ -14,4 +16,12 @@ integration_w_race: go test -race -testserver -allocator -v ./... go test -race -integration -allocator -testserver -v ./... +COUNT ?= 1 +BENCHMARK_PATTERN ?= "." + +benchmark: + go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) +benchmark_w_memprofile: + go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) -memprofile memprofile.out + go tool pprof -svg -output=memprofile.svg memprofile.out diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs.go index 9097a4d897..2bb2d57645 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs.go @@ -13,37 +13,34 @@ const ( sshFileXferAttrUIDGID = 0x00000002 sshFileXferAttrPermissions = 0x00000004 sshFileXferAttrACmodTime = 0x00000008 - sshFileXferAttrExtented = 0x80000000 + sshFileXferAttrExtended = 0x80000000 sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions | - sshFileXferAttrACmodTime | sshFileXferAttrExtented + sshFileXferAttrACmodTime | sshFileXferAttrExtended ) // fileInfo is an artificial type designed to satisfy os.FileInfo. type fileInfo struct { - name string - size int64 - mode os.FileMode - mtime time.Time - sys interface{} + name string + stat *FileStat } // Name returns the base name of the file. func (fi *fileInfo) Name() string { return fi.name } // Size returns the length in bytes for regular files; system-dependent for others. 
-func (fi *fileInfo) Size() int64 { return fi.size } +func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) } // Mode returns file mode bits. -func (fi *fileInfo) Mode() os.FileMode { return fi.mode } +func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) } // ModTime returns the last modification time of the file. -func (fi *fileInfo) ModTime() time.Time { return fi.mtime } +func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) } // IsDir returns true if the file is a directory. func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } -func (fi *fileInfo) Sys() interface{} { return fi.sys } +func (fi *fileInfo) Sys() interface{} { return fi.stat } // FileStat holds the original unmarshalled values from a call to READDIR or // *STAT. It is exported for the purposes of accessing the raw values via @@ -65,25 +62,21 @@ type StatExtended struct { ExtData string } -func fileInfoFromStat(st *FileStat, name string) os.FileInfo { - fs := &fileInfo{ - name: name, - size: int64(st.Size), - mode: toFileMode(st.Mode), - mtime: time.Unix(int64(st.Mtime), 0), - sys: st, +func fileInfoFromStat(stat *FileStat, name string) os.FileInfo { + return &fileInfo{ + name: name, + stat: stat, } - return fs } -func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { +func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) { mtime := fi.ModTime().Unix() atime := mtime var flags uint32 = sshFileXferAttrSize | sshFileXferAttrPermissions | sshFileXferAttrACmodTime - fileStat := FileStat{ + fileStat := &FileStat{ Size: uint64(fi.Size()), Mode: fromFileMode(fi.Mode()), Mtime: uint32(mtime), @@ -91,83 +84,7 @@ func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { } // os specific file stat decoding - fileStatFromInfoOs(fi, &flags, &fileStat) + fileStatFromInfoOs(fi, &flags, fileStat) return flags, fileStat } - -func unmarshalAttrs(b []byte) (*FileStat, []byte) { - flags, b := unmarshalUint32(b) - return getFileStat(flags, b) -} - -func getFileStat(flags uint32, b []byte) (*FileStat, []byte) { - var fs FileStat - if flags&sshFileXferAttrSize == sshFileXferAttrSize { - fs.Size, b, _ = unmarshalUint64Safe(b) - } - if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { - fs.UID, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { - fs.GID, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { - fs.Mode, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { - fs.Atime, b, _ = unmarshalUint32Safe(b) - fs.Mtime, b, _ = unmarshalUint32Safe(b) - } - if flags&sshFileXferAttrExtented == sshFileXferAttrExtented { - var count uint32 - count, b, _ = unmarshalUint32Safe(b) - ext := make([]StatExtended, count) - for i := uint32(0); i < count; i++ { - var typ string - var data string - typ, b, _ = unmarshalStringSafe(b) - data, b, _ = unmarshalStringSafe(b) - ext[i] = StatExtended{typ, data} - } - fs.Extended = ext - } - return &fs, b -} - -func marshalFileInfo(b []byte, fi os.FileInfo) []byte { - // attributes variable struct, and also variable per protocol version - // spec version 3 attributes: - // uint32 flags - // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE - // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS - // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME 
- // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED - // string extended_type - // string extended_data - // ... more extended data (extended_type - extended_data pairs), - // so that number of pairs equals extended_count - - flags, fileStat := fileStatFromInfo(fi) - - b = marshalUint32(b, flags) - if flags&sshFileXferAttrSize != 0 { - b = marshalUint64(b, fileStat.Size) - } - if flags&sshFileXferAttrUIDGID != 0 { - b = marshalUint32(b, fileStat.UID) - b = marshalUint32(b, fileStat.GID) - } - if flags&sshFileXferAttrPermissions != 0 { - b = marshalUint32(b, fileStat.Mode) - } - if flags&sshFileXferAttrACmodTime != 0 { - b = marshalUint32(b, fileStat.Atime) - b = marshalUint32(b, fileStat.Mtime) - } - - return b -} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_stubs.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_stubs.go index ba72e30fb1..c01f33677e 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_stubs.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -1,4 +1,4 @@ -// +build !cgo plan9 windows android +// +build plan9 windows android package sftp diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_unix.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_unix.go index 846b2086dd..d1f4452415 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_unix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/attrs_unix.go @@ -1,5 +1,4 @@ -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix -// +build cgo +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js package sftp diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/client.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/client.go index 81b2d6c1c9..9e0b61645e 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/client.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/client.go @@ -3,6 +3,7 @@ package sftp import ( "bytes" "encoding/binary" + "errors" "io" "math" "os" @@ -13,7 +14,6 @@ import ( "time" "github.com/kr/fs" - "github.com/pkg/errors" "golang.org/x/crypto/ssh" ) @@ -43,10 +43,10 @@ type ClientOption func(*Client) error func MaxPacketChecked(size int) ClientOption { return func(c *Client) error { if size < 1 { - return errors.Errorf("size must be greater or equal to 1") + return errors.New("size must be greater or equal to 1") } if size > 32768 { - return errors.Errorf("sizes larger than 32KB might not work with all servers") + return errors.New("sizes larger than 32KB might not work with all servers") } c.maxPacket = size return nil @@ -65,7 +65,7 @@ func MaxPacketChecked(size int) ClientOption { func MaxPacketUnchecked(size int) ClientOption { return func(c *Client) error { if size < 1 { - return errors.Errorf("size must be greater or equal to 1") + return errors.New("size must be greater or equal to 1") } c.maxPacket = size return nil @@ -90,7 +90,7 @@ func MaxPacket(size int) ClientOption { func MaxConcurrentRequestsPerFile(n int) ClientOption { return func(c *Client) error { if n < 1 { - return errors.Errorf("n must be greater or equal to 1") + return errors.New("n must be greater or equal to 1") } c.maxConcurrentRequests = n return nil @@ -273,7 +273,10 @@ func (c *Client) recvVersion() error { return &unexpectedPacketErr{sshFxpVersion, typ} } - version, data := unmarshalUint32(data) + version, 
data, err := unmarshalUint32Safe(data) + if err != nil { + return err + } if version != sftpProtocolVersion { return &unexpectedVersionErr{sftpProtocolVersion, version} } @@ -384,27 +387,11 @@ func (c *Client) opendir(path string) (string, error) { // Stat returns a FileInfo structure describing the file specified by path 'p'. // If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. func (c *Client) Stat(p string) (os.FileInfo, error) { - id := c.nextID() - typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{ - ID: id, - Path: p, - }) + fs, err := c.stat(p) if err != nil { return nil, err } - switch typ { - case sshFxpAttrs: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return fileInfoFromStat(attr, path.Base(p)), nil - case sshFxpStatus: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } + return fileInfoFromStat(fs, path.Base(p)), nil } // Lstat returns a FileInfo structure describing the file specified by path 'p'. @@ -560,8 +547,12 @@ func (c *Client) Chown(path string, uid, gid int) error { } // Chmod changes the permissions of the named file. +// +// Chmod does not apply a umask, because even retrieving the umask is not +// possible in a portable way without causing a race condition. Callers +// should mask off umask bits, if desired. func (c *Client) Chmod(path string, mode os.FileMode) error { - return c.setstat(path, sshFileXferAttrPermissions, uint32(mode)) + return c.setstat(path, sshFileXferAttrPermissions, toChmodPerm(mode)) } // Truncate sets the size of the named file. Although it may be safely assumed @@ -631,6 +622,30 @@ func (c *Client) close(handle string) error { } } +func (c *Client) stat(path string) (*FileStat, error) { + id := c.nextID() + typ, data, err := c.sendPacket(nil, &sshFxpStatPacket{ + ID: id, + Path: path, + }) + if err != nil { + return nil, err + } + switch typ { + case sshFxpAttrs: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return attr, nil + case sshFxpStatus: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + func (c *Client) fstat(handle string) (*FileStat, error) { id := c.nextID() typ, data, err := c.sendPacket(nil, &sshFxpFstatPacket{ @@ -864,12 +879,12 @@ func (c *Client) MkdirAll(path string) error { // Slow path: make sure parent exists and then call Mkdir for path. i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + for i > 0 && path[i-1] == '/' { // Skip trailing path separator. i-- } j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + for j > 0 && path[j-1] != '/' { // Scan backward over element. j-- } @@ -984,9 +999,6 @@ func (f *File) readAtSequential(b []byte, off int64) (read int, err error) { read += n } if err != nil { - if errors.Is(err, io.EOF) { - return read, nil // return nil explicitly. 
- } return read, err } } @@ -1013,7 +1025,17 @@ func (f *File) ReadAt(b []byte, off int64) (int, error) { cancel := make(chan struct{}) + concurrency := len(b)/f.c.maxPacket + 1 + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + resPool := newResChanPool(concurrency) + type work struct { + id uint32 + res chan result + b []byte off int64 } @@ -1033,8 +1055,18 @@ func (f *File) ReadAt(b []byte, off int64) (int, error) { rb = rb[:chunkSize] } + id := f.c.nextID() + res := resPool.Get() + + f.c.dispatchRequest(res, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(offset), + Len: uint32(chunkSize), + }) + select { - case workCh <- work{rb, offset}: + case workCh <- work{id, res, rb, offset}: case <-cancel: return } @@ -1050,11 +1082,6 @@ func (f *File) ReadAt(b []byte, off int64) (int, error) { } errCh := make(chan rErr) - concurrency := len(b)/f.c.maxPacket + 1 - if concurrency > f.c.maxConcurrentRequests { - concurrency = f.c.maxConcurrentRequests - } - var wg sync.WaitGroup wg.Add(concurrency) for i := 0; i < concurrency; i++ { @@ -1062,10 +1089,40 @@ func (f *File) ReadAt(b []byte, off int64) (int, error) { go func() { defer wg.Done() - ch := make(chan result, 1) // reusable channel per mapper. - for packet := range workCh { - n, err := f.readChunkAt(ch, packet.b, packet.off) + var n int + + s := <-packet.res + resPool.Put(packet.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(packet.id, s.data)) + + case sshFxpData: + sid, data := unmarshalUint32(s.data) + if packet.id != sid { + err = &unexpectedIDErr{packet.id, sid} + + } else { + l, data := unmarshalUint32(data) + n = copy(packet.b, data[:l]) + + // For normal disk files, it is guaranteed that this will read + // the specified number of bytes, or up to end of file. + // This implies, if we have a short read, that means EOF. + if n < len(packet.b) { + err = io.EOF + } + } + + default: + err = unimplementedPacketErr(s.typ) + } + } + if err != nil { // return the offset as the start + how much we read before the error. errCh <- rErr{packet.off + int64(n), err} @@ -1119,11 +1176,11 @@ func (f *File) writeToSequential(w io.Writer) (written int64, err error) { if n > 0 { f.offset += int64(n) - m, err2 := w.Write(b[:n]) + m, err := w.Write(b[:n]) written += int64(m) - if err == nil { - err = err2 + if err != nil { + return written, err } } @@ -1153,30 +1210,32 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { } // For concurrency, we want to guess how many concurrent workers we should use. - var fileSize uint64 + var fileStat *FileStat if f.c.useFstat { - fileStat, err := f.c.fstat(f.handle) - if err != nil { - return 0, err - } - fileSize = fileStat.Size + fileStat, err = f.c.fstat(f.handle) } else { - fi, err := f.c.Stat(f.path) - if err != nil { - return 0, err - } - fileSize = uint64(fi.Size()) + fileStat, err = f.c.stat(f.path) + } + if err != nil { + return 0, err } - if fileSize <= uint64(f.c.maxPacket) { - // We should be able to handle this in one Read. 
+ fileSize := fileStat.Size + if fileSize <= uint64(f.c.maxPacket) || !isRegular(fileStat.Mode) { + // only regular files are guaranteed to return (full read) xor (partial read, next error) return f.writeToSequential(w) } - concurrency := int(fileSize/uint64(f.c.maxPacket) + 1) // a bad guess, but better than no guess - if concurrency > f.c.maxConcurrentRequests { - concurrency = f.c.maxConcurrentRequests + concurrency64 := fileSize/uint64(f.c.maxPacket) + 1 // a bad guess, but better than no guess + if concurrency64 > uint64(f.c.maxConcurrentRequests) || concurrency64 < 1 { + concurrency64 = uint64(f.c.maxConcurrentRequests) } + // Now that concurrency64 is saturated to an int value, we know this assignment cannot possibly overflow. + concurrency := int(concurrency64) + + chunkSize := f.c.maxPacket + pool := newBufPool(concurrency, chunkSize) + resPool := newResChanPool(concurrency) cancel := make(chan struct{}) var wg sync.WaitGroup @@ -1191,7 +1250,6 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { type writeWork struct { b []byte - n int off int64 err error @@ -1200,7 +1258,10 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { writeCh := make(chan writeWork) type readWork struct { - off int64 + id uint32 + res chan result + off int64 + cur, next chan writeWork } readCh := make(chan readWork) @@ -1210,53 +1271,78 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { defer close(readCh) off := f.offset - chunkSize := int64(f.c.maxPacket) cur := writeCh for { + id := f.c.nextID() + res := resPool.Get() + next := make(chan writeWork) readWork := readWork{ - off: off, + id: id, + res: res, + off: off, + cur: cur, next: next, } + f.c.dispatchRequest(res, &sshFxpReadPacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Len: uint32(chunkSize), + }) + select { case readCh <- readWork: case <-cancel: return } - off += chunkSize + off += int64(chunkSize) cur = next } }() - pool := sync.Pool{ - New: func() interface{} { - return make([]byte, f.c.maxPacket) - }, - } - wg.Add(concurrency) for i := 0; i < concurrency; i++ { // Map_i: each worker gets readWork, and does the Read into a buffer at the given offset. go func() { defer wg.Done() - ch := make(chan result, 1) // reusable channel - for readWork := range readCh { - b := pool.Get().([]byte) - - n, err := f.readChunkAt(ch, b, readWork.off) - if n < 0 { - panic("sftp.File: returned negative count from readChunkAt") + var b []byte + var n int + + s := <-readWork.res + resPool.Put(readWork.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(readWork.id, s.data)) + + case sshFxpData: + sid, data := unmarshalUint32(s.data) + if readWork.id != sid { + err = &unexpectedIDErr{readWork.id, sid} + + } else { + l, data := unmarshalUint32(data) + b = pool.Get()[:l] + n = copy(b, data[:l]) + b = b[:n] + } + + default: + err = unimplementedPacketErr(s.typ) + } } writeWork := writeWork{ b: b, - n: n, off: readWork.off, err: err, @@ -1281,14 +1367,14 @@ func (f *File) WriteTo(w io.Writer) (written int64, err error) { for { packet, ok := <-cur if !ok { - return written, nil + return written, errors.New("sftp.File.WriteTo: unexpectedly closed channel") } // Because writes are serialized, this will always be the last successfully read byte. 
- f.offset = packet.off + int64(packet.n) + f.offset = packet.off + int64(len(packet.b)) - if packet.n > 0 { - n, err := w.Write(packet.b[:packet.n]) + if len(packet.b) > 0 { + n, err := w.Write(packet.b) written += int64(n) if err != nil { return written, err @@ -1372,11 +1458,20 @@ func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { cancel := make(chan struct{}) type work struct { - b []byte + id uint32 + res chan result + off int64 } workCh := make(chan work) + concurrency := len(b)/f.c.maxPacket + 1 + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests + } + + pool := newResChanPool(concurrency) + // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. go func() { defer close(workCh) @@ -1390,8 +1485,20 @@ func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { wb = wb[:chunkSize] } + id := f.c.nextID() + res := pool.Get() + off := off + int64(read) + + f.c.dispatchRequest(res, &sshFxpWritePacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Length: uint32(len(wb)), + Data: wb, + }) + select { - case workCh <- work{wb, off + int64(read)}: + case workCh <- work{id, res, off}: case <-cancel: return } @@ -1406,11 +1513,6 @@ func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { } errCh := make(chan wErr) - concurrency := len(b)/f.c.maxPacket + 1 - if concurrency > f.c.maxConcurrentRequests { - concurrency = f.c.maxConcurrentRequests - } - var wg sync.WaitGroup wg.Add(concurrency) for i := 0; i < concurrency; i++ { @@ -1418,13 +1520,22 @@ func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { go func() { defer wg.Done() - ch := make(chan result, 1) // reusable channel per mapper. + for work := range workCh { + s := <-work.res + pool.Put(work.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(work.id, s.data)) + default: + err = unimplementedPacketErr(s.typ) + } + } - for packet := range workCh { - n, err := f.writeChunkAt(ch, packet.b, packet.off) if err != nil { - // return the offset as the start + how much we wrote before the error. - errCh <- wErr{packet.off + int64(n), err} + errCh <- wErr{work.off, err} } } }() @@ -1459,7 +1570,7 @@ func (f *File) writeAtConcurrent(b []byte, off int64) (int, error) { return len(b), nil } -// WriteAt writess up to len(b) byte to the File at a given offset `off`. It returns +// WriteAt writes up to len(b) byte to the File at a given offset `off`. It returns // the number of bytes written and an error, if any. WriteAt follows io.WriterAt semantics, // so the file offset is not altered during the write. func (f *File) WriteAt(b []byte, off int64) (written int, err error) { @@ -1495,8 +1606,13 @@ func (f *File) WriteAt(b []byte, off int64) (written int, err error) { return len(b), nil } -// readFromConcurrent implements ReaderFrom, but works concurrently rather than sequentially. -func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err error) { +// ReadFromWithConcurrency implements ReaderFrom, +// but uses the given concurrency to issue multiple requests at the same time. +// +// Giving a concurrency of less than one will default to the Client’s max concurrency. +// +// Otherwise, the given concurrency will be capped by the Client's max concurrency. 
+func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64, err error) { // Split the write into multiple maxPacket sized concurrent writes. // This allows writes with a suitably large reader // to transfer data at a much faster rate due to overlapping round trip times. @@ -1504,8 +1620,9 @@ func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err er cancel := make(chan struct{}) type work struct { - b []byte - n int + id uint32 + res chan result + off int64 } workCh := make(chan work) @@ -1516,28 +1633,38 @@ func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err er } errCh := make(chan rwErr) - pool := sync.Pool{ - New: func() interface{} { - return make([]byte, f.c.maxPacket) - }, + if concurrency > f.c.maxConcurrentRequests || concurrency < 1 { + concurrency = f.c.maxConcurrentRequests } + pool := newResChanPool(concurrency) + // Slice: cut up the Read into any number of buffers of length <= f.c.maxPacket, and at appropriate offsets. go func() { defer close(workCh) + b := make([]byte, f.c.maxPacket) off := f.offset for { - b := pool.Get().([]byte) - n, err := r.Read(b) + if n > 0 { read += int64(n) + id := f.c.nextID() + res := pool.Get() + + f.c.dispatchRequest(res, &sshFxpWritePacket{ + ID: id, + Handle: f.handle, + Offset: uint64(off), + Length: uint32(n), + Data: b, + }) + select { - case workCh <- work{b, n, off}: - // We need the pool.Put(b) to put the whole slice, not just trunced. + case workCh <- work{id, res, off}: case <-cancel: return } @@ -1554,11 +1681,6 @@ func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err er } }() - concurrency := int(remain/int64(f.c.maxPacket) + 1) // a bad guess, but better than no guess - if concurrency > f.c.maxConcurrentRequests { - concurrency = f.c.maxConcurrentRequests - } - var wg sync.WaitGroup wg.Add(concurrency) for i := 0; i < concurrency; i++ { @@ -1566,15 +1688,23 @@ func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err er go func() { defer wg.Done() - ch := make(chan result, 1) // reusable channel per mapper. + for work := range workCh { + s := <-work.res + pool.Put(work.res) + + err := s.err + if err == nil { + switch s.typ { + case sshFxpStatus: + err = normaliseError(unmarshalStatus(work.id, s.data)) + default: + err = unimplementedPacketErr(s.typ) + } + } - for packet := range workCh { - n, err := f.writeChunkAt(ch, packet.b[:packet.n], packet.off) if err != nil { - // return the offset as the start + how much we wrote before the error. - errCh <- rwErr{packet.off + int64(n), err} + errCh <- rwErr{work.off, err} } - pool.Put(packet.b) } }() } @@ -1607,7 +1737,7 @@ func (f *File) readFromConcurrent(r io.Reader, remain int64) (read int64, err er // * the offset of the first error from writing, // * the last successfully read offset. // - // This could be less than the last succesfully written offset, + // This could be less than the last successfully written offset, // which is the whole reason for the UseConcurrentWrites() ClientOption. // // Callers are responsible for truncating any SFTP files to a safe length. @@ -1638,17 +1768,37 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) { case interface{ Len() int }: remain = int64(r.Len()) + case interface{ Size() int64 }: + remain = r.Size() + case *io.LimitedReader: remain = r.N - case *os.File: - // For files, always presume max concurrency. 
- remain = math.MaxInt64 + case interface{ Stat() (os.FileInfo, error) }: + info, err := r.Stat() + if err == nil { + remain = info.Size() + } + } + + if remain < 0 { + // We can strongly assert that we want default max concurrency here. + return f.ReadFromWithConcurrency(r, f.c.maxConcurrentRequests) } if remain > int64(f.c.maxPacket) { - // Only use concurrency, if it would be at least two read/writes. - return f.readFromConcurrent(r, remain) + // Otherwise, only use concurrency, if it would be at least two packets. + + // This is the best reasonable guess we can make. + concurrency64 := remain/int64(f.c.maxPacket) + 1 + + // We need to cap this value to an `int` size value to avoid overflow on 32-bit machines. + // So, we may as well pre-cap it to `f.c.maxConcurrentRequests`. + if concurrency64 > int64(f.c.maxConcurrentRequests) { + concurrency64 = int64(f.c.maxConcurrentRequests) + } + + return f.ReadFromWithConcurrency(r, int(concurrency64)) } } @@ -1719,8 +1869,10 @@ func (f *File) Chown(uid, gid int) error { } // Chmod changes the permissions of the current file. +// +// See Client.Chmod for details. func (f *File) Chmod(mode os.FileMode) error { - return f.c.Chmod(f.path, mode) + return f.c.setfstat(f.handle, sshFileXferAttrPermissions, toChmodPerm(mode)) } // Sync requests a flush of the contents of a File to stable storage. @@ -1752,13 +1904,6 @@ func (f *File) Truncate(size int64) error { return f.c.setfstat(f.handle, sshFileXferAttrSize, uint64(size)) } -func min(a, b int) int { - if a > b { - return b - } - return a -} - // normaliseError normalises an error into a more standard form that can be // checked against stdlib errors like io.EOF or os.ErrNotExist. func normaliseError(err error) error { @@ -1781,28 +1926,6 @@ func normaliseError(err error) error { } } -func unmarshalStatus(id uint32, data []byte) error { - sid, data := unmarshalUint32(data) - if sid != id { - return &unexpectedIDErr{id, sid} - } - code, data := unmarshalUint32(data) - msg, data, _ := unmarshalStringSafe(data) - lang, _, _ := unmarshalStringSafe(data) - return &StatusError{ - Code: code, - msg: msg, - lang: lang, - } -} - -func marshalStatus(b []byte, err StatusError) []byte { - b = marshalUint32(b, err.Code) - b = marshalString(b, err.msg) - b = marshalString(b, err.lang) - return b -} - // flags converts the flags passed to OpenFile into ssh flags. // Unsupported flags are ignored. func flags(f int) uint32 { @@ -1830,3 +1953,25 @@ func flags(f int) uint32 { } return out } + +// toChmodPerm converts Go permission bits to POSIX permission bits. +// +// This differs from fromFileMode in that we preserve the POSIX versions of +// setuid, setgid and sticky in m, because we've historically supported those +// bits, and we mask off any non-permission bits. 
+func toChmodPerm(m os.FileMode) (perm uint32) { + const mask = os.ModePerm | s_ISUID | s_ISGID | s_ISVTX + perm = uint32(m & mask) + + if m&os.ModeSetuid != 0 { + perm |= s_ISUID + } + if m&os.ModeSetgid != 0 { + perm |= s_ISGID + } + if m&os.ModeSticky != 0 { + perm |= s_ISVTX + } + + return perm +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/conn.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/conn.go index 952a2be410..7d95142378 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/conn.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/conn.go @@ -2,10 +2,9 @@ package sftp import ( "encoding" + "fmt" "io" "sync" - - "github.com/pkg/errors" ) // conn implements a bidirectional channel on which client and server @@ -16,8 +15,6 @@ type conn struct { // this is the same allocator used in packet manager alloc *allocator sync.Mutex // used to serialise writes to sendPacket - // sendPacketTest is needed to replicate packet issues in testing - sendPacketTest func(w io.Writer, m encoding.BinaryMarshaler) error } // the orderID is used in server mode if the allocator is enabled. @@ -29,9 +26,7 @@ func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) { func (c *conn) sendPacket(m encoding.BinaryMarshaler) error { c.Lock() defer c.Unlock() - if c.sendPacketTest != nil { - return c.sendPacketTest(c, m) - } + return sendPacket(c, m) } @@ -84,14 +79,17 @@ func (c *clientConn) recv() error { if err != nil { return err } - sid, _ := unmarshalUint32(data) + sid, _, err := unmarshalUint32Safe(data) + if err != nil { + return err + } ch, ok := c.getChannel(sid) if !ok { // This is an unexpected occurrence. Send the error // back to all listeners so that they terminate // gracefully. - return errors.Errorf("sid not found: %v", sid) + return fmt.Errorf("sid not found: %d", sid) } ch <- result{typ: typ, data: data} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/fuzz.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/fuzz.go index 2a2cd76b17..169aebc284 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/fuzz.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/fuzz.go @@ -4,12 +4,12 @@ package sftp import "bytes" -type sink struct{} +type sinkfuzz struct{} -func (*sink) Close() error { return nil } -func (*sink) Write(p []byte) (int, error) { return len(p), nil } +func (*sinkfuzz) Close() error { return nil } +func (*sinkfuzz) Write(p []byte) (int, error) { return len(p), nil } -var devnull = &sink{} +var devnull = &sinkfuzz{} // To run: go-fuzz-build && go-fuzz func Fuzz(data []byte) int { diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go new file mode 100644 index 0000000000..eed61bfc64 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go @@ -0,0 +1,325 @@ +package filexfer + +// Attributes related flags. 
+const ( + AttrSize = 1 << iota // SSH_FILEXFER_ATTR_SIZE + AttrUIDGID // SSH_FILEXFER_ATTR_UIDGID + AttrPermissions // SSH_FILEXFER_ATTR_PERMISSIONS + AttrACModTime // SSH_FILEXFER_ACMODTIME + + AttrExtended = 1 << 31 // SSH_FILEXFER_ATTR_EXTENDED +) + +// Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02 +// +// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +type Attributes struct { + Flags uint32 + + // AttrSize + Size uint64 + + // AttrUIDGID + UID uint32 + GID uint32 + + // AttrPermissions + Permissions FileMode + + // AttrACmodTime + ATime uint32 + MTime uint32 + + // AttrExtended + ExtendedAttributes []ExtendedAttribute +} + +// GetSize returns the Size field and a bool that is true if and only if the value is valid/defined. +func (a *Attributes) GetSize() (size uint64, ok bool) { + return a.Size, a.Flags&AttrSize != 0 +} + +// SetSize is a convenience function that sets the Size field, +// and marks the field as valid/defined in Flags. +func (a *Attributes) SetSize(size uint64) { + a.Flags |= AttrSize + a.Size = size +} + +// GetUIDGID returns the UID and GID fields and a bool that is true if and only if the values are valid/defined. +func (a *Attributes) GetUIDGID() (uid, gid uint32, ok bool) { + return a.UID, a.GID, a.Flags&AttrUIDGID != 0 +} + +// SetUIDGID is a convenience function that sets the UID and GID fields, +// and marks the fields as valid/defined in Flags. +func (a *Attributes) SetUIDGID(uid, gid uint32) { + a.Flags |= AttrUIDGID + a.UID = uid + a.GID = gid +} + +// GetPermissions returns the Permissions field and a bool that is true if and only if the value is valid/defined. +func (a *Attributes) GetPermissions() (perms FileMode, ok bool) { + return a.Permissions, a.Flags&AttrPermissions != 0 +} + +// SetPermissions is a convenience function that sets the Permissions field, +// and marks the field as valid/defined in Flags. +func (a *Attributes) SetPermissions(perms FileMode) { + a.Flags |= AttrPermissions + a.Permissions = perms +} + +// GetACModTime returns the ATime and MTime fields and a bool that is true if and only if the values are valid/defined. +func (a *Attributes) GetACModTime() (atime, mtime uint32, ok bool) { + return a.ATime, a.MTime, a.Flags&AttrACModTime != 0 +} + +// SetACModTime is a convenience function that sets the ATime and MTime fields, +// and marks the fields as valid/defined in Flags. +func (a *Attributes) SetACModTime(atime, mtime uint32) { + a.Flags |= AttrACModTime + a.ATime = atime + a.MTime = mtime +} + +// Len returns the number of bytes a would marshal into. +func (a *Attributes) Len() int { + length := 4 + + if a.Flags&AttrSize != 0 { + length += 8 + } + + if a.Flags&AttrUIDGID != 0 { + length += 4 + 4 + } + + if a.Flags&AttrPermissions != 0 { + length += 4 + } + + if a.Flags&AttrACModTime != 0 { + length += 4 + 4 + } + + if a.Flags&AttrExtended != 0 { + length += 4 + + for _, ext := range a.ExtendedAttributes { + length += ext.Len() + } + } + + return length +} + +// MarshalInto marshals e onto the end of the given Buffer. 
+func (a *Attributes) MarshalInto(b *Buffer) { + b.AppendUint32(a.Flags) + + if a.Flags&AttrSize != 0 { + b.AppendUint64(a.Size) + } + + if a.Flags&AttrUIDGID != 0 { + b.AppendUint32(a.UID) + b.AppendUint32(a.GID) + } + + if a.Flags&AttrPermissions != 0 { + b.AppendUint32(uint32(a.Permissions)) + } + + if a.Flags&AttrACModTime != 0 { + b.AppendUint32(a.ATime) + b.AppendUint32(a.MTime) + } + + if a.Flags&AttrExtended != 0 { + b.AppendUint32(uint32(len(a.ExtendedAttributes))) + + for _, ext := range a.ExtendedAttributes { + ext.MarshalInto(b) + } + } +} + +// MarshalBinary returns a as the binary encoding of a. +func (a *Attributes) MarshalBinary() ([]byte, error) { + buf := NewBuffer(make([]byte, 0, a.Len())) + a.MarshalInto(buf) + return buf.Bytes(), nil +} + +// UnmarshalFrom unmarshals an Attributes from the given Buffer into e. +// +// NOTE: The values of fields not covered in the a.Flags are explicitly undefined. +func (a *Attributes) UnmarshalFrom(b *Buffer) (err error) { + flags, err := b.ConsumeUint32() + if err != nil { + return err + } + + return a.XXX_UnmarshalByFlags(flags, b) +} + +// XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode. +// DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp. +// This function is not a part of any compatibility promise. +func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, b *Buffer) (err error) { + a.Flags = flags + + // Short-circuit dummy attributes. + if a.Flags == 0 { + return nil + } + + if a.Flags&AttrSize != 0 { + if a.Size, err = b.ConsumeUint64(); err != nil { + return err + } + } + + if a.Flags&AttrUIDGID != 0 { + if a.UID, err = b.ConsumeUint32(); err != nil { + return err + } + + if a.GID, err = b.ConsumeUint32(); err != nil { + return err + } + } + + if a.Flags&AttrPermissions != 0 { + m, err := b.ConsumeUint32() + if err != nil { + return err + } + + a.Permissions = FileMode(m) + } + + if a.Flags&AttrACModTime != 0 { + if a.ATime, err = b.ConsumeUint32(); err != nil { + return err + } + + if a.MTime, err = b.ConsumeUint32(); err != nil { + return err + } + } + + if a.Flags&AttrExtended != 0 { + count, err := b.ConsumeUint32() + if err != nil { + return err + } + + a.ExtendedAttributes = make([]ExtendedAttribute, count) + for i := range a.ExtendedAttributes { + a.ExtendedAttributes[i].UnmarshalFrom(b) + } + } + + return nil +} + +// UnmarshalBinary decodes the binary encoding of Attributes into e. +func (a *Attributes) UnmarshalBinary(data []byte) error { + return a.UnmarshalFrom(NewBuffer(data)) +} + +// ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02 +// +// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +type ExtendedAttribute struct { + Type string + Data string +} + +// Len returns the number of bytes e would marshal into. +func (e *ExtendedAttribute) Len() int { + return 4 + len(e.Type) + 4 + len(e.Data) +} + +// MarshalInto marshals e onto the end of the given Buffer. +func (e *ExtendedAttribute) MarshalInto(b *Buffer) { + b.AppendString(e.Type) + b.AppendString(e.Data) +} + +// MarshalBinary returns e as the binary encoding of e. +func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) { + buf := NewBuffer(make([]byte, 0, e.Len())) + e.MarshalInto(buf) + return buf.Bytes(), nil +} + +// UnmarshalFrom unmarshals an ExtendedAattribute from the given Buffer into e. 
+func (e *ExtendedAttribute) UnmarshalFrom(b *Buffer) (err error) { + if e.Type, err = b.ConsumeString(); err != nil { + return err + } + + if e.Data, err = b.ConsumeString(); err != nil { + return err + } + + return nil +} + +// UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e. +func (e *ExtendedAttribute) UnmarshalBinary(data []byte) error { + return e.UnmarshalFrom(NewBuffer(data)) +} + +// NameEntry implements the SSH_FXP_NAME repeated data type from draft-ietf-secsh-filexfer-02 +// +// This type is incompatible with versions 4 or higher. +type NameEntry struct { + Filename string + Longname string + Attrs Attributes +} + +// Len returns the number of bytes e would marshal into. +func (e *NameEntry) Len() int { + return 4 + len(e.Filename) + 4 + len(e.Longname) + e.Attrs.Len() +} + +// MarshalInto marshals e onto the end of the given Buffer. +func (e *NameEntry) MarshalInto(b *Buffer) { + b.AppendString(e.Filename) + b.AppendString(e.Longname) + + e.Attrs.MarshalInto(b) +} + +// MarshalBinary returns e as the binary encoding of e. +func (e *NameEntry) MarshalBinary() ([]byte, error) { + buf := NewBuffer(make([]byte, 0, e.Len())) + e.MarshalInto(buf) + return buf.Bytes(), nil +} + +// UnmarshalFrom unmarshals an NameEntry from the given Buffer into e. +// +// NOTE: The values of fields not covered in the a.Flags are explicitly undefined. +func (e *NameEntry) UnmarshalFrom(b *Buffer) (err error) { + if e.Filename, err = b.ConsumeString(); err != nil { + return err + } + + if e.Longname, err = b.ConsumeString(); err != nil { + return err + } + + return e.Attrs.UnmarshalFrom(b) +} + +// UnmarshalBinary decodes the binary encoding of NameEntry into e. +func (e *NameEntry) UnmarshalBinary(data []byte) error { + return e.UnmarshalFrom(NewBuffer(data)) +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go new file mode 100644 index 0000000000..a6086036e7 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go @@ -0,0 +1,293 @@ +package filexfer + +import ( + "encoding/binary" + "errors" +) + +// Various encoding errors. +var ( + ErrShortPacket = errors.New("packet too short") + ErrLongPacket = errors.New("packet too long") +) + +// Buffer wraps up the various encoding details of the SSH format. +// +// Data types are encoded as per section 4 from https://tools.ietf.org/html/draft-ietf-secsh-architecture-09#page-8 +type Buffer struct { + b []byte + off int +} + +// NewBuffer creates and initializes a new buffer using buf as its initial contents. +// The new buffer takes ownership of buf, and the caller should not use buf after this call. +// +// In most cases, new(Buffer) (or just declaring a Buffer variable) is sufficient to initialize a Buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{ + b: buf, + } +} + +// NewMarshalBuffer creates a new Buffer ready to start marshaling a Packet into. +// It preallocates enough space for uint32(length), uint8(type), uint32(request-id) and size more bytes. +func NewMarshalBuffer(size int) *Buffer { + return NewBuffer(make([]byte, 4+1+4+size)) +} + +// Bytes returns a slice of length b.Len() holding the unconsumed bytes in the Buffer. +// The slice is valid for use only until the next buffer modification +// (that is, only until the next call to an Append or Consume method). 
+func (b *Buffer) Bytes() []byte { + return b.b[b.off:] +} + +// Len returns the number of unconsumed bytes in the buffer. +func (b *Buffer) Len() int { return len(b.b) - b.off } + +// Cap returns the capacity of the buffer’s underlying byte slice, +// that is, the total space allocated for the buffer’s data. +func (b *Buffer) Cap() int { return cap(b.b) } + +// Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends. +func (b *Buffer) Reset() { + b.b = b.b[:0] + b.off = 0 +} + +// StartPacket resets and initializes the buffer to be ready to start marshaling a packet into. +// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID. +func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) { + b.b, b.off = append(b.b[:0], make([]byte, 4)...), 0 + + b.AppendUint8(uint8(packetType)) + b.AppendUint32(requestID) +} + +// Packet finalizes the packet started from StartPacket. +// It is expected that this will end the ownership of the underlying byte-slice, +// and so the returned byte-slices may be reused the same as any other byte-slice, +// the caller should not use this buffer after this call. +// +// It writes the packet body length into the first four bytes of the buffer in network byte order (big endian). +// The packet body length is the length of this buffer less the 4-byte length itself, plus the length of payload. +// +// It is assumed that no Consume methods have been called on this buffer, +// and so it returns the whole underlying slice. +func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err error) { + b.PutLength(len(b.b) - 4 + len(payload)) + + return b.b, payload, nil +} + +// ConsumeUint8 consumes a single byte from the buffer. +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint8() (uint8, error) { + if b.Len() < 1 { + return 0, ErrShortPacket + } + + var v uint8 + v, b.off = b.b[b.off], b.off+1 + return v, nil +} + +// AppendUint8 appends a single byte into the buffer. +func (b *Buffer) AppendUint8(v uint8) { + b.b = append(b.b, v) +} + +// ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero. +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeBool() (bool, error) { + v, err := b.ConsumeUint8() + if err != nil { + return false, err + } + + return v != 0, nil +} + +// AppendBool appends a single bool into the buffer. +// It encodes it as a single byte, with false as 0, and true as 1. +func (b *Buffer) AppendBool(v bool) { + if v { + b.AppendUint8(1) + } else { + b.AppendUint8(0) + } +} + +// ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian). +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint16() (uint16, error) { + if b.Len() < 2 { + return 0, ErrShortPacket + } + + v := binary.BigEndian.Uint16(b.b[b.off:]) + b.off += 2 + return v, nil +} + +// AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian). +func (b *Buffer) AppendUint16(v uint16) { + b.b = append(b.b, + byte(v>>8), + byte(v>>0), + ) +} + +// unmarshalUint32 is used internally to read the packet length. +// It is unsafe, and so not exported. +// Even within this package, its use should be avoided. 
+func unmarshalUint32(b []byte) uint32 { + return binary.BigEndian.Uint32(b[:4]) +} + +// ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian). +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint32() (uint32, error) { + if b.Len() < 4 { + return 0, ErrShortPacket + } + + v := binary.BigEndian.Uint32(b.b[b.off:]) + b.off += 4 + return v, nil +} + +// AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian). +func (b *Buffer) AppendUint32(v uint32) { + b.b = append(b.b, + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v>>0), + ) +} + +// ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian). +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeUint64() (uint64, error) { + if b.Len() < 8 { + return 0, ErrShortPacket + } + + v := binary.BigEndian.Uint64(b.b[b.off:]) + b.off += 8 + return v, nil +} + +// AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian). +func (b *Buffer) AppendUint64(v uint64) { + b.b = append(b.b, + byte(v>>56), + byte(v>>48), + byte(v>>40), + byte(v>>32), + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v>>0), + ) +} + +// ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement. +// If the buffer does not have enough data, it will return ErrShortPacket. +func (b *Buffer) ConsumeInt64() (int64, error) { + u, err := b.ConsumeUint64() + if err != nil { + return 0, err + } + + return int64(u), err +} + +// AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement. +func (b *Buffer) AppendInt64(v int64) { + b.AppendUint64(uint64(v)) +} + +// ConsumeByteSlice consumes a single string of raw binary data from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// +// The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused +// (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary). +// +// In no case will any Consume calls return overlapping slice aliases, +// and Append calls are guaranteed to not disturb this slice alias. +func (b *Buffer) ConsumeByteSlice() ([]byte, error) { + length, err := b.ConsumeUint32() + if err != nil { + return nil, err + } + + if b.Len() < int(length) { + return nil, ErrShortPacket + } + + v := b.b[b.off:] + if len(v) > int(length) { + v = v[:length:length] + } + b.off += int(length) + return v, nil +} + +// AppendByteSlice appends a single string of raw binary data into the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +func (b *Buffer) AppendByteSlice(v []byte) { + b.AppendUint32(uint32(len(v))) + b.b = append(b.b, v...) +} + +// ConsumeString consumes a single string of binary data from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// +// NOTE: Go implicitly assumes that strings contain UTF-8 encoded data. +// All caveats on using arbitrary binary data in Go strings applies. 
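Each Append method has a matching Consume method, and ConsumeByteSlice (like Bytes) returns a view into the Buffer rather than a copy. A small sketch of the pairing and the copy-before-reuse rule, again assuming in-package placement:

package filexfer

// bufferRoundTrip appends two values and consumes them back; the slice
// returned by ConsumeByteSlice aliases the Buffer's storage.
func bufferRoundTrip() ([]byte, error) {
	b := new(Buffer)

	b.AppendUint64(1 << 40)
	b.AppendByteSlice([]byte("handle-1"))

	if _, err := b.ConsumeUint64(); err != nil {
		return nil, err
	}

	raw, err := b.ConsumeByteSlice()
	if err != nil {
		return nil, err
	}

	// Copy the bytes if they must outlive the Buffer or survive a Reset.
	return append([]byte(nil), raw...), nil
}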
+func (b *Buffer) ConsumeString() (string, error) { + v, err := b.ConsumeByteSlice() + if err != nil { + return "", err + } + + return string(v), nil +} + +// AppendString appends a single string of binary data into the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +func (b *Buffer) AppendString(v string) { + b.AppendByteSlice([]byte(v)) +} + +// PutLength writes the given size into the first four bytes of the buffer in network byte order (big endian). +func (b *Buffer) PutLength(size int) { + if len(b.b) < 4 { + b.b = append(b.b, make([]byte, 4-len(b.b))...) + } + + binary.BigEndian.PutUint32(b.b, uint32(size)) +} + +// MarshalBinary returns a clone of the full internal buffer. +func (b *Buffer) MarshalBinary() ([]byte, error) { + clone := make([]byte, len(b.b)) + n := copy(clone, b.b) + return clone[:n], nil +} + +// UnmarshalBinary sets the internal buffer of b to be a clone of data, and zeros the internal offset. +func (b *Buffer) UnmarshalBinary(data []byte) error { + if grow := len(data) - len(b.b); grow > 0 { + b.b = append(b.b, make([]byte, grow)...) + } + + n := copy(b.b, data) + b.b = b.b[:n] + b.off = 0 + return nil +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go new file mode 100644 index 0000000000..6b7b2cef4d --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go @@ -0,0 +1,142 @@ +package filexfer + +import ( + "encoding" + "sync" +) + +// ExtendedData aliases the untyped interface composition of encoding.BinaryMarshaler and encoding.BinaryUnmarshaler. +type ExtendedData = interface { + encoding.BinaryMarshaler + encoding.BinaryUnmarshaler +} + +// ExtendedDataConstructor defines a function that returns a new(ArbitraryExtendedPacket). +type ExtendedDataConstructor func() ExtendedData + +var extendedPacketTypes = struct { + mu sync.RWMutex + constructors map[string]ExtendedDataConstructor +}{ + constructors: make(map[string]ExtendedDataConstructor), +} + +// RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extension string. +func RegisterExtendedPacketType(extension string, constructor ExtendedDataConstructor) { + extendedPacketTypes.mu.Lock() + defer extendedPacketTypes.mu.Unlock() + + if _, exist := extendedPacketTypes.constructors[extension]; exist { + panic("encoding/ssh/filexfer: multiple registration of extended packet type " + extension) + } + + extendedPacketTypes.constructors[extension] = constructor +} + +func newExtendedPacket(extension string) ExtendedData { + extendedPacketTypes.mu.RLock() + defer extendedPacketTypes.mu.RUnlock() + + if f := extendedPacketTypes.constructors[extension]; f != nil { + return f() + } + + return new(Buffer) +} + +// ExtendedPacket defines the SSH_FXP_CLOSE packet. +type ExtendedPacket struct { + ExtendedRequest string + + Data ExtendedData +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ExtendedPacket) Type() PacketType { + return PacketTypeExtended +} + +// MarshalPacket returns p as a two-part binary encoding of p. +// +// The Data is marshaled into binary, and returned as the payload. 
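RegisterExtendedPacketType lets a caller pre-register the body type for a given extension string; unregistered extensions fall back to a plain Buffer in newExtendedPacket. An illustrative registration, where the extension name is made up and reusing Buffer as its data type is a deliberate simplification (a second registration of the same name panics):

package filexfer

// registerExampleExtension wires a constructor for a hypothetical extension.
func registerExampleExtension() {
	RegisterExtendedPacketType("example@sketch.invalid", func() ExtendedData {
		return new(Buffer) // *Buffer satisfies ExtendedData
	})
}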
+func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.ExtendedRequest) // string(extended-request) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeExtended, reqid) + buf.AppendString(p.ExtendedRequest) + + if p.Data != nil { + payload, err = p.Data.MarshalBinary() + if err != nil { + return nil, nil, err + } + } + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is nil, and the extension has been registered, a new type will be made from the registration. +// If the extension has not been registered, then a new Buffer will be allocated. +// Then the request-specific-data will be unmarshaled from the rest of the buffer. +func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.ExtendedRequest, err = buf.ConsumeString(); err != nil { + return err + } + + if p.Data == nil { + p.Data = newExtendedPacket(p.ExtendedRequest) + } + + return p.Data.UnmarshalBinary(buf.Bytes()) +} + +// ExtendedReplyPacket defines the SSH_FXP_CLOSE packet. +type ExtendedReplyPacket struct { + Data ExtendedData +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ExtendedReplyPacket) Type() PacketType { + return PacketTypeExtendedReply +} + +// MarshalPacket returns p as a two-part binary encoding of p. +// +// The Data is marshaled into binary, and returned as the payload. +func (p *ExtendedReplyPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + buf = NewMarshalBuffer(0) + } + + buf.StartPacket(PacketTypeExtendedReply, reqid) + + if p.Data != nil { + payload, err = p.Data.MarshalBinary() + if err != nil { + return nil, nil, err + } + } + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is nil, and there is request-specific-data, +// then the request-specific-data will be wrapped in a Buffer and assigned to p.Data. +func (p *ExtendedReplyPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Data == nil { + p.Data = new(Buffer) + } + + return p.Data.UnmarshalBinary(buf.Bytes()) +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go new file mode 100644 index 0000000000..11c0b99c22 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go @@ -0,0 +1,46 @@ +package filexfer + +// ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13. +// This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions. +// +// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-4.2 +type ExtensionPair struct { + Name string + Data string +} + +// Len returns the number of bytes e would marshal into. +func (e *ExtensionPair) Len() int { + return 4 + len(e.Name) + 4 + len(e.Data) +} + +// MarshalInto marshals e onto the end of the given Buffer. 
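ExtendedPacket splits its encoding in two: the extended-request string travels in the header, and Data.MarshalBinary() is passed through as the payload. A sketch using "hardlink@openssh.com" only as a plausible extension name, with the request body built in a Buffer:

package filexfer

// extendedSketch marshals an extended request whose opaque body is two strings.
func extendedSketch() error {
	body := new(Buffer)
	body.AppendString("/old/path")
	body.AppendString("/new/path")

	p := &ExtendedPacket{
		ExtendedRequest: "hardlink@openssh.com",
		Data:            body,
	}

	header, payload, err := p.MarshalPacket(1, nil)
	if err != nil {
		return err
	}

	// header ends with the extended-request string; payload holds body's bytes.
	_ = header
	_ = payload
	return nil
}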
+func (e *ExtensionPair) MarshalInto(buf *Buffer) { + buf.AppendString(e.Name) + buf.AppendString(e.Data) +} + +// MarshalBinary returns e as the binary encoding of e. +func (e *ExtensionPair) MarshalBinary() ([]byte, error) { + buf := NewBuffer(make([]byte, 0, e.Len())) + e.MarshalInto(buf) + return buf.Bytes(), nil +} + +// UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e. +func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) { + if e.Name, err = buf.ConsumeString(); err != nil { + return err + } + + if e.Data, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// UnmarshalBinary decodes the binary encoding of ExtensionPair into e. +func (e *ExtensionPair) UnmarshalBinary(data []byte) error { + return e.UnmarshalFrom(NewBuffer(data)) +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go new file mode 100644 index 0000000000..1e5abf7466 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go @@ -0,0 +1,54 @@ +// Package filexfer implements the wire encoding for secsh-filexfer as described in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +package filexfer + +// PacketMarshaller narrowly defines packets that will only be transmitted. +// +// ExtendedPacket types will often only implement this interface, +// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field. +type PacketMarshaller interface { + // MarshalPacket is the primary intended way to encode a packet. + // The request-id for the packet is set from reqid. + // + // An optional buffer may be given in b. + // If the buffer has a minimum capacity, it shall be truncated and used to marshal the header into. + // The minimum capacity for the packet must be a constant expression, and should be at least 9. + // + // It shall return the main body of the encoded packet in header, + // and may optionally return an additional payload to be written immediately after the header. + // + // It shall encode in the first 4-bytes of the header the proper length of the rest of the header+payload. + MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) +} + +// Packet defines the behavior of a full generic SFTP packet. +// +// InitPacket, and VersionPacket are not generic SFTP packets, and instead implement (Un)MarshalBinary. +// +// ExtendedPacket types should not iplement this interface, +// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field. +type Packet interface { + PacketMarshaller + + // Type returns the SSH_FXP_xy value associated with the specific packet. + Type() PacketType + + // UnmarshalPacketBody decodes a packet body from the given Buffer. + // It is assumed that the common header values of the length, type and request-id have already been consumed. + // + // Implementations should not alias the given Buffer, + // instead they can consider prepopulating an internal buffer as a hint, + // and copying into that buffer if it has sufficient length. + UnmarshalPacketBody(buf *Buffer) error +} + +// ComposePacket converts returns from MarshalPacket into an equivalent call to MarshalBinary. 
+func ComposePacket(header, payload []byte, err error) ([]byte, error) { + return append(header, payload...), err +} + +// Default length values, +// Defined in draft-ietf-secsh-filexfer-02 section 3. +const ( + DefaultMaxPacketLength = 34000 + DefaultMaxDataLength = 32768 +) diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go new file mode 100644 index 0000000000..48f869861a --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go @@ -0,0 +1,147 @@ +package filexfer + +import ( + "fmt" +) + +// Status defines the SFTP error codes used in SSH_FXP_STATUS response packets. +type Status uint32 + +// Defines the various SSH_FX_* values. +const ( + // see draft-ietf-secsh-filexfer-02 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 + StatusOK = Status(iota) + StatusEOF + StatusNoSuchFile + StatusPermissionDenied + StatusFailure + StatusBadMessage + StatusNoConnection + StatusConnectionLost + StatusOPUnsupported + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-03#section-7 + StatusV4InvalidHandle + StatusV4NoSuchPath + StatusV4FileAlreadyExists + StatusV4WriteProtect + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-04#section-7 + StatusV4NoMedia + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-05#section-7 + StatusV5NoSpaceOnFilesystem + StatusV5QuotaExceeded + StatusV5UnknownPrincipal + StatusV5LockConflict + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-06#section-8 + StatusV6DirNotEmpty + StatusV6NotADirectory + StatusV6InvalidFilename + StatusV6LinkLoop + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-8 + StatusV6CannotDelete + StatusV6InvalidParameter + StatusV6FileIsADirectory + StatusV6ByteRangeLockConflict + StatusV6ByteRangeLockRefused + StatusV6DeletePending + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-8.1 + StatusV6FileCorrupt + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-10#section-9.1 + StatusV6OwnerInvalid + StatusV6GroupInvalid + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + StatusV6NoMatchingByteRangeLock +) + +func (s Status) Error() string { + return s.String() +} + +// Is returns true if the target is the same Status code, +// or target is a StatusPacket with the same Status code. 
+func (s Status) Is(target error) bool { + if target, ok := target.(*StatusPacket); ok { + return target.StatusCode == s + } + + return s == target +} + +func (s Status) String() string { + switch s { + case StatusOK: + return "SSH_FX_OK" + case StatusEOF: + return "SSH_FX_EOF" + case StatusNoSuchFile: + return "SSH_FX_NO_SUCH_FILE" + case StatusPermissionDenied: + return "SSH_FX_PERMISSION_DENIED" + case StatusFailure: + return "SSH_FX_FAILURE" + case StatusBadMessage: + return "SSH_FX_BAD_MESSAGE" + case StatusNoConnection: + return "SSH_FX_NO_CONNECTION" + case StatusConnectionLost: + return "SSH_FX_CONNECTION_LOST" + case StatusOPUnsupported: + return "SSH_FX_OP_UNSUPPORTED" + case StatusV4InvalidHandle: + return "SSH_FX_INVALID_HANDLE" + case StatusV4NoSuchPath: + return "SSH_FX_NO_SUCH_PATH" + case StatusV4FileAlreadyExists: + return "SSH_FX_FILE_ALREADY_EXISTS" + case StatusV4WriteProtect: + return "SSH_FX_WRITE_PROTECT" + case StatusV4NoMedia: + return "SSH_FX_NO_MEDIA" + case StatusV5NoSpaceOnFilesystem: + return "SSH_FX_NO_SPACE_ON_FILESYSTEM" + case StatusV5QuotaExceeded: + return "SSH_FX_QUOTA_EXCEEDED" + case StatusV5UnknownPrincipal: + return "SSH_FX_UNKNOWN_PRINCIPAL" + case StatusV5LockConflict: + return "SSH_FX_LOCK_CONFLICT" + case StatusV6DirNotEmpty: + return "SSH_FX_DIR_NOT_EMPTY" + case StatusV6NotADirectory: + return "SSH_FX_NOT_A_DIRECTORY" + case StatusV6InvalidFilename: + return "SSH_FX_INVALID_FILENAME" + case StatusV6LinkLoop: + return "SSH_FX_LINK_LOOP" + case StatusV6CannotDelete: + return "SSH_FX_CANNOT_DELETE" + case StatusV6InvalidParameter: + return "SSH_FX_INVALID_PARAMETER" + case StatusV6FileIsADirectory: + return "SSH_FX_FILE_IS_A_DIRECTORY" + case StatusV6ByteRangeLockConflict: + return "SSH_FX_BYTE_RANGE_LOCK_CONFLICT" + case StatusV6ByteRangeLockRefused: + return "SSH_FX_BYTE_RANGE_LOCK_REFUSED" + case StatusV6DeletePending: + return "SSH_FX_DELETE_PENDING" + case StatusV6FileCorrupt: + return "SSH_FX_FILE_CORRUPT" + case StatusV6OwnerInvalid: + return "SSH_FX_OWNER_INVALID" + case StatusV6GroupInvalid: + return "SSH_FX_GROUP_INVALID" + case StatusV6NoMatchingByteRangeLock: + return "SSH_FX_NO_MATCHING_BYTE_RANGE_LOCK" + default: + return fmt.Sprintf("SSH_FX_UNKNOWN(%d)", s) + } +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go new file mode 100644 index 0000000000..15caf6d28a --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go @@ -0,0 +1,124 @@ +package filexfer + +import ( + "fmt" +) + +// PacketType defines the various SFTP packet types. +type PacketType uint8 + +// Request packet types. +const ( + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + PacketTypeInit = PacketType(iota + 1) + PacketTypeVersion + PacketTypeOpen + PacketTypeClose + PacketTypeRead + PacketTypeWrite + PacketTypeLStat + PacketTypeFStat + PacketTypeSetstat + PacketTypeFSetstat + PacketTypeOpenDir + PacketTypeReadDir + PacketTypeRemove + PacketTypeMkdir + PacketTypeRmdir + PacketTypeRealPath + PacketTypeStat + PacketTypeRename + PacketTypeReadLink + PacketTypeSymlink + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-3.3 + PacketTypeV6Link + + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-3.3 + PacketTypeV6Block + PacketTypeV6Unblock +) + +// Response packet types. 
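Because Status implements error and a custom Is method, SFTP status codes can be handled with the standard errors package. A brief sketch; per the Is method above, the same comparison also matches a *StatusPacket carrying the equivalent code:

package filexfer

import (
	"errors"
	"fmt"
)

// statusSketch treats a Status value as an ordinary Go error.
func statusSketch() {
	var err error = StatusPermissionDenied

	fmt.Println(err)                                    // SSH_FX_PERMISSION_DENIED
	fmt.Println(errors.Is(err, StatusPermissionDenied)) // true
}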
+const ( + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + PacketTypeStatus = PacketType(iota + 101) + PacketTypeHandle + PacketTypeData + PacketTypeName + PacketTypeAttrs +) + +// Extended packet types. +const ( + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + PacketTypeExtended = PacketType(iota + 200) + PacketTypeExtendedReply +) + +func (f PacketType) String() string { + switch f { + case PacketTypeInit: + return "SSH_FXP_INIT" + case PacketTypeVersion: + return "SSH_FXP_VERSION" + case PacketTypeOpen: + return "SSH_FXP_OPEN" + case PacketTypeClose: + return "SSH_FXP_CLOSE" + case PacketTypeRead: + return "SSH_FXP_READ" + case PacketTypeWrite: + return "SSH_FXP_WRITE" + case PacketTypeLStat: + return "SSH_FXP_LSTAT" + case PacketTypeFStat: + return "SSH_FXP_FSTAT" + case PacketTypeSetstat: + return "SSH_FXP_SETSTAT" + case PacketTypeFSetstat: + return "SSH_FXP_FSETSTAT" + case PacketTypeOpenDir: + return "SSH_FXP_OPENDIR" + case PacketTypeReadDir: + return "SSH_FXP_READDIR" + case PacketTypeRemove: + return "SSH_FXP_REMOVE" + case PacketTypeMkdir: + return "SSH_FXP_MKDIR" + case PacketTypeRmdir: + return "SSH_FXP_RMDIR" + case PacketTypeRealPath: + return "SSH_FXP_REALPATH" + case PacketTypeStat: + return "SSH_FXP_STAT" + case PacketTypeRename: + return "SSH_FXP_RENAME" + case PacketTypeReadLink: + return "SSH_FXP_READLINK" + case PacketTypeSymlink: + return "SSH_FXP_SYMLINK" + case PacketTypeV6Link: + return "SSH_FXP_LINK" + case PacketTypeV6Block: + return "SSH_FXP_BLOCK" + case PacketTypeV6Unblock: + return "SSH_FXP_UNBLOCK" + case PacketTypeStatus: + return "SSH_FXP_STATUS" + case PacketTypeHandle: + return "SSH_FXP_HANDLE" + case PacketTypeData: + return "SSH_FXP_DATA" + case PacketTypeName: + return "SSH_FXP_NAME" + case PacketTypeAttrs: + return "SSH_FXP_ATTRS" + case PacketTypeExtended: + return "SSH_FXP_EXTENDED" + case PacketTypeExtendedReply: + return "SSH_FXP_EXTENDED_REPLY" + default: + return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f) + } +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go new file mode 100644 index 0000000000..a142771285 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go @@ -0,0 +1,249 @@ +package filexfer + +// ClosePacket defines the SSH_FXP_CLOSE packet. +type ClosePacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ClosePacket) Type() PacketType { + return PacketTypeClose +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeClose, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// ReadPacket defines the SSH_FXP_READ packet. 
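ClosePacket is the simplest of the handle packets, which makes it a convenient place to see the shared header layout produced by MarshalPacket. A sketch, with an arbitrary request-id:

package filexfer

// closeSketch marshals a ClosePacket; the header is
// uint32(length) | uint8(type) | uint32(request-id) | string(handle).
func closeSketch() (bool, error) {
	p := &ClosePacket{Handle: "h1"}

	header, _, err := p.MarshalPacket(9, nil)
	if err != nil {
		return false, err
	}

	return header[4] == uint8(PacketTypeClose), nil // true: byte 4 is the type octet
}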
+type ReadPacket struct { + Handle string + Offset uint64 + Len uint32 +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadPacket) Type() PacketType { + return PacketTypeRead +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(handle) + uint64(offset) + uint32(len) + size := 4 + len(p.Handle) + 8 + 4 + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRead, reqid) + buf.AppendString(p.Handle) + buf.AppendUint64(p.Offset) + buf.AppendUint32(p.Len) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + if p.Offset, err = buf.ConsumeUint64(); err != nil { + return err + } + + if p.Len, err = buf.ConsumeUint32(); err != nil { + return err + } + + return nil +} + +// WritePacket defines the SSH_FXP_WRITE packet. +type WritePacket struct { + Handle string + Offset uint64 + Data []byte +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *WritePacket) Type() PacketType { + return PacketTypeWrite +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(handle) + uint64(offset) + uint32(len(data)); data content in payload + size := 4 + len(p.Handle) + 8 + 4 + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeWrite, reqid) + buf.AppendString(p.Handle) + buf.AppendUint64(p.Offset) + buf.AppendUint32(uint32(len(p.Data))) + + return buf.Packet(p.Data) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +// +// If p.Data is already populated, and of sufficient length to hold the data, +// then this will copy the data into that byte slice. +// +// If p.Data has a length insufficient to hold the data, +// then this will make a new slice of sufficient length, and copy the data into that. +// +// This means this _does not_ alias any of the data buffer that is passed in. +func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + if p.Offset, err = buf.ConsumeUint64(); err != nil { + return err + } + + data, err := buf.ConsumeByteSlice() + if err != nil { + return err + } + + if len(p.Data) < len(data) { + p.Data = make([]byte, len(data)) + } + + n := copy(p.Data, data) + p.Data = p.Data[:n] + return nil +} + +// FStatPacket defines the SSH_FXP_FSTAT packet. +type FStatPacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *FStatPacket) Type() PacketType { + return PacketTypeFStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
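WritePacket is the clearest illustration of the two-part MarshalPacket contract: the file data is returned as the payload rather than being copied into the header buffer. A sketch of the caller's side:

package filexfer

// writeSketch marshals a write request; payload is p.Data itself, so a caller
// can write header and payload back-to-back (for example with net.Buffers)
// without an extra copy.
func writeSketch(data []byte) error {
	p := &WritePacket{
		Handle: "h1",
		Offset: 4096,
		Data:   data,
	}

	header, payload, err := p.MarshalPacket(2, nil)
	if err != nil {
		return err
	}

	_ = header
	_ = payload
	return nil
}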
+func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeFStat, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// FSetstatPacket defines the SSH_FXP_FSETSTAT packet. +type FSetstatPacket struct { + Handle string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *FSetstatPacket) Type() PacketType { + return PacketTypeFSetstat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) + p.Attrs.Len() // string(handle) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeFSetstat, reqid) + buf.AppendString(p.Handle) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// ReadDirPacket defines the SSH_FXP_READDIR packet. +type ReadDirPacket struct { + Handle string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadDirPacket) Type() PacketType { + return PacketTypeReadDir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Handle) // string(handle) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeReadDir, reqid) + buf.AppendString(p.Handle) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Handle, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go new file mode 100644 index 0000000000..b0bc6f5053 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go @@ -0,0 +1,99 @@ +package filexfer + +// InitPacket defines the SSH_FXP_INIT packet. +type InitPacket struct { + Version uint32 + Extensions []*ExtensionPair +} + +// MarshalBinary returns p as the binary encoding of p. 
+func (p *InitPacket) MarshalBinary() ([]byte, error) { + size := 1 + 4 // byte(type) + uint32(version) + + for _, ext := range p.Extensions { + size += ext.Len() + } + + b := NewBuffer(make([]byte, 4, 4+size)) + b.AppendUint8(uint8(PacketTypeInit)) + b.AppendUint32(p.Version) + + for _, ext := range p.Extensions { + ext.MarshalInto(b) + } + + b.PutLength(size) + + return b.Bytes(), nil +} + +// UnmarshalBinary unmarshals a full raw packet out of the given data. +// It is assumed that the uint32(length) has already been consumed to receive the data. +// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. +func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { + buf := NewBuffer(data) + + if p.Version, err = buf.ConsumeUint32(); err != nil { + return err + } + + for buf.Len() > 0 { + var ext ExtensionPair + if err := ext.UnmarshalFrom(buf); err != nil { + return err + } + + p.Extensions = append(p.Extensions, &ext) + } + + return nil +} + +// VersionPacket defines the SSH_FXP_VERSION packet. +type VersionPacket struct { + Version uint32 + Extensions []*ExtensionPair +} + +// MarshalBinary returns p as the binary encoding of p. +func (p *VersionPacket) MarshalBinary() ([]byte, error) { + size := 1 + 4 // byte(type) + uint32(version) + + for _, ext := range p.Extensions { + size += ext.Len() + } + + b := NewBuffer(make([]byte, 4, 4+size)) + b.AppendUint8(uint8(PacketTypeVersion)) + b.AppendUint32(p.Version) + + for _, ext := range p.Extensions { + ext.MarshalInto(b) + } + + b.PutLength(size) + + return b.Bytes(), nil +} + +// UnmarshalBinary unmarshals a full raw packet out of the given data. +// It is assumed that the uint32(length) has already been consumed to receive the data. +// It is also assumed that the uint8(type) has already been consumed to which packet to unmarshal into. +func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) { + buf := NewBuffer(data) + + if p.Version, err = buf.ConsumeUint32(); err != nil { + return err + } + + for buf.Len() > 0 { + var ext ExtensionPair + if err := ext.UnmarshalFrom(buf); err != nil { + return err + } + + p.Extensions = append(p.Extensions, &ext) + } + + return nil +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go new file mode 100644 index 0000000000..1358711421 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go @@ -0,0 +1,89 @@ +package filexfer + +// SSH_FXF_* flags. +const ( + FlagRead = 1 << iota // SSH_FXF_READ + FlagWrite // SSH_FXF_WRITE + FlagAppend // SSH_FXF_APPEND + FlagCreate // SSH_FXF_CREAT + FlagTruncate // SSH_FXF_TRUNC + FlagExclusive // SSH_FXF_EXCL +) + +// OpenPacket defines the SSH_FXP_OPEN packet. +type OpenPacket struct { + Filename string + PFlags uint32 + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *OpenPacket) Type() PacketType { + return PacketTypeOpen +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
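InitPacket and VersionPacket carry no request-id, which is why they implement MarshalBinary/UnmarshalBinary directly instead of MarshalPacket. A sketch of the client side of the handshake, with a made-up extension pair:

package filexfer

// initSketch encodes SSH_FXP_INIT for protocol version 3.
func initSketch() ([]byte, error) {
	pkt := &InitPacket{
		Version: 3,
		Extensions: []*ExtensionPair{
			{Name: "example@sketch.invalid", Data: "1"},
		},
	}

	return pkt.MarshalBinary()
}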
+func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(filename) + uint32(pflags) + ATTRS(attrs) + size := 4 + len(p.Filename) + 4 + p.Attrs.Len() + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeOpen, reqid) + buf.AppendString(p.Filename) + buf.AppendUint32(p.PFlags) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Filename, err = buf.ConsumeString(); err != nil { + return err + } + + if p.PFlags, err = buf.ConsumeUint32(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// OpenDirPacket defines the SSH_FXP_OPENDIR packet. +type OpenDirPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *OpenDirPacket) Type() PacketType { + return PacketTypeOpenDir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeOpenDir, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go new file mode 100644 index 0000000000..3f24e9c224 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go @@ -0,0 +1,323 @@ +package filexfer + +import ( + "errors" + "fmt" + "io" +) + +// smallBufferSize is an initial allocation minimal capacity. 
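OpenPacket combines the SSH_FXF_* flags above with an Attributes block; the zero-value Attributes is valid and encodes an empty flag set. A sketch of a create-or-truncate open request:

package filexfer

// openSketch builds an SSH_FXP_OPEN request for writing.
func openSketch() ([]byte, error) {
	p := &OpenPacket{
		Filename: "/upload/report.txt",
		PFlags:   FlagWrite | FlagCreate | FlagTruncate,
	}

	header, _, err := p.MarshalPacket(3, nil)
	if err != nil {
		return nil, err
	}

	return header, nil
}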
+const smallBufferSize = 64 + +func newPacketFromType(typ PacketType) (Packet, error) { + switch typ { + case PacketTypeOpen: + return new(OpenPacket), nil + case PacketTypeClose: + return new(ClosePacket), nil + case PacketTypeRead: + return new(ReadPacket), nil + case PacketTypeWrite: + return new(WritePacket), nil + case PacketTypeLStat: + return new(LStatPacket), nil + case PacketTypeFStat: + return new(FStatPacket), nil + case PacketTypeSetstat: + return new(SetstatPacket), nil + case PacketTypeFSetstat: + return new(FSetstatPacket), nil + case PacketTypeOpenDir: + return new(OpenDirPacket), nil + case PacketTypeReadDir: + return new(ReadDirPacket), nil + case PacketTypeRemove: + return new(RemovePacket), nil + case PacketTypeMkdir: + return new(MkdirPacket), nil + case PacketTypeRmdir: + return new(RmdirPacket), nil + case PacketTypeRealPath: + return new(RealPathPacket), nil + case PacketTypeStat: + return new(StatPacket), nil + case PacketTypeRename: + return new(RenamePacket), nil + case PacketTypeReadLink: + return new(ReadLinkPacket), nil + case PacketTypeSymlink: + return new(SymlinkPacket), nil + case PacketTypeExtended: + return new(ExtendedPacket), nil + default: + return nil, fmt.Errorf("unexpected request packet type: %v", typ) + } +} + +// RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02 +// +// RawPacket is intended for use in clients receiving responses, +// where a response will be expected to be of a limited number of types, +// and unmarshaling unknown/unexpected response packets is unnecessary. +// +// For servers expecting to receive arbitrary request packet types, +// use RequestPacket. +// +// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +type RawPacket struct { + PacketType PacketType + RequestID uint32 + + Data Buffer +} + +// Type returns the Type field defining the SSH_FXP_xy type for this packet. +func (p *RawPacket) Type() PacketType { + return p.PacketType +} + +// Reset clears the pointers and reference-semantic variables of RawPacket, +// releasing underlying resources, and making them and the RawPacket suitable to be reused, +// so long as no other references have been kept. +func (p *RawPacket) Reset() { + p.Data = Buffer{} +} + +// MarshalPacket returns p as a two-part binary encoding of p. +// +// The internal p.RequestID is overridden by the reqid argument. +func (p *RawPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + buf = NewMarshalBuffer(0) + } + + buf.StartPacket(p.PacketType, reqid) + + return buf.Packet(p.Data.Bytes()) +} + +// MarshalBinary returns p as the binary encoding of p. +// +// This is a convenience implementation primarily intended for tests, +// because it is inefficient with allocations. +func (p *RawPacket) MarshalBinary() ([]byte, error) { + return ComposePacket(p.MarshalPacket(p.RequestID, nil)) +} + +// UnmarshalFrom decodes a RawPacket from the given Buffer into p. +// +// The Data field will alias the passed in Buffer, +// so the buffer passed in should not be reused before RawPacket.Reset(). +func (p *RawPacket) UnmarshalFrom(buf *Buffer) error { + typ, err := buf.ConsumeUint8() + if err != nil { + return err + } + + p.PacketType = PacketType(typ) + + if p.RequestID, err = buf.ConsumeUint32(); err != nil { + return err + } + + p.Data = *buf + return nil +} + +// UnmarshalBinary decodes a full raw packet out of the given data. 
+// It is assumed that the uint32(length) has already been consumed to receive the data. +// +// This is a convenience implementation primarily intended for tests, +// because this must clone the given data byte slice, +// as Data is not allowed to alias any part of the data byte slice. +func (p *RawPacket) UnmarshalBinary(data []byte) error { + clone := make([]byte, len(data)) + n := copy(clone, data) + return p.UnmarshalFrom(NewBuffer(clone[:n])) +} + +// readPacket reads a uint32 length-prefixed binary data packet from r. +// using the given byte slice as a backing array. +// +// If the packet length read from r is bigger than maxPacketLength, +// or greater than math.MaxInt32 on a 32-bit implementation, +// then a `ErrLongPacket` error will be returned. +// +// If the given byte slice is insufficient to hold the packet, +// then it will be extended to fill the packet size. +func readPacket(r io.Reader, b []byte, maxPacketLength uint32) ([]byte, error) { + if cap(b) < 4 { + // We will need allocate our own buffer just for reading the packet length. + + // However, we don’t really want to allocate an extremely narrow buffer (4-bytes), + // and cause unnecessary allocation churn from both length reads and small packet reads, + // so we use smallBufferSize from the bytes package as a reasonable guess. + + // But if callers really do want to force narrow throw-away allocation of every packet body, + // they can do so with a buffer of capacity 4. + b = make([]byte, smallBufferSize) + } + + if _, err := io.ReadFull(r, b[:4]); err != nil { + return nil, err + } + + length := unmarshalUint32(b) + if int(length) < 5 { + // Must have at least uint8(type) and uint32(request-id) + + if int(length) < 0 { + // Only possible when strconv.IntSize == 32, + // the packet length is longer than math.MaxInt32, + // and thus longer than any possible slice. + return nil, ErrLongPacket + } + + return nil, ErrShortPacket + } + if length > maxPacketLength { + return nil, ErrLongPacket + } + + if int(length) > cap(b) { + // We know int(length) must be positive, because of tests above. + b = make([]byte, length) + } + + n, err := io.ReadFull(r, b[:length]) + return b[:n], err +} + +// ReadFrom provides a simple functional packet reader, +// using the given byte slice as a backing array. +// +// To protect against potential denial of service attacks, +// if the read packet length is longer than maxPacketLength, +// then no packet data will be read, and ErrLongPacket will be returned. +// (On 32-bit int architectures, all packets >= 2^31 in length +// will return ErrLongPacket regardless of maxPacketLength.) +// +// If the read packet length is longer than cap(b), +// then a throw-away slice will allocated to meet the exact packet length. +// This can be used to limit the length of reused buffers, +// while still allowing reception of occasional large packets. +// +// The Data field may alias the passed in byte slice, +// so the byte slice passed in should not be reused before RawPacket.Reset(). +func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error { + b, err := readPacket(r, b, maxPacketLength) + if err != nil { + return err + } + + return p.UnmarshalFrom(NewBuffer(b)) +} + +// RequestPacket implements the general packet format from draft-ietf-secsh-filexfer-02 +// but also automatically decode/encodes valid request packets (2 < type < 100 || type == 200). 
+// +// RequestPacket is intended for use in servers receiving requests, +// where any arbitrary request may be received, and so decoding them automatically +// is useful. +// +// For clients expecting to receive specific response packet types, +// where automatic unmarshaling of the packet body does not make sense, +// use RawPacket. +// +// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +type RequestPacket struct { + RequestID uint32 + + Request Packet +} + +// Type returns the SSH_FXP_xy value associated with the underlying packet. +func (p *RequestPacket) Type() PacketType { + return p.Request.Type() +} + +// Reset clears the pointers and reference-semantic variables in RequestPacket, +// releasing underlying resources, and making them and the RequestPacket suitable to be reused, +// so long as no other references have been kept. +func (p *RequestPacket) Reset() { + p.Request = nil +} + +// MarshalPacket returns p as a two-part binary encoding of p. +// +// The internal p.RequestID is overridden by the reqid argument. +func (p *RequestPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + if p.Request == nil { + return nil, nil, errors.New("empty request packet") + } + + return p.Request.MarshalPacket(reqid, b) +} + +// MarshalBinary returns p as the binary encoding of p. +// +// This is a convenience implementation primarily intended for tests, +// because it is inefficient with allocations. +func (p *RequestPacket) MarshalBinary() ([]byte, error) { + return ComposePacket(p.MarshalPacket(p.RequestID, nil)) +} + +// UnmarshalFrom decodes a RequestPacket from the given Buffer into p. +// +// The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE), +// so the buffer passed in should not be reused before RequestPacket.Reset(). +func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error { + typ, err := buf.ConsumeUint8() + if err != nil { + return err + } + + p.Request, err = newPacketFromType(PacketType(typ)) + if err != nil { + return err + } + + if p.RequestID, err = buf.ConsumeUint32(); err != nil { + return err + } + + return p.Request.UnmarshalPacketBody(buf) +} + +// UnmarshalBinary decodes a full request packet out of the given data. +// It is assumed that the uint32(length) has already been consumed to receive the data. +// +// This is a convenience implementation primarily intended for tests, +// because this must clone the given data byte slice, +// as Request is not allowed to alias any part of the data byte slice. +func (p *RequestPacket) UnmarshalBinary(data []byte) error { + clone := make([]byte, len(data)) + n := copy(clone, data) + return p.UnmarshalFrom(NewBuffer(clone[:n])) +} + +// ReadFrom provides a simple functional packet reader, +// using the given byte slice as a backing array. +// +// To protect against potential denial of service attacks, +// if the read packet length is longer than maxPacketLength, +// then no packet data will be read, and ErrLongPacket will be returned. +// (On 32-bit int architectures, all packets >= 2^31 in length +// will return ErrLongPacket regardless of maxPacketLength.) +// +// If the read packet length is longer than cap(b), +// then a throw-away slice will allocated to meet the exact packet length. +// This can be used to limit the length of reused buffers, +// while still allowing reception of occasional large packets. +// +// The Request field may alias the passed in byte slice, +// so the byte slice passed in should not be reused before RawPacket.Reset(). 
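RawPacket.ReadFrom is the client-side entry point: it reads one length-prefixed response into a reusable backing buffer and leaves the body in Data for the caller to decode. A sketch, with the scratch buffer supplied by the caller and DefaultMaxPacketLength as the cap:

package filexfer

import "io"

// clientReadSketch reads one response and dispatches on its type.
func clientReadSketch(r io.Reader, scratch []byte) error {
	var pkt RawPacket
	if err := pkt.ReadFrom(r, scratch, DefaultMaxPacketLength); err != nil {
		return err
	}

	switch pkt.Type() {
	case PacketTypeHandle:
		handle, err := pkt.Data.ConsumeString()
		if err != nil {
			return err
		}
		_ = handle
	case PacketTypeStatus:
		// decode the status body here
	}

	pkt.Reset() // drop the alias on scratch before the buffer is reused
	return nil
}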
+func (p *RequestPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error { + b, err := readPacket(r, b, maxPacketLength) + if err != nil { + return err + } + + return p.UnmarshalFrom(NewBuffer(b)) +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go new file mode 100644 index 0000000000..e6f692d9ff --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go @@ -0,0 +1,368 @@ +package filexfer + +// LStatPacket defines the SSH_FXP_LSTAT packet. +type LStatPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *LStatPacket) Type() PacketType { + return PacketTypeLStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeLStat, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// SetstatPacket defines the SSH_FXP_SETSTAT packet. +type SetstatPacket struct { + Path string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *SetstatPacket) Type() PacketType { + return PacketTypeSetstat +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeSetstat, reqid) + buf.AppendString(p.Path) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// RemovePacket defines the SSH_FXP_REMOVE packet. +type RemovePacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RemovePacket) Type() PacketType { + return PacketTypeRemove +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRemove, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. 
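RequestPacket.ReadFrom gives a server the matching loop: each request is decoded into its concrete packet type, which is then recovered with a type switch. A minimal sketch (response writing is omitted):

package filexfer

import "io"

// serverLoopSketch reads and dispatches requests until the reader fails.
func serverLoopSketch(r io.Reader) error {
	scratch := make([]byte, DefaultMaxPacketLength)

	for {
		var req RequestPacket
		if err := req.ReadFrom(r, scratch, DefaultMaxPacketLength); err != nil {
			return err
		}

		switch pkt := req.Request.(type) {
		case *LStatPacket:
			_ = pkt.Path // stat the path and reply here
		case *ClosePacket:
			_ = pkt.Handle // release the handle and reply here
		default:
			// a real server would answer with StatusOPUnsupported
		}

		req.Reset()
	}
}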
+func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// MkdirPacket defines the SSH_FXP_MKDIR packet. +type MkdirPacket struct { + Path string + Attrs Attributes +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *MkdirPacket) Type() PacketType { + return PacketTypeMkdir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeMkdir, reqid) + buf.AppendString(p.Path) + + p.Attrs.MarshalInto(buf) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return p.Attrs.UnmarshalFrom(buf) +} + +// RmdirPacket defines the SSH_FXP_RMDIR packet. +type RmdirPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RmdirPacket) Type() PacketType { + return PacketTypeRmdir +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRmdir, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// RealPathPacket defines the SSH_FXP_REALPATH packet. +type RealPathPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RealPathPacket) Type() PacketType { + return PacketTypeRealPath +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRealPath, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// StatPacket defines the SSH_FXP_STAT packet. +type StatPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *StatPacket) Type() PacketType { + return PacketTypeStat +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
+func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeStat, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// RenamePacket defines the SSH_FXP_RENAME packet. +type RenamePacket struct { + OldPath string + NewPath string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *RenamePacket) Type() PacketType { + return PacketTypeRename +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(oldpath) + string(newpath) + size := 4 + len(p.OldPath) + 4 + len(p.NewPath) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeRename, reqid) + buf.AppendString(p.OldPath) + buf.AppendString(p.NewPath) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.OldPath, err = buf.ConsumeString(); err != nil { + return err + } + + if p.NewPath, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// ReadLinkPacket defines the SSH_FXP_READLINK packet. +type ReadLinkPacket struct { + Path string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *ReadLinkPacket) Type() PacketType { + return PacketTypeReadLink +} + +// MarshalPacket returns p as a two-part binary encoding of p. +func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + size := 4 + len(p.Path) // string(path) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeReadLink, reqid) + buf.AppendString(p.Path) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + if p.Path, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} + +// SymlinkPacket defines the SSH_FXP_SYMLINK packet. +// +// The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. +// Unfortunately, the reversal was not noticed until the server was widely deployed. +// Covered in Section 3.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +type SymlinkPacket struct { + LinkPath string + TargetPath string +} + +// Type returns the SSH_FXP_xy value associated with this packet type. +func (p *SymlinkPacket) Type() PacketType { + return PacketTypeSymlink +} + +// MarshalPacket returns p as a two-part binary encoding of p. 
+func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) { + buf := NewBuffer(b) + if buf.Cap() < 9 { + // string(targetpath) + string(linkpath) + size := 4 + len(p.TargetPath) + 4 + len(p.LinkPath) + buf = NewMarshalBuffer(size) + } + + buf.StartPacket(PacketTypeSymlink, reqid) + + // Arguments were inadvertently reversed. + buf.AppendString(p.TargetPath) + buf.AppendString(p.LinkPath) + + return buf.Packet(payload) +} + +// UnmarshalPacketBody unmarshals the packet body from the given Buffer. +// It is assumed that the uint32(request-id) has already been consumed. +func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { + // Arguments were inadvertently reversed. + if p.TargetPath, err = buf.ConsumeString(); err != nil { + return err + } + + if p.LinkPath, err = buf.ConsumeString(); err != nil { + return err + } + + return nil +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go new file mode 100644 index 0000000000..2fe63d5916 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go @@ -0,0 +1,114 @@ +package filexfer + +// FileMode represents a file’s mode and permission bits. +// The bits are defined according to POSIX standards, +// and may not apply to the OS being built for. +type FileMode uint32 + +// Permission flags, defined here to avoid potential inconsistencies in individual OS implementations. +const ( + ModePerm FileMode = 0o0777 // S_IRWXU | S_IRWXG | S_IRWXO + ModeUserRead FileMode = 0o0400 // S_IRUSR + ModeUserWrite FileMode = 0o0200 // S_IWUSR + ModeUserExec FileMode = 0o0100 // S_IXUSR + ModeGroupRead FileMode = 0o0040 // S_IRGRP + ModeGroupWrite FileMode = 0o0020 // S_IWGRP + ModeGroupExec FileMode = 0o0010 // S_IXGRP + ModeOtherRead FileMode = 0o0004 // S_IROTH + ModeOtherWrite FileMode = 0o0002 // S_IWOTH + ModeOtherExec FileMode = 0o0001 // S_IXOTH + + ModeSetUID FileMode = 0o4000 // S_ISUID + ModeSetGID FileMode = 0o2000 // S_ISGID + ModeSticky FileMode = 0o1000 // S_ISVTX + + ModeType FileMode = 0xF000 // S_IFMT + ModeNamedPipe FileMode = 0x1000 // S_IFIFO + ModeCharDevice FileMode = 0x2000 // S_IFCHR + ModeDir FileMode = 0x4000 // S_IFDIR + ModeDevice FileMode = 0x6000 // S_IFBLK + ModeRegular FileMode = 0x8000 // S_IFREG + ModeSymlink FileMode = 0xA000 // S_IFLNK + ModeSocket FileMode = 0xC000 // S_IFSOCK +) + +// IsDir reports whether m describes a directory. +// That is, it tests for m.Type() == ModeDir. +func (m FileMode) IsDir() bool { + return (m & ModeType) == ModeDir +} + +// IsRegular reports whether m describes a regular file. +// That is, it tests for m.Type() == ModeRegular +func (m FileMode) IsRegular() bool { + return (m & ModeType) == ModeRegular +} + +// Perm returns the POSIX permission bits in m (m & ModePerm). +func (m FileMode) Perm() FileMode { + return (m & ModePerm) +} + +// Type returns the type bits in m (m & ModeType). +func (m FileMode) Type() FileMode { + return (m & ModeType) +} + +// String returns a `-rwxrwxrwx` style string representing the `ls -l` POSIX permissions string. 
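The reversed-argument quirk documented on SymlinkPacket is easy to trip over, so it is worth seeing once in code; the FileMode helpers from permissions.go are shown alongside. A sketch with illustrative paths:

package filexfer

// symlinkSketch marshals a symlink request; on the wire TargetPath precedes
// LinkPath, matching the historical OpenSSH behaviour noted above.
func symlinkSketch() error {
	p := &SymlinkPacket{
		LinkPath:   "/home/user/link",
		TargetPath: "/home/user/target",
	}

	if _, _, err := p.MarshalPacket(5, nil); err != nil {
		return err
	}

	// FileMode helpers: type bits and permission bits are tested separately.
	_ = (ModeDir | 0o755).IsDir() // true
	return nil
}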
+func (m FileMode) String() string {
+	var buf [10]byte
+
+	switch m.Type() {
+	case ModeRegular:
+		buf[0] = '-'
+	case ModeDir:
+		buf[0] = 'd'
+	case ModeSymlink:
+		buf[0] = 'l'
+	case ModeDevice:
+		buf[0] = 'b'
+	case ModeCharDevice:
+		buf[0] = 'c'
+	case ModeNamedPipe:
+		buf[0] = 'p'
+	case ModeSocket:
+		buf[0] = 's'
+	default:
+		buf[0] = '?'
+	}
+
+	const rwx = "rwxrwxrwx"
+	for i, c := range rwx {
+		if m&(1<<uint(9-1-i)) != 0 {
+			buf[i+1] = byte(c)
+		} else {
+			buf[i+1] = '-'
+		}
+	}
+
+	return string(buf[:])
+}
diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/match.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/match.go
--- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/match.go
+++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/match.go
-// scanChunk gets the next segment of pattern, which is a non-star string
-// possibly preceded by a star.
-func scanChunk(pattern string) (star bool, chunk, rest string) {
-	for len(pattern) > 0 && pattern[0] == '*' {
-		pattern = pattern[1:]
-		star = true
-	}
-	inrange := false
-	var i int
-Scan:
-	for i = 0; i < len(pattern); i++ {
-		switch pattern[i] {
-		case '\\':
-
-			// error check handled in matchChunk: bad pattern.
-			if i+1 < len(pattern) {
-				i++
-			}
-		case '[':
-			inrange = true
-		case ']':
-			inrange = false
-		case '*':
-			if !inrange {
-				break Scan
-			}
-		}
-	}
-	return star, pattern[0:i], pattern[i:]
+	return c == '/'
 }
-// matchChunk checks whether chunk matches the beginning of s.
-// If so, it returns the remainder of s (after the match).
-// Chunk is all single-character operators: literals, char classes, and ?.
-func matchChunk(chunk, s string) (rest string, ok bool, err error) {
-	for len(chunk) > 0 {
-		if len(s) == 0 {
-			return
-		}
-		switch chunk[0] {
-		case '[':
-			// character class
-			r, n := utf8.DecodeRuneInString(s)
-			s = s[n:]
-			chunk = chunk[1:]
-			// We can't end right after '[', we're expecting at least
-			// a closing bracket and possibly a caret.
-			if len(chunk) == 0 {
-				err = ErrBadPattern
-				return
-			}
-			// possibly negated
-			negated := chunk[0] == '^'
-			if negated {
-				chunk = chunk[1:]
-			}
-			// parse all ranges
-			match := false
-			nrange := 0
-			for {
-				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
-					chunk = chunk[1:]
-					break
-				}
-				var lo, hi rune
-				if lo, chunk, err = getEsc(chunk); err != nil {
-					return
-				}
-				hi = lo
-				if chunk[0] == '-' {
-					if hi, chunk, err = getEsc(chunk[1:]); err != nil {
-						return
-					}
-				}
-				if lo <= r && r <= hi {
-					match = true
-				}
-				nrange++
-			}
-			if match == negated {
-				return
-			}
-
-		case '?':
-			if isPathSeparator(s[0]) {
-				return
-			}
-			_, n := utf8.DecodeRuneInString(s)
-			s = s[n:]
-			chunk = chunk[1:]
-
-		case '\\':
-			chunk = chunk[1:]
-			if len(chunk) == 0 {
-				err = ErrBadPattern
-				return
-			}
-			fallthrough
-
-		default:
-			if chunk[0] != s[0] {
-				return
-			}
-			s = s[1:]
-			chunk = chunk[1:]
-		}
-	}
-	return s, true, nil
-}
-
-// getEsc gets a possibly-escaped character from chunk, for a character class.
-func getEsc(chunk string) (r rune, nchunk string, err error) {
-	if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
-		err = ErrBadPattern
-		return
-	}
-	if chunk[0] == '\\' {
-		chunk = chunk[1:]
-		if len(chunk) == 0 {
-			err = ErrBadPattern
-			return
-		}
-	}
-	r, n := utf8.DecodeRuneInString(chunk)
-	if r == utf8.RuneError && n == 1 {
-		err = ErrBadPattern
-	}
-	nchunk = chunk[n:]
-	if len(nchunk) == 0 {
-		err = ErrBadPattern
-	}
-	return
-}
-
-// Split splits path immediately following the final Separator,
+// Split splits the path p immediately following the final slash,
 // separating it into a directory and file name component.
-// If there is no Separator in path, Split returns an empty dir
-// and file set to path.
-// The returned values have the property that path = dir+file.
-func Split(path string) (dir, file string) {
-	i := len(path) - 1
-	for i >= 0 && !isPathSeparator(path[i]) {
-		i--
-	}
-	return path[:i+1], path[i+1:]
+//
+// This is an alias for path.Split from the standard library,
+// offered so that callers need not import the path package.
+// For details, see https://golang.org/pkg/path/#Split. +func Split(p string) (dir, file string) { + return path.Split(p) } // Glob returns the names of all files matching pattern or nil // if there is no matching file. The syntax of patterns is the same // as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). +// /usr/*/bin/ed. // // Glob ignores file system errors such as I/O errors reading directories. // The only possible returned error is ErrBadPattern, when pattern @@ -241,8 +82,7 @@ func cleanGlobPath(path string) string { switch path { case "": return "." - case string(separator): - // do nothing to the path + case "/": return path default: return path[0 : len(path)-1] // chop off trailing separator @@ -280,9 +120,12 @@ func (c *Client) glob(dir, pattern string, matches []string) (m []string, e erro return } -// Join joins any number of path elements into a single path, adding -// a Separator if necessary. -// all empty strings are ignored. +// Join joins any number of path elements into a single path, separating +// them with slashes. +// +// This is an alias for path.Join from the standard library, +// offered so that callers need not import the path package. +// For details, see https://golang.org/pkg/path/#Join. func Join(elem ...string) string { return path.Join(elem...) } @@ -290,6 +133,5 @@ func Join(elem ...string) string { // hasMeta reports whether path contains any of the magic characters // recognized by Match. func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.ContainsAny(path, "*?[") + return strings.ContainsAny(path, "\\*?[") } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet-typing.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet-typing.go index da5c2bc687..f4f9052950 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet-typing.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet-typing.go @@ -2,8 +2,7 @@ package sftp import ( "encoding" - - "github.com/pkg/errors" + "fmt" ) // all incoming packets @@ -125,7 +124,7 @@ func makePacket(p rxPacket) (requestPacket, error) { case sshFxpExtended: pkt = &sshFxpExtendedPacket{} default: - return nil, errors.Errorf("unhandled packet type: %s", p.pktType) + return nil, fmt.Errorf("unhandled packet type: %s", p.pktType) } if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { // Return partially unpacked packet to allow callers to return diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet.go index 4a6863550d..4059cf8e0a 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/packet.go @@ -4,12 +4,11 @@ import ( "bytes" "encoding" "encoding/binary" + "errors" "fmt" "io" "os" "reflect" - - "github.com/pkg/errors" ) var ( @@ -38,6 +37,50 @@ func marshalString(b []byte, v string) []byte { return append(marshalUint32(b, uint32(len(v))), v...) 
} +func marshalFileInfo(b []byte, fi os.FileInfo) []byte { + // attributes variable struct, and also variable per protocol version + // spec version 3 attributes: + // uint32 flags + // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE + // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS + // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED + // string extended_type + // string extended_data + // ... more extended data (extended_type - extended_data pairs), + // so that number of pairs equals extended_count + + flags, fileStat := fileStatFromInfo(fi) + + b = marshalUint32(b, flags) + if flags&sshFileXferAttrSize != 0 { + b = marshalUint64(b, fileStat.Size) + } + if flags&sshFileXferAttrUIDGID != 0 { + b = marshalUint32(b, fileStat.UID) + b = marshalUint32(b, fileStat.GID) + } + if flags&sshFileXferAttrPermissions != 0 { + b = marshalUint32(b, fileStat.Mode) + } + if flags&sshFileXferAttrACmodTime != 0 { + b = marshalUint32(b, fileStat.Atime) + b = marshalUint32(b, fileStat.Mtime) + } + + return b +} + +func marshalStatus(b []byte, err StatusError) []byte { + b = marshalUint32(b, err.Code) + b = marshalString(b, err.msg) + b = marshalString(b, err.lang) + return b +} + func marshal(b []byte, v interface{}) []byte { if v == nil { return b @@ -116,6 +159,63 @@ func unmarshalStringSafe(b []byte) (string, []byte, error) { return string(b[:n]), b[n:], nil } +func unmarshalAttrs(b []byte) (*FileStat, []byte) { + flags, b := unmarshalUint32(b) + return unmarshalFileStat(flags, b) +} + +func unmarshalFileStat(flags uint32, b []byte) (*FileStat, []byte) { + var fs FileStat + if flags&sshFileXferAttrSize == sshFileXferAttrSize { + fs.Size, b, _ = unmarshalUint64Safe(b) + } + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.UID, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.GID, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { + fs.Mode, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { + fs.Atime, b, _ = unmarshalUint32Safe(b) + fs.Mtime, b, _ = unmarshalUint32Safe(b) + } + if flags&sshFileXferAttrExtended == sshFileXferAttrExtended { + var count uint32 + count, b, _ = unmarshalUint32Safe(b) + ext := make([]StatExtended, count) + for i := uint32(0); i < count; i++ { + var typ string + var data string + typ, b, _ = unmarshalStringSafe(b) + data, b, _ = unmarshalStringSafe(b) + ext[i] = StatExtended{ + ExtType: typ, + ExtData: data, + } + } + fs.Extended = ext + } + return &fs, b +} + +func unmarshalStatus(id uint32, data []byte) error { + sid, data := unmarshalUint32(data) + if sid != id { + return &unexpectedIDErr{id, sid} + } + code, data := unmarshalUint32(data) + msg, data, _ := unmarshalStringSafe(data) + lang, _, _ := unmarshalStringSafe(data) + return &StatusError{ + Code: code, + msg: msg, + lang: lang, + } +} + type packetMarshaler interface { marshalPacket() (header, payload []byte, err error) } @@ -133,7 +233,7 @@ func marshalPacket(m encoding.BinaryMarshaler) (header, payload []byte, err erro func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { header, payload, err := marshalPacket(m) if err != nil { - return 
errors.Errorf("binary marshaller failed: %v", err) + return fmt.Errorf("binary marshaller failed: %w", err) } length := len(header) + len(payload) - 4 // subtract the uint32(length) from the start @@ -146,12 +246,12 @@ func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { binary.BigEndian.PutUint32(header[:4], uint32(length)) if _, err := w.Write(header); err != nil { - return errors.Errorf("failed to send packet: %v", err) + return fmt.Errorf("failed to send packet: %w", err) } if len(payload) > 0 { if _, err := w.Write(payload); err != nil { - return errors.Errorf("failed to send packet payload: %v", err) + return fmt.Errorf("failed to send packet payload: %w", err) } } @@ -639,12 +739,17 @@ func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { const dataHeaderLen = 4 + 1 + 4 + 4 func (p *sshFxpReadPacket) getDataSlice(alloc *allocator, orderID uint32) []byte { - dataLen := clamp(p.Len, maxTxPacket) + dataLen := p.Len + if dataLen > maxTxPacket { + dataLen = maxTxPacket + } + if alloc != nil { // GetPage returns a slice with capacity = maxMsgLength this is enough to avoid new allocations in // sshFxpDataPacket.MarshalBinary return alloc.GetPage(orderID)[:dataLen] } + // allocate with extra space for the header return make([]byte, dataLen, dataLen+dataHeaderLen) } @@ -1017,6 +1122,7 @@ func (p *StatVFS) marshalPacket() ([]byte, []byte, error) { return header, buf.Bytes(), err } +// MarshalBinary encodes the StatVFS as an SSH_FXP_EXTENDED_REPLY packet. func (p *StatVFS) MarshalBinary() ([]byte, error) { header, payload, err := p.marshalPacket() return append(header, payload...), err @@ -1086,7 +1192,7 @@ func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { case "hardlink@openssh.com": p.SpecificPacket = &sshFxpExtendedPacketHardlink{} default: - return errors.Wrapf(errUnknownExtendedPacket, "packet type %v", p.SpecificPacket) + return fmt.Errorf("packet type %v: %w", p.SpecificPacket, errUnknownExtendedPacket) } return p.SpecificPacket.UnmarshalBinary(bOrig) @@ -1136,7 +1242,7 @@ func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { } func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { - err := os.Rename(p.Oldpath, p.Newpath) + err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) return statusFromError(p.ID, err) } @@ -1165,6 +1271,6 @@ func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error { } func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket { - err := os.Link(p.Oldpath, p.Newpath) + err := os.Link(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) return statusFromError(p.ID, err) } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/pool.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/pool.go new file mode 100644 index 0000000000..3612629065 --- /dev/null +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/pool.go @@ -0,0 +1,79 @@ +package sftp + +// bufPool provides a pool of byte-slices to be reused in various parts of the package. +// It is safe to use concurrently through a pointer. +type bufPool struct { + ch chan []byte + blen int +} + +func newBufPool(depth, bufLen int) *bufPool { + return &bufPool{ + ch: make(chan []byte, depth), + blen: bufLen, + } +} + +func (p *bufPool) Get() []byte { + if p.blen <= 0 { + panic("bufPool: new buffer creation length must be greater than zero") + } + + for { + select { + case b := <-p.ch: + if cap(b) < p.blen { + // just in case: throw away any buffer with insufficient capacity. 
+ continue + } + + return b[:p.blen] + + default: + return make([]byte, p.blen) + } + } +} + +func (p *bufPool) Put(b []byte) { + if p == nil { + // functional default: no reuse. + return + } + + if cap(b) < p.blen || cap(b) > p.blen*2 { + // DO NOT reuse buffers with insufficient capacity. + // This could cause panics when resizing to p.blen. + + // DO NOT reuse buffers with excessive capacity. + // This could cause memory leaks. + return + } + + select { + case p.ch <- b: + default: + } +} + +type resChanPool chan chan result + +func newResChanPool(depth int) resChanPool { + return make(chan chan result, depth) +} + +func (p resChanPool) Get() chan result { + select { + case ch := <-p: + return ch + default: + return make(chan result, 1) + } +} + +func (p resChanPool) Put(ch chan result) { + select { + case p <- ch: + default: + } +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-attrs.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-attrs.go index 7c2e5c1223..b5c95b4ad6 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-attrs.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-attrs.go @@ -58,6 +58,6 @@ func (a FileStat) FileMode() os.FileMode { // Attributes parses file attributes byte blob and return them in a // FileStat object. func (r *Request) Attributes() *FileStat { - fs, _ := getFileStat(r.Flags, r.Attrs) + fs, _ := unmarshalFileStat(r.Flags, r.Attrs) return fs } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-errors.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-errors.go index e4b4b86dc8..6505b5c74f 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-errors.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-errors.go @@ -37,18 +37,18 @@ func (e fxerr) Error() string { case ErrSSHFxEOF: return "EOF" case ErrSSHFxNoSuchFile: - return "No Such File" + return "no such file" case ErrSSHFxPermissionDenied: - return "Permission Denied" + return "permission denied" case ErrSSHFxBadMessage: - return "Bad Message" + return "bad message" case ErrSSHFxNoConnection: - return "No Connection" + return "no connection" case ErrSSHFxConnectionLost: - return "Connection Lost" + return "connection lost" case ErrSSHFxOpUnsupported: - return "Operation Unsupported" + return "operation unsupported" default: - return "Failure" + return "failure" } } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-example.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-example.go index ab6c675ec8..ba22bcd0f8 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-example.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-example.go @@ -464,10 +464,19 @@ func (fs *root) Lstat(r *Request) (ListerAt, error) { return listerat{file}, nil } +// implements RealpathFileLister interface +func (fs *root) Realpath(p string) string { + if fs.startDirectory == "" || fs.startDirectory == "/" { + return cleanPath(p) + } + return cleanPathWithBase(fs.startDirectory, p) +} + // In memory file-system-y thing that the Hanlders live on type root struct { - rootFile *memFile - mockErr error + rootFile *memFile + mockErr error + startDirectory string mu sync.Mutex files map[string]*memFile diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-interfaces.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-interfaces.go index 4529d99897..e5dc49bb19 100644 --- 
a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-interfaces.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-interfaces.go @@ -86,6 +86,25 @@ type LstatFileLister interface { Lstat(*Request) (ListerAt, error) } +// RealPathFileLister is a FileLister that implements the Realpath method. +// We use "/" as start directory for relative paths, implementing this +// interface you can customize the start directory. +// You have to return an absolute POSIX path. +// +// Deprecated: if you want to set a start directory use WithStartDirectory RequestServerOption instead. +type RealPathFileLister interface { + FileLister + RealPath(string) string +} + +// NameLookupFileLister is a FileLister that implmeents the LookupUsername and LookupGroupName methods. +// If this interface is implemented, then longname ls formatting will use these to convert usernames and groupnames. +type NameLookupFileLister interface { + FileLister + LookupUserName(string) string + LookupGroupName(string) string +} + // ListerAt does for file lists what io.ReaderAt does for files. // ListAt should return the number of entries copied and an io.EOF // error if at end of list. This is testable by comparing how many you diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-plan9.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-plan9.go index 0074e8a3d0..2444da593d 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-plan9.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-plan9.go @@ -2,7 +2,11 @@ package sftp -import "syscall" +import ( + "path" + "path/filepath" + "syscall" +) func fakeFileInfoSys() interface{} { return &syscall.Dir{} @@ -11,3 +15,20 @@ func fakeFileInfoSys() interface{} { func testOsSys(sys interface{}) error { return nil } + +func toLocalPath(p string) string { + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp[1:] + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/#s/boot" to "#s/boot" + return tmp + } + } + + return lp +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-server.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-server.go index 0caafc93cb..b7dadd6c1b 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-server.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-server.go @@ -2,13 +2,12 @@ package sftp import ( "context" + "errors" "io" "path" "path/filepath" "strconv" "sync" - - "github.com/pkg/errors" ) var maxTxPacket uint32 = 1 << 15 @@ -23,12 +22,16 @@ type Handlers struct { // RequestServer abstracts the sftp protocol with an http request-like protocol type RequestServer struct { + Handlers Handlers + *serverConn - Handlers Handlers - pktMgr *packetManager - openRequests map[string]*Request - openRequestLock sync.RWMutex - handleCount int + pktMgr *packetManager + + startDirectory string + + mu sync.RWMutex + handleCount int + openRequests map[string]*Request } // A RequestServerOption is a function which applies configuration to a RequestServer. @@ -46,6 +49,14 @@ func WithRSAllocator() RequestServerOption { } } +// WithStartDirectory sets a start directory to use as base for relative paths. 
+// If unset the default is "/" +func WithStartDirectory(startDirectory string) RequestServerOption { + return func(rs *RequestServer) { + rs.startDirectory = cleanPath(startDirectory) + } +} + // NewRequestServer creates/allocates/returns new RequestServer. // Normally there will be one server per user-session. func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer { @@ -56,9 +67,13 @@ func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServ }, } rs := &RequestServer{ - serverConn: svrConn, - Handlers: h, - pktMgr: newPktMgr(svrConn), + Handlers: h, + + serverConn: svrConn, + pktMgr: newPktMgr(svrConn), + + startDirectory: "/", + openRequests: make(map[string]*Request), } @@ -70,13 +85,15 @@ func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServ // New Open packet/Request func (rs *RequestServer) nextRequest(r *Request) string { - rs.openRequestLock.Lock() - defer rs.openRequestLock.Unlock() + rs.mu.Lock() + defer rs.mu.Unlock() + rs.handleCount++ - handle := strconv.Itoa(rs.handleCount) - r.handle = handle - rs.openRequests[handle] = r - return handle + + r.handle = strconv.Itoa(rs.handleCount) + rs.openRequests[r.handle] = r + + return r.handle } // Returns Request from openRequests, bool is false if it is missing. @@ -85,20 +102,23 @@ func (rs *RequestServer) nextRequest(r *Request) string { // you can do different things with. What you are doing with it are denoted by // the first packet of that type (read/write/etc). func (rs *RequestServer) getRequest(handle string) (*Request, bool) { - rs.openRequestLock.RLock() - defer rs.openRequestLock.RUnlock() + rs.mu.RLock() + defer rs.mu.RUnlock() + r, ok := rs.openRequests[handle] return r, ok } // Close the Request and clear from openRequests map func (rs *RequestServer) closeRequest(handle string) error { - rs.openRequestLock.Lock() - defer rs.openRequestLock.Unlock() + rs.mu.Lock() + defer rs.mu.Unlock() + if r, ok := rs.openRequests[handle]; ok { delete(rs.openRequests, handle) return r.close() } + return EBADF } @@ -122,8 +142,8 @@ func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error { pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) if err != nil { - switch errors.Cause(err) { - case errUnknownExtendedPacket: + switch { + case errors.Is(err, errUnknownExtendedPacket): // do nothing default: debug("makePacket err: %v", err) @@ -143,8 +163,10 @@ func (rs *RequestServer) Serve() error { rs.pktMgr.alloc.Free() } }() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() + var wg sync.WaitGroup runWorker := func(ch chan orderedRequest) { wg.Add(1) @@ -161,8 +183,8 @@ func (rs *RequestServer) Serve() error { wg.Wait() // wait for all workers to exit - rs.openRequestLock.Lock() - defer rs.openRequestLock.Unlock() + rs.mu.Lock() + defer rs.mu.Unlock() // make sure all open requests are properly closed // (eg. possible on dropped connections, client crashes, etc.) 
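The WithStartDirectory option added in this hunk is the main new server-side knob in 1.13.5: it lets a RequestServer resolve relative client paths against a configurable base directory instead of "/". Below is a minimal sketch of wiring it up, assuming an already-established io.ReadWriteCloser (for example an ssh.Channel) and the package's in-memory InMemHandler example handlers; the jail path is a hypothetical placeholder, not something this patch configures.

package sftpexample

import (
	"io"

	"github.com/pkg/sftp"
)

// serveSession runs a single SFTP session over rwc, resolving relative
// client paths against "/sftp/jail" (a placeholder base directory).
func serveSession(rwc io.ReadWriteCloser) error {
	srv := sftp.NewRequestServer(
		rwc,
		sftp.InMemHandler(), // example in-memory Handlers shipped with the package
		sftp.WithStartDirectory("/sftp/jail"),
	)
	defer srv.Close()

	if err := srv.Serve(); err != nil && err != io.EOF {
		return err
	}
	return nil // io.EOF just means the client ended the session
}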
@@ -179,9 +201,7 @@ func (rs *RequestServer) Serve() error { return err } -func (rs *RequestServer) packetWorker( - ctx context.Context, pktChan chan orderedRequest, -) error { +func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedRequest) error { for pkt := range pktChan { orderID := pkt.orderID() if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok { @@ -198,9 +218,15 @@ func (rs *RequestServer) packetWorker( handle := pkt.getHandle() rpkt = statusFromError(pkt.ID, rs.closeRequest(handle)) case *sshFxpRealpathPacket: - rpkt = cleanPacketPath(pkt) + var realPath string + if realPather, ok := rs.Handlers.FileList.(RealPathFileLister); ok { + realPath = realPather.RealPath(pkt.getPath()) + } else { + realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath()) + } + rpkt = cleanPacketPath(pkt, realPath) case *sshFxpOpendirPacket: - request := requestFromPacket(ctx, pkt) + request := requestFromPacket(ctx, pkt, rs.startDirectory) handle := rs.nextRequest(request) rpkt = request.opendir(rs.Handlers, pkt) if _, ok := rpkt.(*sshFxpHandlePacket); !ok { @@ -208,7 +234,7 @@ func (rs *RequestServer) packetWorker( rs.closeRequest(handle) } case *sshFxpOpenPacket: - request := requestFromPacket(ctx, pkt) + request := requestFromPacket(ctx, pkt, rs.startDirectory) handle := rs.nextRequest(request) rpkt = request.open(rs.Handlers, pkt) if _, ok := rpkt.(*sshFxpHandlePacket); !ok { @@ -221,7 +247,10 @@ func (rs *RequestServer) packetWorker( if !ok { rpkt = statusFromError(pkt.ID, EBADF) } else { - request = NewRequest("Stat", request.Filepath) + request = &Request{ + Method: "Stat", + Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), + } rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) } case *sshFxpFsetstatPacket: @@ -230,15 +259,24 @@ func (rs *RequestServer) packetWorker( if !ok { rpkt = statusFromError(pkt.ID, EBADF) } else { - request = NewRequest("Setstat", request.Filepath) + request = &Request{ + Method: "Setstat", + Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath), + } rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) } case *sshFxpExtendedPacketPosixRename: - request := NewRequest("PosixRename", pkt.Oldpath) - request.Target = pkt.Newpath + request := &Request{ + Method: "PosixRename", + Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath), + Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath), + } rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) case *sshFxpExtendedPacketStatVFS: - request := NewRequest("StatVFS", pkt.Path) + request := &Request{ + Method: "StatVFS", + Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path), + } rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) case hasHandle: handle := pkt.getHandle() @@ -249,7 +287,7 @@ func (rs *RequestServer) packetWorker( rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) } case hasPath: - request := requestFromPacket(ctx, pkt) + request := requestFromPacket(ctx, pkt, rs.startDirectory) rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID) request.close() default: @@ -263,14 +301,13 @@ func (rs *RequestServer) packetWorker( } // clean and return name packet for file -func cleanPacketPath(pkt *sshFxpRealpathPacket) responsePacket { - path := cleanPath(pkt.getPath()) +func cleanPacketPath(pkt *sshFxpRealpathPacket, realPath string) responsePacket { return &sshFxpNamePacket{ ID: pkt.id(), NameAttrs: []*sshFxpNameAttr{ { - Name: path, - LongName: path, + Name: realPath, + LongName: 
realPath, Attrs: emptyFileStat, }, }, @@ -279,9 +316,13 @@ func cleanPacketPath(pkt *sshFxpRealpathPacket) responsePacket { // Makes sure we have a clean POSIX (/) absolute path to work with func cleanPath(p string) string { - p = filepath.ToSlash(p) + return cleanPathWithBase("/", p) +} + +func cleanPathWithBase(base, p string) string { + p = filepath.ToSlash(filepath.Clean(p)) if !path.IsAbs(p) { - p = "/" + p + return path.Join(base, p) } - return path.Clean(p) + return p } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-unix.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-unix.go index d30b256917..50b08a38d3 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-unix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request-unix.go @@ -21,3 +21,7 @@ func testOsSys(sys interface{}) error { } return nil } + +func toLocalPath(p string) string { + return p +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request.go index d6851ff180..116c27aab9 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request.go @@ -2,20 +2,125 @@ package sftp import ( "context" + "errors" + "fmt" "io" "os" - "path" - "path/filepath" "strings" "sync" "syscall" - - "github.com/pkg/errors" ) // MaxFilelist is the max number of files to return in a readdir batch. var MaxFilelist int64 = 100 +// state encapsulates the reader/writer/readdir from handlers. +type state struct { + mu sync.RWMutex + + writerAt io.WriterAt + readerAt io.ReaderAt + writerAtReaderAt WriterAtReaderAt + listerAt ListerAt + lsoffset int64 +} + +// copy returns a shallow copy the state. +// This is broken out to specific fields, +// because we have to copy around the mutex in state. 
+func (s *state) copy() state { + s.mu.RLock() + defer s.mu.RUnlock() + + return state{ + writerAt: s.writerAt, + readerAt: s.readerAt, + writerAtReaderAt: s.writerAtReaderAt, + listerAt: s.listerAt, + lsoffset: s.lsoffset, + } +} + +func (s *state) setReaderAt(rd io.ReaderAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.readerAt = rd +} + +func (s *state) getReaderAt() io.ReaderAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.readerAt +} + +func (s *state) setWriterAt(rd io.WriterAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.writerAt = rd +} + +func (s *state) getWriterAt() io.WriterAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.writerAt +} + +func (s *state) setWriterAtReaderAt(rw WriterAtReaderAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.writerAtReaderAt = rw +} + +func (s *state) getWriterAtReaderAt() WriterAtReaderAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.writerAtReaderAt +} + +func (s *state) getAllReaderWriters() (io.ReaderAt, io.WriterAt, WriterAtReaderAt) { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.readerAt, s.writerAt, s.writerAtReaderAt +} + +// Returns current offset for file list +func (s *state) lsNext() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.lsoffset +} + +// Increases next offset +func (s *state) lsInc(offset int64) { + s.mu.Lock() + defer s.mu.Unlock() + + s.lsoffset += offset +} + +// manage file read/write state +func (s *state) setListerAt(la ListerAt) { + s.mu.Lock() + defer s.mu.Unlock() + + s.listerAt = la +} + +func (s *state) getListerAt() ListerAt { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.listerAt +} + // Request contains the data and state for the incoming service request. type Request struct { // Get, Put, Setstat, Stat, Rename, Remove @@ -26,26 +131,48 @@ type Request struct { Attrs []byte // convert to sub-struct Target string // for renames and sym-links handle string + // reader/writer/readdir from handlers - state state + state + // context lasts duration of request ctx context.Context cancelCtx context.CancelFunc } -type state struct { - *sync.RWMutex - writerAt io.WriterAt - readerAt io.ReaderAt - writerReaderAt WriterAtReaderAt - listerAt ListerAt - lsoffset int64 +// NewRequest creates a new Request object. +func NewRequest(method, path string) *Request { + return &Request{ + Method: method, + Filepath: cleanPath(path), + } +} + +// copy returns a shallow copy of existing request. +// This is broken out to specific fields, +// because we have to copy around the mutex in state. 
+func (r *Request) copy() *Request { + return &Request{ + Method: r.Method, + Filepath: r.Filepath, + Flags: r.Flags, + Attrs: r.Attrs, + Target: r.Target, + handle: r.handle, + + state: r.state.copy(), + + ctx: r.ctx, + cancelCtx: r.cancelCtx, + } } // New Request initialized based on packet data -func requestFromPacket(ctx context.Context, pkt hasPath) *Request { - method := requestMethod(pkt) - request := NewRequest(method, pkt.getPath()) +func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Request { + request := &Request{ + Method: requestMethod(pkt), + Filepath: cleanPathWithBase(baseDir, pkt.getPath()), + } request.ctx, request.cancelCtx = context.WithCancel(ctx) switch p := pkt.(type) { @@ -55,32 +182,17 @@ func requestFromPacket(ctx context.Context, pkt hasPath) *Request { request.Flags = p.Flags request.Attrs = p.Attrs.([]byte) case *sshFxpRenamePacket: - request.Target = cleanPath(p.Newpath) + request.Target = cleanPathWithBase(baseDir, p.Newpath) case *sshFxpSymlinkPacket: // NOTE: given a POSIX compliant signature: symlink(target, linkpath string) // this makes Request.Target the linkpath, and Request.Filepath the target. - request.Target = cleanPath(p.Linkpath) + request.Target = cleanPathWithBase(baseDir, p.Linkpath) case *sshFxpExtendedPacketHardlink: - request.Target = cleanPath(p.Newpath) + request.Target = cleanPathWithBase(baseDir, p.Newpath) } return request } -// NewRequest creates a new Request object. -func NewRequest(method, path string) *Request { - return &Request{Method: method, Filepath: cleanPath(path), - state: state{RWMutex: new(sync.RWMutex)}} -} - -// shallow copy of existing request -func (r *Request) copy() *Request { - r.state.Lock() - defer r.state.Unlock() - r2 := new(Request) - *r2 = *r - return r2 -} - // Context returns the request's context. To change the context, // use WithContext. 
// @@ -108,33 +220,6 @@ func (r *Request) WithContext(ctx context.Context) *Request { return r2 } -// Returns current offset for file list -func (r *Request) lsNext() int64 { - r.state.RLock() - defer r.state.RUnlock() - return r.state.lsoffset -} - -// Increases next offset -func (r *Request) lsInc(offset int64) { - r.state.Lock() - defer r.state.Unlock() - r.state.lsoffset = r.state.lsoffset + offset -} - -// manage file read/write state -func (r *Request) setListerState(la ListerAt) { - r.state.Lock() - defer r.state.Unlock() - r.state.listerAt = la -} - -func (r *Request) getLister() ListerAt { - r.state.RLock() - defer r.state.RUnlock() - return r.state.listerAt -} - // Close reader/writer if possible func (r *Request) close() error { defer func() { @@ -143,11 +228,7 @@ func (r *Request) close() error { } }() - r.state.RLock() - wr := r.state.writerAt - rd := r.state.readerAt - rw := r.state.writerReaderAt - r.state.RUnlock() + rd, wr, rw := r.getAllReaderWriters() var err error @@ -164,7 +245,8 @@ func (r *Request) close() error { if err2 := c.Close(); err == nil { // update error if it is still nil err = err2 - r.state.writerReaderAt = nil + + r.setWriterAtReaderAt(nil) } } @@ -184,11 +266,7 @@ func (r *Request) transferError(err error) { return } - r.state.RLock() - wr := r.state.writerAt - rd := r.state.readerAt - rw := r.state.writerReaderAt - r.state.RUnlock() + rd, wr, rw := r.getAllReaderWriters() if t, ok := wr.(TransferError); ok { t.TransferError(err) @@ -219,8 +297,7 @@ func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, o case "Stat", "Lstat", "Readlink": return filestat(handlers.FileList, r, pkt) default: - return statusFromError(pkt.id(), - errors.Errorf("unexpected method: %s", r.Method)) + return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method)) } } @@ -239,8 +316,13 @@ func (r *Request) open(h Handlers, pkt requestPacket) responsePacket { if err != nil { return statusFromError(id, err) } - r.state.writerReaderAt = rw - return &sshFxpHandlePacket{ID: id, Handle: r.handle} + + r.setWriterAtReaderAt(rw) + + return &sshFxpHandlePacket{ + ID: id, + Handle: r.handle, + } } } @@ -249,18 +331,26 @@ func (r *Request) open(h Handlers, pkt requestPacket) responsePacket { if err != nil { return statusFromError(id, err) } - r.state.writerAt = wr + + r.setWriterAt(wr) + case flags.Read: r.Method = "Get" rd, err := h.FileGet.Fileread(r) if err != nil { return statusFromError(id, err) } - r.state.readerAt = rd + + r.setReaderAt(rd) + default: return statusFromError(id, errors.New("bad file flags")) } - return &sshFxpHandlePacket{ID: id, Handle: r.handle} + + return &sshFxpHandlePacket{ + ID: id, + Handle: r.handle, + } } func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket { @@ -269,25 +359,30 @@ func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket { if err != nil { return statusFromError(pkt.id(), wrapPathError(r.Filepath, err)) } - r.state.listerAt = la - return &sshFxpHandlePacket{ID: pkt.id(), Handle: r.handle} + + r.setListerAt(la) + + return &sshFxpHandlePacket{ + ID: pkt.id(), + Handle: r.handle, + } } // wrap FileReader handler func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { - r.state.RLock() - reader := r.state.readerAt - r.state.RUnlock() - if reader == nil { + rd := r.getReaderAt() + if rd == nil { return statusFromError(pkt.id(), errors.New("unexpected read packet")) } data, offset, _ := packetData(pkt, alloc, orderID) - n, 
err := reader.ReadAt(data, offset) + + n, err := rd.ReadAt(data, offset) // only return EOF error if no data left to read if err != nil && (err != io.EOF || n == 0) { return statusFromError(pkt.id(), err) } + return &sshFxpDataPacket{ ID: pkt.id(), Length: uint32(n), @@ -297,43 +392,46 @@ func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orde // wrap FileWriter handler func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { - r.state.RLock() - writer := r.state.writerAt - r.state.RUnlock() - if writer == nil { + wr := r.getWriterAt() + if wr == nil { return statusFromError(pkt.id(), errors.New("unexpected write packet")) } data, offset, _ := packetData(pkt, alloc, orderID) - _, err := writer.WriteAt(data, offset) + + _, err := wr.WriteAt(data, offset) return statusFromError(pkt.id(), err) } // wrap OpenFileWriter handler func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket { - r.state.RLock() - writerReader := r.state.writerReaderAt - r.state.RUnlock() - if writerReader == nil { + rw := r.getWriterAtReaderAt() + if rw == nil { return statusFromError(pkt.id(), errors.New("unexpected write and read packet")) } + switch p := pkt.(type) { case *sshFxpReadPacket: data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset) - n, err := writerReader.ReadAt(data, offset) + + n, err := rw.ReadAt(data, offset) // only return EOF error if no data left to read if err != nil && (err != io.EOF || n == 0) { return statusFromError(pkt.id(), err) } + return &sshFxpDataPacket{ ID: pkt.id(), Length: uint32(n), Data: data[:n], } + case *sshFxpWritePacket: data, offset := p.Data, int64(p.Offset) - _, err := writerReader.WriteAt(data, offset) + + _, err := rw.WriteAt(data, offset) return statusFromError(pkt.id(), err) + default: return statusFromError(pkt.id(), errors.New("unexpected packet type for read or write")) } @@ -358,7 +456,8 @@ func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket { r.Attrs = p.Attrs.([]byte) } - if r.Method == "PosixRename" { + switch r.Method { + case "PosixRename": if posixRenamer, ok := h.(PosixRenameFileCmder); ok { err := posixRenamer.PosixRename(r) return statusFromError(pkt.id(), err) @@ -368,9 +467,8 @@ func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket { r.Method = "Rename" err := h.Filecmd(r) return statusFromError(pkt.id(), err) - } - if r.Method == "StatVFS" { + case "StatVFS": if statVFSCmdr, ok := h.(StatVFSFileCmder); ok { stat, err := statVFSCmdr.StatVFS(r) if err != nil { @@ -389,8 +487,7 @@ func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket { // wrap FileLister handler func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { - var err error - lister := r.getLister() + lister := r.getListerAt() if lister == nil { return statusFromError(pkt.id(), errors.New("unexpected dir packet")) } @@ -404,25 +501,31 @@ func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { switch r.Method { case "List": - if err != nil && err != io.EOF { + if err != nil && (err != io.EOF || n == 0) { return statusFromError(pkt.id(), err) } - if err == io.EOF && n == 0 { - return statusFromError(pkt.id(), io.EOF) - } - dirname := filepath.ToSlash(path.Base(r.Filepath)) - ret := &sshFxpNamePacket{ID: pkt.id()} + + nameAttrs := make([]*sshFxpNameAttr, 0, len(finfo)) + + // If the type conversion fails, we get untyped `nil`, + // which is handled by not looking up 
any names. + idLookup, _ := h.(NameLookupFileLister) for _, fi := range finfo { - ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{ + nameAttrs = append(nameAttrs, &sshFxpNameAttr{ Name: fi.Name(), - LongName: runLs(dirname, fi), + LongName: runLs(idLookup, fi), Attrs: []interface{}{fi}, }) } - return ret + + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: nameAttrs, + } + default: - err = errors.Errorf("unexpected method: %s", r.Method) + err = fmt.Errorf("unexpected method: %s", r.Method) return statusFromError(pkt.id(), err) } } @@ -455,8 +558,11 @@ func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { return statusFromError(pkt.id(), err) } if n == 0 { - err = &os.PathError{Op: strings.ToLower(r.Method), Path: r.Filepath, - Err: syscall.ENOENT} + err = &os.PathError{ + Op: strings.ToLower(r.Method), + Path: r.Filepath, + Err: syscall.ENOENT, + } return statusFromError(pkt.id(), err) } return &sshFxpStatResponse{ @@ -468,8 +574,11 @@ func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { return statusFromError(pkt.id(), err) } if n == 0 { - err = &os.PathError{Op: "readlink", Path: r.Filepath, - Err: syscall.ENOENT} + err = &os.PathError{ + Op: "readlink", + Path: r.Filepath, + Err: syscall.ENOENT, + } return statusFromError(pkt.id(), err) } filename := finfo[0].Name() @@ -484,7 +593,7 @@ func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { }, } default: - err = errors.Errorf("unexpected method: %s", r.Method) + err = fmt.Errorf("unexpected method: %s", r.Method) return statusFromError(pkt.id(), err) } } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request_windows.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request_windows.go index 94d306b6e9..1f6d3df170 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request_windows.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/request_windows.go @@ -1,6 +1,10 @@ package sftp -import "syscall" +import ( + "path" + "path/filepath" + "syscall" +) func fakeFileInfoSys() interface{} { return syscall.Win32FileAttributeData{} @@ -9,3 +13,32 @@ func fakeFileInfoSys() interface{} { func testOsSys(sys interface{}) error { return nil } + +func toLocalPath(p string) string { + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp + for len(tmp) > 0 && tmp[0] == '\\' { + tmp = tmp[1:] + } + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/C:/Windows" to "C:\\Windows" + return tmp + } + + tmp += "\\" + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes but with extra end slash is absolute, + // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. + // e.g. 
"/C:" to "C:\\" + return tmp + } + } + + return lp +} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server.go index 909563f02d..529052b444 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server.go @@ -4,6 +4,7 @@ package sftp import ( "encoding" + "errors" "fmt" "io" "io/ioutil" @@ -13,8 +14,6 @@ import ( "sync" "syscall" "time" - - "github.com/pkg/errors" ) const ( @@ -175,7 +174,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpStatPacket: // stat the requested file - info, err := os.Stat(p.Path) + info, err := os.Stat(toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -185,7 +184,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpLstatPacket: // stat the requested file - info, err := os.Lstat(p.Path) + info, err := os.Lstat(toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -209,24 +208,24 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpMkdirPacket: // TODO FIXME: ignore flags field - err := os.Mkdir(p.Path, 0755) + err := os.Mkdir(toLocalPath(p.Path), 0755) rpkt = statusFromError(p.ID, err) case *sshFxpRmdirPacket: - err := os.Remove(p.Path) + err := os.Remove(toLocalPath(p.Path)) rpkt = statusFromError(p.ID, err) case *sshFxpRemovePacket: - err := os.Remove(p.Filename) + err := os.Remove(toLocalPath(p.Filename)) rpkt = statusFromError(p.ID, err) case *sshFxpRenamePacket: - err := os.Rename(p.Oldpath, p.Newpath) + err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) rpkt = statusFromError(p.ID, err) case *sshFxpSymlinkPacket: - err := os.Symlink(p.Targetpath, p.Linkpath) + err := os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath)) rpkt = statusFromError(p.ID, err) case *sshFxpClosePacket: rpkt = statusFromError(p.ID, s.closeHandle(p.Handle)) case *sshFxpReadlinkPacket: - f, err := os.Readlink(p.Path) + f, err := os.Readlink(toLocalPath(p.Path)) rpkt = &sshFxpNamePacket{ ID: p.ID, NameAttrs: []*sshFxpNameAttr{ @@ -241,7 +240,7 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = statusFromError(p.ID, err) } case *sshFxpRealpathPacket: - f, err := filepath.Abs(p.Path) + f, err := filepath.Abs(toLocalPath(p.Path)) f = cleanPath(f) rpkt = &sshFxpNamePacket{ ID: p.ID, @@ -257,6 +256,8 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = statusFromError(p.ID, err) } case *sshFxpOpendirPacket: + p.Path = toLocalPath(p.Path) + if stat, err := os.Stat(p.Path); err != nil { rpkt = statusFromError(p.ID, err) } else if !stat.IsDir() { @@ -306,7 +307,7 @@ func handlePacket(s *Server, p orderedRequest) error { case serverRespondablePacket: rpkt = p.respond(s) default: - return errors.Errorf("unexpected packet type %T", p) + return fmt.Errorf("unexpected packet type %T", p) } s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, orderID)) @@ -346,8 +347,8 @@ func (svr *Server) Serve() error { pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) if err != nil { - switch errors.Cause(err) { - case errUnknownExtendedPacket: + switch { + case errors.Is(err, errUnknownExtendedPacket): //if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { // debug("failed to send err packet: %v", err) // svr.conn.Close() // shuts down recvPacket @@ -445,7 +446,7 @@ func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { osFlags |= os.O_EXCL } - f, err := 
os.OpenFile(p.Path, osFlags, 0644) + f, err := os.OpenFile(toLocalPath(p.Path), osFlags, 0644) if err != nil { return statusFromError(p.ID, err) } @@ -460,17 +461,18 @@ func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket { return statusFromError(p.ID, EBADF) } - dirname := f.Name() dirents, err := f.Readdir(128) if err != nil { return statusFromError(p.ID, err) } + idLookup := osIDLookup{} + ret := &sshFxpNamePacket{ID: p.ID} for _, dirent := range dirents { ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{ Name: dirent.Name(), - LongName: runLs(dirname, dirent), + LongName: runLs(idLookup, dirent), Attrs: []interface{}{dirent}, }) } @@ -482,6 +484,8 @@ func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { b := p.Attrs.([]byte) var err error + p.Path = toLocalPath(p.Path) + debug("setstat name \"%s\"", p.Path) if (p.Flags & sshFileXferAttrSize) != 0 { var size uint64 @@ -610,99 +614,3 @@ func statusFromError(id uint32, err error) *sshFxpStatusPacket { return ret } - -func clamp(v, max uint32) uint32 { - if v > max { - return max - } - return v -} - -func runLsTypeWord(dirent os.FileInfo) string { - // find first character, the type char - // b Block special file. - // c Character special file. - // d Directory. - // l Symbolic link. - // s Socket link. - // p FIFO. - // - Regular file. - tc := '-' - mode := dirent.Mode() - if (mode & os.ModeDir) != 0 { - tc = 'd' - } else if (mode & os.ModeDevice) != 0 { - tc = 'b' - if (mode & os.ModeCharDevice) != 0 { - tc = 'c' - } - } else if (mode & os.ModeSymlink) != 0 { - tc = 'l' - } else if (mode & os.ModeSocket) != 0 { - tc = 's' - } else if (mode & os.ModeNamedPipe) != 0 { - tc = 'p' - } - - // owner - orc := '-' - if (mode & 0400) != 0 { - orc = 'r' - } - owc := '-' - if (mode & 0200) != 0 { - owc = 'w' - } - oxc := '-' - ox := (mode & 0100) != 0 - setuid := (mode & os.ModeSetuid) != 0 - if ox && setuid { - oxc = 's' - } else if setuid { - oxc = 'S' - } else if ox { - oxc = 'x' - } - - // group - grc := '-' - if (mode & 040) != 0 { - grc = 'r' - } - gwc := '-' - if (mode & 020) != 0 { - gwc = 'w' - } - gxc := '-' - gx := (mode & 010) != 0 - setgid := (mode & os.ModeSetgid) != 0 - if gx && setgid { - gxc = 's' - } else if setgid { - gxc = 'S' - } else if gx { - gxc = 'x' - } - - // all / others - arc := '-' - if (mode & 04) != 0 { - arc = 'r' - } - awc := '-' - if (mode & 02) != 0 { - awc = 'w' - } - axc := '-' - ax := (mode & 01) != 0 - sticky := (mode & os.ModeSticky) != 0 - if ax && sticky { - axc = 't' - } else if sticky { - axc = 'T' - } else if ax { - axc = 'x' - } - - return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc) -} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_statvfs_impl.go index 2d467d1ee3..94b6d832cd 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_statvfs_impl.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -14,6 +14,7 @@ func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket { if err != nil { return statusFromError(p.ID, err) } + retPkt.ID = p.ID return retPkt } diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_stubs.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_stubs.go deleted file mode 100644 index 62c9fa1a19..0000000000 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_stubs.go +++ /dev/null @@ -1,32 +0,0 @@ 
-// +build !cgo plan9 windows android - -package sftp - -import ( - "fmt" - "os" - "time" -) - -func runLs(dirname string, dirent os.FileInfo) string { - typeword := runLsTypeWord(dirent) - numLinks := 1 - if dirent.IsDir() { - numLinks = 0 - } - username := "root" - groupname := "root" - mtime := dirent.ModTime() - monthStr := mtime.Month().String()[0:3] - day := mtime.Day() - year := mtime.Year() - now := time.Now() - isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) - - yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) - if isOld { - yearOrTime = fmt.Sprintf("%d", year) - } - - return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) -} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_unix.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_unix.go deleted file mode 100644 index abceca498f..0000000000 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/server_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix -// +build cgo - -package sftp - -import ( - "fmt" - "os" - "path" - "syscall" - "time" -) - -func runLsStatt(dirent os.FileInfo, statt *syscall.Stat_t) string { - // example from openssh sftp server: - // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd - // format: - // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name - - typeword := runLsTypeWord(dirent) - numLinks := statt.Nlink - uid := statt.Uid - gid := statt.Gid - username := fmt.Sprintf("%d", uid) - groupname := fmt.Sprintf("%d", gid) - // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output - - mtime := dirent.ModTime() - monthStr := mtime.Month().String()[0:3] - day := mtime.Day() - year := mtime.Year() - now := time.Now() - isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) - - yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) - if isOld { - yearOrTime = fmt.Sprintf("%d", year) - } - - return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) -} - -// ls -l style output for a file, which is in the 'long output' section of a readdir response packet -// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases -func runLs(dirname string, dirent os.FileInfo) string { - dsys := dirent.Sys() - if dsys == nil { - } else if statt, ok := dsys.(*syscall.Stat_t); !ok { - } else { - return runLsStatt(dirent, statt) - } - - return path.Join(dirname, dirent.Name()) -} diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/sftp.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/sftp.go index 912dff1d8a..9a63c39dc4 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/sftp.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/sftp.go @@ -4,8 +4,6 @@ package sftp import ( "fmt" - - "github.com/pkg/errors" ) const ( @@ -58,9 +56,9 @@ const ( sshFxNoMedia = 13 sshFxNoSpaceOnFilesystem = 14 sshFxQuotaExceeded = 15 - sshFxUnlnownPrincipal = 16 + sshFxUnknownPrincipal = 16 sshFxLockConflict = 17 - sshFxDitNotEmpty = 18 + sshFxDirNotEmpty = 18 sshFxNotADirectory = 19 sshFxInvalidFilename = 20 sshFxLinkLoop = 21 @@ -194,21 +192,21 @@ func (u *unexpectedPacketErr) Error() string { } func 
unimplementedPacketErr(u uint8) error { - return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) + return fmt.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) } type unexpectedIDErr struct{ want, got uint32 } func (u *unexpectedIDErr) Error() string { - return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got) + return fmt.Sprintf("sftp: unexpected id: want %d, got %d", u.want, u.got) } func unimplementedSeekWhence(whence int) error { - return errors.Errorf("sftp: unimplemented seek whence %v", whence) + return fmt.Errorf("sftp: unimplemented seek whence %d", whence) } func unexpectedCount(want, got uint32) error { - return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got) + return fmt.Errorf("sftp: unexpected count: want %d, got %d", want, got) } type unexpectedVersionErr struct{ want, got uint32 } @@ -239,7 +237,7 @@ func getSupportedExtensionByName(extensionName string) (sshExtensionPair, error) return supportedExtension, nil } } - return sshExtensionPair{}, fmt.Errorf("Unsupported extension: %v", extensionName) + return sshExtensionPair{}, fmt.Errorf("unsupported extension: %s", extensionName) } // SetSFTPExtensions allows to customize the supported server extensions. diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_plan9.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_plan9.go index 32f17e0b8f..761abdf56b 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_plan9.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_plan9.go @@ -41,9 +41,15 @@ func translateSyscallError(err error) (uint32, bool) { return 0, false } +// isRegular returns true if the mode describes a regular file. +func isRegular(mode uint32) bool { + return mode&S_IFMT == syscall.S_IFREG +} + // toFileMode converts sftp filemode bits to the os.FileMode specification func toFileMode(mode uint32) os.FileMode { var fm = os.FileMode(mode & 0777) + switch mode & S_IFMT { case syscall.S_IFBLK: fm |= os.ModeDevice @@ -60,37 +66,38 @@ func toFileMode(mode uint32) os.FileMode { case syscall.S_IFSOCK: fm |= os.ModeSocket } + return fm } // fromFileMode converts from the os.FileMode specification to sftp filemode bits func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(0) + ret := uint32(mode & os.ModePerm) - if mode&os.ModeDevice != 0 { - if mode&os.ModeCharDevice != 0 { - ret |= syscall.S_IFCHR - } else { - ret |= syscall.S_IFBLK - } - } - if mode&os.ModeDir != 0 { + switch mode & os.ModeType { + case os.ModeDevice | os.ModeCharDevice: + ret |= syscall.S_IFCHR + case os.ModeDevice: + ret |= syscall.S_IFBLK + case os.ModeDir: ret |= syscall.S_IFDIR - } - if mode&os.ModeSymlink != 0 { - ret |= syscall.S_IFLNK - } - if mode&os.ModeNamedPipe != 0 { + case os.ModeNamedPipe: ret |= syscall.S_IFIFO - } - if mode&os.ModeSocket != 0 { - ret |= syscall.S_IFSOCK - } - - if mode&os.ModeType == 0 { + case os.ModeSymlink: + ret |= syscall.S_IFLNK + case 0: ret |= syscall.S_IFREG + case os.ModeSocket: + ret |= syscall.S_IFSOCK } - ret |= uint32(mode & os.ModePerm) return ret } + +// Plan 9 doesn't have setuid, setgid or sticky, but a Plan 9 client should +// be able to send these bits to a POSIX server. 
+const ( + s_ISUID = 04000 + s_ISGID = 02000 + s_ISVTX = 01000 +) diff --git a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_posix.go b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_posix.go index 38ba26ad0c..5b870e23c2 100644 --- a/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_posix.go +++ b/src/code.cloudfoundry.org/vendor/github.com/pkg/sftp/stat_posix.go @@ -1,3 +1,4 @@ +//go:build !plan9 // +build !plan9 package sftp @@ -23,7 +24,7 @@ func translateErrno(errno syscall.Errno) uint32 { return sshFxOk case syscall.ENOENT: return sshFxNoSuchFile - case syscall.EPERM: + case syscall.EACCES, syscall.EPERM: return sshFxPermissionDenied } @@ -43,9 +44,15 @@ func translateSyscallError(err error) (uint32, bool) { return 0, false } +// isRegular returns true if the mode describes a regular file. +func isRegular(mode uint32) bool { + return mode&S_IFMT == syscall.S_IFREG +} + // toFileMode converts sftp filemode bits to the os.FileMode specification func toFileMode(mode uint32) os.FileMode { var fm = os.FileMode(mode & 0777) + switch mode & S_IFMT { case syscall.S_IFBLK: fm |= os.ModeDevice @@ -62,55 +69,56 @@ func toFileMode(mode uint32) os.FileMode { case syscall.S_IFSOCK: fm |= os.ModeSocket } - if mode&syscall.S_ISGID != 0 { - fm |= os.ModeSetgid - } + if mode&syscall.S_ISUID != 0 { fm |= os.ModeSetuid } + if mode&syscall.S_ISGID != 0 { + fm |= os.ModeSetgid + } if mode&syscall.S_ISVTX != 0 { fm |= os.ModeSticky } + return fm } // fromFileMode converts from the os.FileMode specification to sftp filemode bits func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(0) + ret := uint32(mode & os.ModePerm) - if mode&os.ModeDevice != 0 { - if mode&os.ModeCharDevice != 0 { - ret |= syscall.S_IFCHR - } else { - ret |= syscall.S_IFBLK - } - } - if mode&os.ModeDir != 0 { + switch mode & os.ModeType { + case os.ModeDevice | os.ModeCharDevice: + ret |= syscall.S_IFCHR + case os.ModeDevice: + ret |= syscall.S_IFBLK + case os.ModeDir: ret |= syscall.S_IFDIR - } - if mode&os.ModeSymlink != 0 { + case os.ModeNamedPipe: + ret |= syscall.S_IFIFO + case os.ModeSymlink: ret |= syscall.S_IFLNK + case 0: + ret |= syscall.S_IFREG + case os.ModeSocket: + ret |= syscall.S_IFSOCK } - if mode&os.ModeNamedPipe != 0 { - ret |= syscall.S_IFIFO + + if mode&os.ModeSetuid != 0 { + ret |= syscall.S_ISUID } if mode&os.ModeSetgid != 0 { ret |= syscall.S_ISGID } - if mode&os.ModeSetuid != 0 { - ret |= syscall.S_ISUID - } if mode&os.ModeSticky != 0 { ret |= syscall.S_ISVTX } - if mode&os.ModeSocket != 0 { - ret |= syscall.S_IFSOCK - } - - if mode&os.ModeType == 0 { - ret |= syscall.S_IFREG - } - ret |= uint32(mode & os.ModePerm) return ret } + +const ( + s_ISUID = syscall.S_ISUID + s_ISGID = syscall.S_ISGID + s_ISVTX = syscall.S_ISVTX +) diff --git a/src/code.cloudfoundry.org/vendor/modules.txt b/src/code.cloudfoundry.org/vendor/modules.txt index a5e9c70960..7cd824ec56 100644 --- a/src/code.cloudfoundry.org/vendor/modules.txt +++ b/src/code.cloudfoundry.org/vendor/modules.txt @@ -574,9 +574,10 @@ github.com/pborman/getopt # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pkg/sftp v1.13.0 -## explicit; go 1.14 +# github.com/pkg/sftp v1.13.5 +## explicit; go 1.15 github.com/pkg/sftp +github.com/pkg/sftp/internal/encoding/ssh/filexfer # github.com/prometheus/client_golang v1.14.0 => github.com/prometheus/client_golang v1.11.1 ## explicit; go 1.13 github.com/prometheus/client_golang/prometheus
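For code.cloudfoundry.org components that import this dependency, the public API keeps its shape across the 1.13.0 to 1.13.5 bump; the visible changes above are that path helpers such as Split, Join and Glob now delegate to the standard path package, and that errors are wrapped with fmt.Errorf and %w rather than github.com/pkg/errors. A small smoke test against the upgraded module could look like the sketch below; the host, credentials and glob pattern are placeholders, and InsecureIgnoreHostKey is only acceptable for throwaway testing.

package main

import (
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder credentials; use real host key verification outside of tests.
	config := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("demo")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}

	conn, err := ssh.Dial("tcp", "sftp.example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client, err := sftp.NewClient(conn)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Glob's pattern handling now leans on the standard path package
	// (see the match.go changes earlier in this patch).
	matches, err := client.Glob("/tmp/*.log")
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range matches {
		log.Println(name)
	}
}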