From de41a9feaec5555e7d6467ae322029bc2bbc7c58 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Tue, 29 Sep 2015 21:31:18 -0700 Subject: [PATCH 1/2] implement ipfs files command License: MIT Signed-off-by: Jeromy --- core/builder.go | 5 + core/commands/files/files.go | 555 +++++++++++++++++++++++++++++++ core/commands/root.go | 2 + core/core.go | 61 +++- mfs/ops.go | 109 +++++- test/sharness/t0250-files-api.sh | 219 ++++++++++++ 6 files changed, 944 insertions(+), 7 deletions(-) create mode 100644 core/commands/files/files.go create mode 100755 test/sharness/t0250-files-api.sh diff --git a/core/builder.go b/core/builder.go index d5d46dd6e8e..af3a038408b 100644 --- a/core/builder.go +++ b/core/builder.go @@ -159,5 +159,10 @@ func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { } n.Resolver = &path.Resolver{DAG: n.DAG} + err = n.loadFilesRoot() + if err != nil { + return err + } + return nil } diff --git a/core/commands/files/files.go b/core/commands/files/files.go new file mode 100644 index 00000000000..60fde789fa1 --- /dev/null +++ b/core/commands/files/files.go @@ -0,0 +1,555 @@ +package commands + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + gopath "path" + "strings" + + cmds "github.com/ipfs/go-ipfs/commands" + core "github.com/ipfs/go-ipfs/core" + dag "github.com/ipfs/go-ipfs/merkledag" + mfs "github.com/ipfs/go-ipfs/mfs" + path "github.com/ipfs/go-ipfs/path" + ft "github.com/ipfs/go-ipfs/unixfs" + u "github.com/ipfs/go-ipfs/util" +) + +var log = u.Logger("cmds/files") + +var FilesCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manipulate unixfs files", + ShortDescription: ` +Files is an API for manipulating ipfs objects as if they were a unix filesystem. +`, + }, + Subcommands: map[string]*cmds.Command{ + "read": FilesReadCmd, + "write": FilesWriteCmd, + "mv": FilesMvCmd, + "cp": FilesCpCmd, + "ls": FilesLsCmd, + "mkdir": FilesMkdirCmd, + "stat": FilesStatCmd, + "rm": FilesRmCmd, + }, +} + +var FilesStatCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "display file status", + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to node to stat"), + }, + Run: func(req cmds.Request, res cmds.Response) { + node, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path := req.Arguments()[0] + fsn, err := mfs.Lookup(node.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + nd, err := fsn.GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + k, err := nd.Key() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + res.SetOutput(&Object{ + Hash: k.B58String(), + }) + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + out := res.Output().(*Object) + return strings.NewReader(out.Hash), nil + }, + }, + Type: Object{}, +} + +var FilesCpCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "copy files into mfs", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("src", true, false, "source object to copy"), + cmds.StringArg("dest", true, false, "destination to copy object to"), + }, + Run: func(req cmds.Request, res cmds.Response) { + node, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + src := req.Arguments()[0] + dst := req.Arguments()[1] + + var nd *dag.Node + switch { + case strings.HasPrefix(src, "/ipfs/"): + p, err := path.ParsePath(src) + if err != nil { + res.SetError(err, 
cmds.ErrNormal) + return + } + + obj, err := core.Resolve(req.Context(), node, p) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + nd = obj + default: + fsn, err := mfs.Lookup(node.FilesRoot, src) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + obj, err := fsn.GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + nd = obj + } + + err = mfs.PutNode(node.FilesRoot, dst, nd) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +type Object struct { + Hash string +} + +type FilesLsOutput struct { + Entries []mfs.NodeListing +} + +var FilesLsCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List directories", + ShortDescription: ` +List directories. + +Examples: + + $ ipfs files ls /welcome/docs/ + about + contact + help + quick-start + readme + security-notes + + $ ipfs files ls /myfiles/a/b/c/d + foo + bar +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to show listing for"), + }, + Options: []cmds.Option{ + cmds.BoolOption("l", "use long listing format"), + }, + Run: func(req cmds.Request, res cmds.Response) { + path := req.Arguments()[0] + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fsn, err := mfs.Lookup(nd.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + switch fsn := fsn.(type) { + case *mfs.Directory: + listing, err := fsn.List() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + res.SetOutput(&FilesLsOutput{listing}) + return + case *mfs.File: + parts := strings.Split(path, "/") + name := parts[len(parts)-1] + out := &FilesLsOutput{[]mfs.NodeListing{mfs.NodeListing{Name: name, Type: 1}}} + res.SetOutput(out) + return + default: + res.SetError(errors.New("unrecognized type"), cmds.ErrNormal) + } + }, + Marshalers: cmds.MarshalerMap{ + cmds.Text: func(res cmds.Response) (io.Reader, error) { + out := res.Output().(*FilesLsOutput) + buf := new(bytes.Buffer) + long, _, _ := res.Request().Option("l").Bool() + + for _, o := range out.Entries { + if long { + fmt.Fprintf(buf, "%s\t%s\t%d\n", o.Name, o.Hash, o.Size) + } else { + fmt.Fprintf(buf, "%s\n", o.Name) + } + } + return buf, nil + }, + }, + Type: FilesLsOutput{}, +} + +var FilesReadCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Read a file in a given mfs", + ShortDescription: ` +Read a specified number of bytes from a file at a given offset. By default, will +read the entire file similar to unix cat. 
+ +Examples: + + $ ipfs files read /test/hello + hello + `, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to file to be read"), + }, + Options: []cmds.Option{ + cmds.IntOption("o", "offset", "offset to read from"), + cmds.IntOption("n", "count", "maximum number of bytes to read"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path := req.Arguments()[0] + fsn, err := mfs.Lookup(n.FilesRoot, path) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fi, ok := fsn.(*mfs.File) + if !ok { + res.SetError(fmt.Errorf("%s was not a file", path), cmds.ErrNormal) + return + } + + offset, _, _ := req.Option("offset").Int() + + _, err = fi.Seek(int64(offset), os.SEEK_SET) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + var r io.Reader = fi + count, found, err := req.Option("count").Int() + if err == nil && found { + r = io.LimitReader(fi, int64(count)) + } + + res.SetOutput(r) + }, +} + +var FilesMvCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Move files", + ShortDescription: ` +Move files around. Just like traditional unix mv. + +Example: + + $ ipfs files mv /myfs/a/b/c /myfs/foo/newc + + `, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("source", true, false, "source file to move"), + cmds.StringArg("dest", true, false, "target path for file to be moved to"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + src := req.Arguments()[0] + dst := req.Arguments()[1] + + err = mfs.Mv(n.FilesRoot, src, dst) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +var FilesWriteCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Write to a mutable file in a given filesystem", + ShortDescription: ` +Write data to a file in a given filesystem. This command allows you to specify +a beginning offset to write to. The entire length of the input will be written. + +If the '--create' option is specified, the file will be create if it does not +exist. Nonexistant intermediate directories will not be created. 
+ +Example: + + echo "hello world" | ipfs files write --create /myfs/a/b/file + echo "hello world" | ipfs files write --truncate /myfs/a/b/file + `, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to write to"), + cmds.FileArg("data", true, false, "data to write").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.IntOption("o", "offset", "offset to write to"), + cmds.BoolOption("n", "create", "create the file if it does not exist"), + cmds.BoolOption("t", "truncate", "truncate the file before writing"), + }, + Run: func(req cmds.Request, res cmds.Response) { + path := req.Arguments()[0] + create, _, _ := req.Option("create").Bool() + trunc, _, _ := req.Option("truncate").Bool() + + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + fi, err := getFileHandle(nd.FilesRoot, path, create) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + defer fi.Close() + + if trunc { + if err := fi.Truncate(0); err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } + + offset, _, _ := req.Option("offset").Int() + + _, err = fi.Seek(int64(offset), os.SEEK_SET) + if err != nil { + log.Error("seekfail: ", err) + res.SetError(err, cmds.ErrNormal) + return + } + + input, err := req.Files().NextFile() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + n, err := io.Copy(fi, input) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + log.Debugf("wrote %d bytes to %s", n, path) + }, +} + +var FilesMkdirCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "make directories", + ShortDescription: ` +Create the directory if it does not already exist. + +Note: all paths must be absolute. + +Examples: + + $ ipfs mfs mkdir /test/newdir + $ ipfs mfs mkdir -p /test/does/not/exist/yet +`, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, false, "path to dir to make"), + }, + Options: []cmds.Option{ + cmds.BoolOption("p", "parents", "no error if existing, make parent directories as needed"), + }, + Run: func(req cmds.Request, res cmds.Response) { + n, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + dashp, _, _ := req.Option("parents").Bool() + dirtomake := req.Arguments()[0] + + if dirtomake[0] != '/' { + res.SetError(errors.New("paths must be absolute"), cmds.ErrNormal) + return + } + + err = mfs.Mkdir(n.FilesRoot, dirtomake, dashp) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + }, +} + +var FilesRmCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "remove a file", + ShortDescription: ``, + }, + + Arguments: []cmds.Argument{ + cmds.StringArg("path", true, true, "file to remove"), + }, + Options: []cmds.Option{ + cmds.BoolOption("r", "recursive", "recursively remove directories"), + }, + Run: func(req cmds.Request, res cmds.Response) { + nd, err := req.InvocContext().GetNode() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + path := req.Arguments()[0] + dir, name := gopath.Split(path) + parent, err := mfs.Lookup(nd.FilesRoot, dir) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + pdir, ok := parent.(*mfs.Directory) + if !ok { + res.SetError(fmt.Errorf("no such file or directory: %s", path), cmds.ErrNormal) + return + } + + childi, err := pdir.Child(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + dashr, _, _ := req.Option("r").Bool() + + switch childi.(type) { + case *mfs.Directory: + if 
dashr { + err := pdir.Unlink(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } else { + res.SetError(fmt.Errorf("%s is a directory, use -r to remove directories", path), cmds.ErrNormal) + return + } + default: + err := pdir.Unlink(name) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + } + }, +} + +func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { + + target, err := mfs.Lookup(r, path) + switch err { + case nil: + fi, ok := target.(*mfs.File) + if !ok { + return nil, fmt.Errorf("%s was not a file", path) + } + return fi, nil + + case os.ErrNotExist: + if !create { + return nil, err + } + + // if create is specified and the file doesnt exist, we create the file + dirname, fname := gopath.Split(path) + pdiri, err := mfs.Lookup(r, dirname) + if err != nil { + log.Error("lookupfail ", dirname) + return nil, err + } + pdir, ok := pdiri.(*mfs.Directory) + if !ok { + return nil, fmt.Errorf("%s was not a directory", dirname) + } + + nd := &dag.Node{Data: ft.FilePBData(nil, 0)} + err = pdir.AddChild(fname, nd) + if err != nil { + return nil, err + } + + fsn, err := pdir.Child(fname) + if err != nil { + return nil, err + } + + // can unsafely cast, if it fails, that means programmer error + return fsn.(*mfs.File), nil + + default: + log.Error("GFH default") + return nil, err + } +} diff --git a/core/commands/root.go b/core/commands/root.go index 987178058b0..17f0ace753c 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -5,6 +5,7 @@ import ( "strings" cmds "github.com/ipfs/go-ipfs/commands" + files "github.com/ipfs/go-ipfs/core/commands/files" unixfs "github.com/ipfs/go-ipfs/core/commands/unixfs" evlog "github.com/ipfs/go-ipfs/thirdparty/eventlog" ) @@ -94,6 +95,7 @@ var rootSubcommands = map[string]*cmds.Command{ "dht": DhtCmd, "diag": DiagCmd, "dns": DNSCmd, + "files": files.FilesCmd, "get": GetCmd, "id": IDCmd, "log": LogCmd, diff --git a/core/core.go b/core/core.go index 346bbc6d34b..f90f15b7eac 100644 --- a/core/core.go +++ b/core/core.go @@ -17,6 +17,7 @@ import ( "time" b58 "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" + ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore" ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" goprocess "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/goprocess" mamask "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/whyrusleeping/multiaddr-filter" @@ -40,11 +41,13 @@ import ( offroute "github.com/ipfs/go-ipfs/routing/offline" bstore "github.com/ipfs/go-ipfs/blocks/blockstore" + key "github.com/ipfs/go-ipfs/blocks/key" bserv "github.com/ipfs/go-ipfs/blockservice" exchange "github.com/ipfs/go-ipfs/exchange" bitswap "github.com/ipfs/go-ipfs/exchange/bitswap" bsnet "github.com/ipfs/go-ipfs/exchange/bitswap/network" rp "github.com/ipfs/go-ipfs/exchange/reprovide" + mfs "github.com/ipfs/go-ipfs/mfs" mount "github.com/ipfs/go-ipfs/fuse/mount" merkledag "github.com/ipfs/go-ipfs/merkledag" @@ -53,6 +56,7 @@ import ( pin "github.com/ipfs/go-ipfs/pin" repo "github.com/ipfs/go-ipfs/repo" config "github.com/ipfs/go-ipfs/repo/config" + unixfs "github.com/ipfs/go-ipfs/unixfs" ) const IpnsValidatorTag = "ipns" @@ -92,6 +96,7 @@ type IpfsNode struct { Resolver *path.Resolver // the path resolution system Reporter metrics.Reporter Discovery discovery.Service + FilesRoot *mfs.Root // Online PeerHost p2phost.Host // the network host (server+client) @@ -249,8 +254,14 @@ func (n 
*IpfsNode) teardown() error { log.Debug("core is shutting down...") // owned objects are closed in this teardown to ensure that they're closed // regardless of which constructor was used to add them to the node. - closers := []io.Closer{ - n.Repo, + var closers []io.Closer + + // NOTE: the order that objects are added(closed) matters, if an object + // needs to use another during its shutdown/cleanup process, it should be + // closed before that other object + + if n.FilesRoot != nil { + closers = append(closers, n.FilesRoot) } if n.Exchange != nil { @@ -264,6 +275,10 @@ func (n *IpfsNode) teardown() error { closers = append(closers, mount.Closer(n.Mounts.Ipns)) } + if dht, ok := n.Routing.(*dht.IpfsDHT); ok { + closers = append(closers, dht.Process()) + } + if n.Blocks != nil { closers = append(closers, n.Blocks) } @@ -272,14 +287,13 @@ func (n *IpfsNode) teardown() error { closers = append(closers, n.Bootstrapper) } - if dht, ok := n.Routing.(*dht.IpfsDHT); ok { - closers = append(closers, dht.Process()) - } - if n.PeerHost != nil { closers = append(closers, n.PeerHost) } + // Repo closed last, most things need to preserve state here + closers = append(closers, n.Repo) + var errs []error for _, closer := range closers { if err := closer.Close(); err != nil { @@ -390,6 +404,41 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.PeerInfo, error) { return toPeerInfos(parsed), nil } +func (n *IpfsNode) loadFilesRoot() error { + dsk := ds.NewKey("/filesroot") + pf := func(ctx context.Context, k key.Key) error { + return n.Repo.Datastore().Put(dsk, []byte(k)) + } + + var nd *merkledag.Node + val, err := n.Repo.Datastore().Get(dsk) + + switch { + case err == ds.ErrNotFound || val == nil: + nd = &merkledag.Node{Data: unixfs.FolderPBData()} + _, err := n.DAG.Add(nd) + if err != nil { + return fmt.Errorf("failure writing to dagstore: %s", err) + } + case err == nil: + k := key.Key(val.([]byte)) + nd, err = n.DAG.Get(n.Context(), k) + if err != nil { + return fmt.Errorf("error loading filesroot from DAG: %s", err) + } + default: + return err + } + + mr, err := mfs.NewRoot(n.Context(), n.DAG, nd, pf) + if err != nil { + return err + } + + n.FilesRoot = mr + return nil +} + // SetupOfflineRouting loads the local nodes private key and // uses it to instantiate a routing system in offline mode. // This is primarily used for offline ipns modifications. 
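
For orientation before the mfs/ops.go changes that follow: the FilesRoot loaded above is driven entirely through the mfs helpers this patch adds (Mkdir, PutNode, Lookup), which is also what the new `ipfs files` commands do internally. Below is a minimal sketch of that flow, assuming an already-constructed *mfs.Root such as node.FilesRoot; the package name and the demoFilesRoot helper are illustrative only and are not part of the patch.

    package mfsexample // illustrative package name, not part of the patch

    import (
    	"fmt"

    	dag "github.com/ipfs/go-ipfs/merkledag"
    	mfs "github.com/ipfs/go-ipfs/mfs"
    	ft "github.com/ipfs/go-ipfs/unixfs"
    )

    // demoFilesRoot is a hypothetical helper: it exercises the mfs operations
    // added in this patch against an already-loaded root (e.g. node.FilesRoot).
    func demoFilesRoot(root *mfs.Root) error {
    	// `ipfs files mkdir -p /demo` boils down to this call
    	if err := mfs.Mkdir(root, "/demo", true); err != nil {
    		return err
    	}

    	// `ipfs files cp` resolves its source to a dag node and then calls PutNode;
    	// here an empty unixfs file node stands in for a resolved object
    	empty := &dag.Node{Data: ft.FilePBData(nil, 0)}
    	if err := mfs.PutNode(root, "/demo/file", empty); err != nil {
    		return err
    	}

    	// `ipfs files stat /demo/file` is essentially Lookup + GetNode + Key
    	fsn, err := mfs.Lookup(root, "/demo/file")
    	if err != nil {
    		return err
    	}
    	nd, err := fsn.GetNode()
    	if err != nil {
    		return err
    	}
    	k, err := nd.Key()
    	if err != nil {
    		return err
    	}
    	fmt.Println("hash:", k.B58String())
    	return nil
    }

The per-command Run handlers in core/commands/files/files.go wrap exactly these calls with argument parsing and error reporting; the mfs/ops.go diff below is where the helpers themselves live.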
diff --git a/mfs/ops.go b/mfs/ops.go index 75f187f528b..397aea65aa7 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -3,10 +3,117 @@ package mfs import ( "errors" "fmt" + "os" + gopath "path" "strings" + + dag "github.com/ipfs/go-ipfs/merkledag" ) -func rootLookup(r *Root, path string) (FSNode, error) { +// Mv moves the file or directory at 'src' to 'dst' +func Mv(r *Root, src, dst string) error { + srcDir, srcFname := gopath.Split(src) + + srcObj, err := Lookup(r, src) + if err != nil { + return err + } + + var dstDirStr string + var filename string + if dst[len(dst)-1] == '/' { + dstDirStr = dst + filename = srcFname + } else { + dstDirStr, filename = gopath.Split(dst) + } + + dstDiri, err := Lookup(r, dstDirStr) + if err != nil { + return err + } + + dstDir := dstDiri.(*Directory) + nd, err := srcObj.GetNode() + if err != nil { + return err + } + + err = dstDir.AddChild(filename, nd) + if err != nil { + return err + } + + srcDirObji, err := Lookup(r, srcDir) + if err != nil { + return err + } + + srcDirObj := srcDirObji.(*Directory) + err = srcDirObj.Unlink(srcFname) + if err != nil { + return err + } + + return nil +} + +// PutNode inserts 'nd' at 'path' in the given mfs +func PutNode(r *Root, path string, nd *dag.Node) error { + dirp, filename := gopath.Split(path) + + parent, err := Lookup(r, dirp) + if err != nil { + return fmt.Errorf("lookup '%s' failed: %s", dirp, err) + } + + pdir, ok := parent.(*Directory) + if !ok { + return fmt.Errorf("%s did not point to directory", dirp) + } + + return pdir.AddChild(filename, nd) +} + +// Mkdir creates a directory at 'path' under the directory 'd', creating +// intermediary directories as needed if 'parents' is set to true +func Mkdir(r *Root, path string, parents bool) error { + parts := strings.Split(path, "/") + if parts[0] == "" { + parts = parts[1:] + } + + cur := r.GetValue().(*Directory) + for i, d := range parts[:len(parts)-1] { + fsn, err := cur.Child(d) + if err != nil { + if err == os.ErrNotExist && parents { + mkd, err := cur.Mkdir(d) + if err != nil { + return err + } + fsn = mkd + } + } + + next, ok := fsn.(*Directory) + if !ok { + return fmt.Errorf("%s was not a directory", strings.Join(parts[:i], "/")) + } + cur = next + } + + _, err := cur.Mkdir(parts[len(parts)-1]) + if err != nil { + if !parents || err != os.ErrExist { + return err + } + } + + return nil +} + +func Lookup(r *Root, path string) (FSNode, error) { dir, ok := r.GetValue().(*Directory) if !ok { return nil, errors.New("root was not a directory") diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh new file mode 100755 index 00000000000..fb91c313848 --- /dev/null +++ b/test/sharness/t0250-files-api.sh @@ -0,0 +1,219 @@ +#!/bin/sh +# +# Copyright (c) 2015 Jeromy Johnson +# MIT Licensed; see the LICENSE file in this repository. +# + +test_description="test the unix files api" + +. 
lib/test-lib.sh + +test_init_ipfs + +# setup files for testing +test_expect_success "can create some files for testing" ' + FILE1=$(echo foo | ipfs add -q) && + FILE2=$(echo bar | ipfs add -q) && + FILE3=$(echo baz | ipfs add -q) && + mkdir stuff_test && + echo cats > stuff_test/a && + echo dogs > stuff_test/b && + echo giraffes > stuff_test/c && + DIR1=$(ipfs add -q stuff_test | tail -n1) +' + +verify_path_exists() { + # simply running ls on a file should be a good 'check' + ipfs files ls $1 +} + +verify_dir_contents() { + dir=$1 + shift + rm -f expected + touch expected + for e in $@ + do + echo $e >> expected + done + + test_expect_success "can list dir" ' + ipfs files ls $dir > output + ' + + test_expect_success "dir entries look good" ' + test_sort_cmp output expected + ' +} + +test_files_api() { + test_expect_success "can mkdir in root" ' + ipfs files mkdir /cats + ' + + test_expect_success "directory was created" ' + verify_path_exists /cats + ' + + test_expect_success "directory is empty" ' + verify_dir_contents /cats + ' + + test_expect_success "can put files into directory" ' + ipfs files cp /ipfs/$FILE1 /cats/file1 + ' + + test_expect_success "file shows up in directory" ' + verify_dir_contents /cats file1 + ' + + test_expect_success "can read file" ' + ipfs files read /cats/file1 > file1out + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp file1out expected + ' + + test_expect_success "can put another file into root" ' + ipfs files cp /ipfs/$FILE2 /file2 + ' + + test_expect_success "file shows up in root" ' + verify_dir_contents / file2 cats + ' + + test_expect_success "can read file" ' + ipfs files read /file2 > file2out + ' + + test_expect_success "output looks good" ' + echo bar > expected && + test_cmp file2out expected + ' + + test_expect_success "can make deep directory" ' + ipfs files mkdir -p /cats/this/is/a/dir + ' + + test_expect_success "directory was created correctly" ' + verify_path_exists /cats/this/is/a/dir && + verify_dir_contents /cats this file1 && + verify_dir_contents /cats/this is && + verify_dir_contents /cats/this/is a && + verify_dir_contents /cats/this/is/a dir && + verify_dir_contents /cats/this/is/a/dir + ' + + test_expect_success "can copy file into new dir" ' + ipfs files cp /ipfs/$FILE3 /cats/this/is/a/dir/file3 + ' + + test_expect_success "can read file" ' + ipfs files read /cats/this/is/a/dir/file3 > output + ' + + test_expect_success "output looks good" ' + echo baz > expected && + test_cmp output expected + ' + + test_expect_success "file shows up in dir" ' + verify_dir_contents /cats/this/is/a/dir file3 + ' + + test_expect_success "can remove file" ' + ipfs files rm /cats/this/is/a/dir/file3 + ' + + test_expect_success "file no longer appears" ' + verify_dir_contents /cats/this/is/a/dir + ' + + test_expect_success "can remove dir" ' + ipfs files rm -r /cats/this/is/a/dir + ' + + test_expect_success "dir no longer appears" ' + verify_dir_contents /cats/this/is/a + ' + + test_expect_success "can remove file from root" ' + ipfs files rm /file2 + ' + + test_expect_success "file no longer appears" ' + verify_dir_contents / cats + ' + + # test read options + + test_expect_success "read from offset works" ' + ipfs files read -o 1 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo oo > expected && + test_cmp output expected + ' + + test_expect_success "read with size works" ' + ipfs files read -n 2 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + printf fo > 
expected && + test_cmp output expected + ' + + # test write + + test_expect_success "can write file" ' + echo "ipfs rocks" > tmpfile && + cat tmpfile | ipfs files write --create /cats/ipfs + ' + + test_expect_success "file was created" ' + verify_dir_contents /cats ipfs file1 this + ' + + test_expect_success "can read file we just wrote" ' + ipfs files read /cats/ipfs > output + ' + + test_expect_success "can write to offset" ' + echo "is super cool" | ipfs files write -o 5 /cats/ipfs + ' + + test_expect_success "file looks correct" ' + echo "ipfs is super cool" > expected && + ipfs files read /cats/ipfs > output && + test_cmp output expected + ' + + # test mv + test_expect_success "can mv dir" ' + ipfs files mv /cats/this/is /cats/ + ' + + test_expect_success "mv worked" ' + verify_dir_contents /cats file1 ipfs this is && + verify_dir_contents /cats/this + ' + + test_expect_success "cleanup, remove 'cats'" ' + ipfs files rm -r /cats + ' + + test_expect_success "cleanup looks good" ' + verify_dir_contents / + ' +} + +# test offline and online +test_files_api +test_launch_ipfs_daemon +test_files_api +test_kill_ipfs_daemon +test_done From d69891f98177fee9185dc3eb94db9aaf57d677dc Mon Sep 17 00:00:00 2001 From: Jeromy Date: Wed, 30 Sep 2015 17:12:51 -0700 Subject: [PATCH 2/2] address comments from CR License: MIT Signed-off-by: Jeromy --- core/commands/files/files.go | 288 +++++++++++++++++++++++-------- core/core.go | 6 +- mfs/ops.go | 82 ++++++--- test/sharness/t0250-files-api.sh | 136 ++++++++++++++- unixfs/mod/dagmodifier.go | 20 ++- unixfs/mod/dagmodifier_test.go | 47 +++++ 6 files changed, 473 insertions(+), 106 deletions(-) diff --git a/core/commands/files/files.go b/core/commands/files/files.go index 60fde789fa1..c0f32a5b9c3 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -16,6 +16,8 @@ import ( path "github.com/ipfs/go-ipfs/path" ft "github.com/ipfs/go-ipfs/unixfs" u "github.com/ipfs/go-ipfs/util" + + context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context" ) var log = u.Logger("cmds/files") @@ -54,44 +56,75 @@ var FilesStatCmd = &cmds.Command{ return } - path := req.Arguments()[0] - fsn, err := mfs.Lookup(node.FilesRoot, path) + path, err := checkPath(req.Arguments()[0]) if err != nil { res.SetError(err, cmds.ErrNormal) return } - nd, err := fsn.GetNode() + fsn, err := mfs.Lookup(node.FilesRoot, path) if err != nil { res.SetError(err, cmds.ErrNormal) return } - k, err := nd.Key() + o, err := statNode(fsn) if err != nil { res.SetError(err, cmds.ErrNormal) return } - res.SetOutput(&Object{ - Hash: k.B58String(), - }) + res.SetOutput(o) }, Marshalers: cmds.MarshalerMap{ cmds.Text: func(res cmds.Response) (io.Reader, error) { out := res.Output().(*Object) - return strings.NewReader(out.Hash), nil + buf := new(bytes.Buffer) + fmt.Fprintln(buf, out.Hash) + fmt.Fprintf(buf, "Size: %d\n", out.Size) + fmt.Fprintf(buf, "CumulativeSize: %d\n", out.CumulativeSize) + fmt.Fprintf(buf, "ChildBlocks: %d\n", out.Blocks) + return buf, nil }, }, Type: Object{}, } +func statNode(fsn mfs.FSNode) (*Object, error) { + nd, err := fsn.GetNode() + if err != nil { + return nil, err + } + + k, err := nd.Key() + if err != nil { + return nil, err + } + + d, err := ft.FromBytes(nd.Data) + if err != nil { + return nil, err + } + + cumulsize, err := nd.Size() + if err != nil { + return nil, err + } + + return &Object{ + Hash: k.B58String(), + Blocks: len(nd.Links), + Size: d.GetFilesize(), + CumulativeSize: cumulsize, + }, nil +} + var FilesCpCmd = 
&cmds.Command{ Helptext: cmds.HelpText{ Tagline: "copy files into mfs", }, Arguments: []cmds.Argument{ - cmds.StringArg("src", true, false, "source object to copy"), + cmds.StringArg("source", true, false, "source object to copy"), cmds.StringArg("dest", true, false, "destination to copy object to"), }, Run: func(req cmds.Request, res cmds.Response) { @@ -101,39 +134,21 @@ var FilesCpCmd = &cmds.Command{ return } - src := req.Arguments()[0] - dst := req.Arguments()[1] - - var nd *dag.Node - switch { - case strings.HasPrefix(src, "/ipfs/"): - p, err := path.ParsePath(src) - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } - - obj, err := core.Resolve(req.Context(), node, p) - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } - - nd = obj - default: - fsn, err := mfs.Lookup(node.FilesRoot, src) - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } - - obj, err := fsn.GetNode() - if err != nil { - res.SetError(err, cmds.ErrNormal) - return - } + src, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + dst, err := checkPath(req.Arguments()[1]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } - nd = obj + nd, err := getNodeFromPath(req.Context(), node, src) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return } err = mfs.PutNode(node.FilesRoot, dst, nd) @@ -144,8 +159,30 @@ var FilesCpCmd = &cmds.Command{ }, } +func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.Node, error) { + switch { + case strings.HasPrefix(p, "/ipfs/"): + np, err := path.ParsePath(p) + if err != nil { + return nil, err + } + + return core.Resolve(ctx, node, np) + default: + fsn, err := mfs.Lookup(node.FilesRoot, p) + if err != nil { + return nil, err + } + + return fsn.GetNode() + } +} + type Object struct { - Hash string + Hash string + Size uint64 + CumulativeSize uint64 + Blocks int } type FilesLsOutput struct { @@ -180,7 +217,12 @@ Examples: cmds.BoolOption("l", "use long listing format"), }, Run: func(req cmds.Request, res cmds.Response) { - path := req.Arguments()[0] + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + nd, err := req.InvocContext().GetNode() if err != nil { res.SetError(err, cmds.ErrNormal) @@ -242,7 +284,7 @@ Examples: $ ipfs files read /test/hello hello - `, + `, }, Arguments: []cmds.Argument{ @@ -259,7 +301,12 @@ Examples: return } - path := req.Arguments()[0] + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + fsn, err := mfs.Lookup(n.FilesRoot, path) if err != nil { res.SetError(err, cmds.ErrNormal) @@ -272,7 +319,26 @@ Examples: return } - offset, _, _ := req.Option("offset").Int() + offset, _, err := req.Option("offset").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if offset < 0 { + res.SetError(fmt.Errorf("cannot specify negative offset"), cmds.ErrNormal) + return + } + + filen, err := fi.Size() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + if int64(offset) > filen { + res.SetError(fmt.Errorf("offset was past end of file (%d > %d)", offset, filen), cmds.ErrNormal) + return + } _, err = fi.Seek(int64(offset), os.SEEK_SET) if err != nil { @@ -281,7 +347,15 @@ Examples: } var r io.Reader = fi count, found, err := req.Option("count").Int() - if err == nil && found { + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if found { + if count < 0 { + 
res.SetError(fmt.Errorf("cannot specify negative 'count'"), cmds.ErrNormal) + return + } r = io.LimitReader(fi, int64(count)) } @@ -299,7 +373,7 @@ Example: $ ipfs files mv /myfs/a/b/c /myfs/foo/newc - `, +`, }, Arguments: []cmds.Argument{ @@ -313,8 +387,16 @@ Example: return } - src := req.Arguments()[0] - dst := req.Arguments()[1] + src, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + dst, err := checkPath(req.Arguments()[1]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } err = mfs.Mv(n.FilesRoot, src, dst) if err != nil { @@ -331,14 +413,14 @@ var FilesWriteCmd = &cmds.Command{ Write data to a file in a given filesystem. This command allows you to specify a beginning offset to write to. The entire length of the input will be written. -If the '--create' option is specified, the file will be create if it does not +If the '--create' option is specified, the file will be created if it does not exist. Nonexistant intermediate directories will not be created. Example: - echo "hello world" | ipfs files write --create /myfs/a/b/file - echo "hello world" | ipfs files write --truncate /myfs/a/b/file - `, + echo "hello world" | ipfs files write --create /myfs/a/b/file + echo "hello world" | ipfs files write --truncate /myfs/a/b/file +`, }, Arguments: []cmds.Argument{ cmds.StringArg("path", true, false, "path to write to"), @@ -346,11 +428,17 @@ Example: }, Options: []cmds.Option{ cmds.IntOption("o", "offset", "offset to write to"), - cmds.BoolOption("n", "create", "create the file if it does not exist"), + cmds.BoolOption("e", "create", "create the file if it does not exist"), cmds.BoolOption("t", "truncate", "truncate the file before writing"), + cmds.IntOption("n", "count", "maximum number of bytes to read"), }, Run: func(req cmds.Request, res cmds.Response) { - path := req.Arguments()[0] + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + create, _, _ := req.Option("create").Bool() trunc, _, _ := req.Option("truncate").Bool() @@ -374,7 +462,25 @@ Example: } } - offset, _, _ := req.Option("offset").Int() + offset, _, err := req.Option("offset").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if offset < 0 { + res.SetError(fmt.Errorf("cannot have negative write offset"), cmds.ErrNormal) + return + } + + count, countfound, err := req.Option("count").Int() + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + if countfound && count < 0 { + res.SetError(fmt.Errorf("cannot have negative byte count"), cmds.ErrNormal) + return + } _, err = fi.Seek(int64(offset), os.SEEK_SET) if err != nil { @@ -389,6 +495,11 @@ Example: return } + var r io.Reader = input + if countfound { + r = io.LimitReader(r, int64(count)) + } + n, err := io.Copy(fi, input) if err != nil { res.SetError(err, cmds.ErrNormal) @@ -410,7 +521,7 @@ Note: all paths must be absolute. 
Examples: $ ipfs mfs mkdir /test/newdir - $ ipfs mfs mkdir -p /test/does/not/exist/yet + $ ipfs mfs mkdir -p /test/does/not/exist/yet `, }, @@ -428,10 +539,9 @@ Examples: } dashp, _, _ := req.Option("parents").Bool() - dirtomake := req.Arguments()[0] - - if dirtomake[0] != '/' { - res.SetError(errors.New("paths must be absolute"), cmds.ErrNormal) + dirtomake, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) return } @@ -445,8 +555,17 @@ Examples: var FilesRmCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "remove a file", - ShortDescription: ``, + Tagline: "remove a file", + ShortDescription: ` +remove files or directories + + $ ipfs files rm /foo + $ ipfs files ls /bar + cat + dog + fish + $ ipfs files rm -r /bar +`, }, Arguments: []cmds.Argument{ @@ -462,7 +581,22 @@ var FilesRmCmd = &cmds.Command{ return } - path := req.Arguments()[0] + path, err := checkPath(req.Arguments()[0]) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } + + if path == "/" { + res.SetError(fmt.Errorf("cannot delete root"), cmds.ErrNormal) + return + } + + // 'rm a/b/c/' will fail unless we trim the slash at the end + if path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + dir, name := gopath.Split(path) parent, err := mfs.Lookup(nd.FilesRoot, dir) if err != nil { @@ -545,11 +679,29 @@ func getFileHandle(r *mfs.Root, path string, create bool) (*mfs.File, error) { return nil, err } - // can unsafely cast, if it fails, that means programmer error - return fsn.(*mfs.File), nil + fi, ok := fsn.(*mfs.File) + if !ok { + return nil, errors.New("expected *mfs.File, didnt get it. This is likely a race condition") + } + return fi, nil default: - log.Error("GFH default") return nil, err } } + +func checkPath(p string) (string, error) { + if len(p) == 0 { + return "", fmt.Errorf("paths must not be empty") + } + + if p[0] != '/' { + return "", fmt.Errorf("paths must start with a leading slash") + } + + cleaned := gopath.Clean(p) + if p[len(p)-1] == '/' && p != "/" { + cleaned += "/" + } + return cleaned, nil +} diff --git a/core/core.go b/core/core.go index f90f15b7eac..ce5cd9bb62d 100644 --- a/core/core.go +++ b/core/core.go @@ -56,7 +56,7 @@ import ( pin "github.com/ipfs/go-ipfs/pin" repo "github.com/ipfs/go-ipfs/repo" config "github.com/ipfs/go-ipfs/repo/config" - unixfs "github.com/ipfs/go-ipfs/unixfs" + uio "github.com/ipfs/go-ipfs/unixfs/io" ) const IpnsValidatorTag = "ipns" @@ -405,7 +405,7 @@ func (n *IpfsNode) loadBootstrapPeers() ([]peer.PeerInfo, error) { } func (n *IpfsNode) loadFilesRoot() error { - dsk := ds.NewKey("/filesroot") + dsk := ds.NewKey("/local/filesroot") pf := func(ctx context.Context, k key.Key) error { return n.Repo.Datastore().Put(dsk, []byte(k)) } @@ -415,7 +415,7 @@ func (n *IpfsNode) loadFilesRoot() error { switch { case err == ds.ErrNotFound || val == nil: - nd = &merkledag.Node{Data: unixfs.FolderPBData()} + nd = uio.NewEmptyDirectory() _, err := n.DAG.Add(nd) if err != nil { return fmt.Errorf("failure writing to dagstore: %s", err) diff --git a/mfs/ops.go b/mfs/ops.go index 397aea65aa7..33514fc67a1 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -14,11 +14,6 @@ import ( func Mv(r *Root, src, dst string) error { srcDir, srcFname := gopath.Split(src) - srcObj, err := Lookup(r, src) - if err != nil { - return err - } - var dstDirStr string var filename string if dst[len(dst)-1] == '/' { @@ -28,28 +23,46 @@ func Mv(r *Root, src, dst string) error { dstDirStr, filename = gopath.Split(dst) } - dstDiri, err := Lookup(r, 
dstDirStr) + // get parent directories of both src and dest first + dstDir, err := lookupDir(r, dstDirStr) if err != nil { return err } - dstDir := dstDiri.(*Directory) - nd, err := srcObj.GetNode() + srcDirObj, err := lookupDir(r, srcDir) if err != nil { return err } - err = dstDir.AddChild(filename, nd) + srcObj, err := srcDirObj.Child(srcFname) if err != nil { return err } - srcDirObji, err := Lookup(r, srcDir) + nd, err := srcObj.GetNode() + if err != nil { + return err + } + + fsn, err := dstDir.Child(filename) + if err == nil { + switch n := fsn.(type) { + case *File: + _ = dstDir.Unlink(filename) + case *Directory: + dstDir = n + default: + return fmt.Errorf("unexpected type at path: %s", dst) + } + } else if err != os.ErrNotExist { + return err + } + + err = dstDir.AddChild(filename, nd) if err != nil { return err } - srcDirObj := srcDirObji.(*Directory) err = srcDirObj.Unlink(srcFname) if err != nil { return err @@ -58,18 +71,27 @@ func Mv(r *Root, src, dst string) error { return nil } +func lookupDir(r *Root, path string) (*Directory, error) { + di, err := Lookup(r, path) + if err != nil { + return nil, err + } + + d, ok := di.(*Directory) + if !ok { + return nil, fmt.Errorf("%s is not a directory", path) + } + + return d, nil +} + // PutNode inserts 'nd' at 'path' in the given mfs func PutNode(r *Root, path string, nd *dag.Node) error { dirp, filename := gopath.Split(path) - parent, err := Lookup(r, dirp) + pdir, err := lookupDir(r, dirp) if err != nil { - return fmt.Errorf("lookup '%s' failed: %s", dirp, err) - } - - pdir, ok := parent.(*Directory) - if !ok { - return fmt.Errorf("%s did not point to directory", dirp) + return err } return pdir.AddChild(filename, nd) @@ -83,17 +105,27 @@ func Mkdir(r *Root, path string, parents bool) error { parts = parts[1:] } + // allow 'mkdir /a/b/c/' to create c + if parts[len(parts)-1] == "" { + parts = parts[:len(parts)-1] + } + + if len(parts) == 0 { + // this will only happen on 'mkdir /' + return fmt.Errorf("cannot mkdir '%s'", path) + } + cur := r.GetValue().(*Directory) for i, d := range parts[:len(parts)-1] { fsn, err := cur.Child(d) - if err != nil { - if err == os.ErrNotExist && parents { - mkd, err := cur.Mkdir(d) - if err != nil { - return err - } - fsn = mkd + if err == os.ErrNotExist && parents { + mkd, err := cur.Mkdir(d) + if err != nil { + return err } + fsn = mkd + } else if err != nil { + return err } next, ok := fsn.(*Directory) diff --git a/test/sharness/t0250-files-api.sh b/test/sharness/t0250-files-api.sh index fb91c313848..4ee01bb114a 100755 --- a/test/sharness/t0250-files-api.sh +++ b/test/sharness/t0250-files-api.sh @@ -59,6 +59,19 @@ test_files_api() { verify_dir_contents /cats ' + test_expect_success "check root hash" ' + ipfs files stat / | head -n1 > roothash + ' + + test_expect_success "cannot mkdir /" ' + test_expect_code 1 ipfs files mkdir / + ' + + test_expect_success "check root hash was not changed" ' + ipfs files stat / | head -n1 > roothashafter && + test_cmp roothash roothashafter + ' + test_expect_success "can put files into directory" ' ipfs files cp /ipfs/$FILE1 /cats/file1 ' @@ -73,7 +86,7 @@ test_files_api() { test_expect_success "output looks good" ' echo foo > expected && - test_cmp file1out expected + test_cmp expected file1out ' test_expect_success "can put another file into root" ' @@ -90,7 +103,7 @@ test_files_api() { test_expect_success "output looks good" ' echo bar > expected && - test_cmp file2out expected + test_cmp expected file2out ' test_expect_success "can make deep directory" ' @@ 
-116,7 +129,7 @@ test_files_api() { test_expect_success "output looks good" ' echo baz > expected && - test_cmp output expected + test_cmp expected output ' test_expect_success "file shows up in dir" ' @@ -147,6 +160,19 @@ test_files_api() { verify_dir_contents / cats ' + test_expect_success "check root hash" ' + ipfs files stat / | head -n1 > roothash + ' + + test_expect_success "cannot remove root" ' + test_expect_code 1 ipfs files rm -r / + ' + + test_expect_success "check root hash was not changed" ' + ipfs files stat / | head -n1 > roothashafter && + test_cmp roothash roothashafter + ' + # test read options test_expect_success "read from offset works" ' @@ -155,7 +181,7 @@ test_files_api() { test_expect_success "output looks good" ' echo oo > expected && - test_cmp output expected + test_cmp expected output ' test_expect_success "read with size works" ' @@ -164,7 +190,55 @@ test_files_api() { test_expect_success "output looks good" ' printf fo > expected && - test_cmp output expected + test_cmp expected output + ' + + test_expect_success "cannot read from negative offset" ' + test_expect_code 1 ipfs files read --offset -3 /cats/file1 + ' + + test_expect_success "read from offset 0 works" ' + ipfs files read --offset 0 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected output + ' + + test_expect_success "read last byte works" ' + ipfs files read --offset 2 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo o > expected && + test_cmp expected output + ' + + test_expect_success "offset past end of file fails" ' + test_expect_code 1 ipfs files read --offset 5 /cats/file1 + ' + + test_expect_success "cannot read negative count bytes" ' + test_expect_code 1 ipfs read --count -1 /cats/file1 + ' + + test_expect_success "reading zero bytes prints nothing" ' + ipfs files read --count 0 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + printf "" > expected && + test_cmp expected output + ' + + test_expect_success "count > len(file) prints entire file" ' + ipfs files read --count 200 /cats/file1 > output + ' + + test_expect_success "output looks good" ' + echo foo > expected && + test_cmp expected output ' # test write @@ -189,7 +263,57 @@ test_files_api() { test_expect_success "file looks correct" ' echo "ipfs is super cool" > expected && ipfs files read /cats/ipfs > output && - test_cmp output expected + test_cmp expected output + ' + + test_expect_success "cant write to negative offset" ' + ipfs files stat /cats/ipfs | head -n1 > filehash && + test_expect_code 1 ipfs files write --offset -1 /cats/ipfs < output + ' + + test_expect_success "verify file was not changed" ' + ipfs files stat /cats/ipfs | head -n1 > afterhash && + test_cmp filehash afterhash + ' + + test_expect_success "write new file for testing" ' + echo foobar | ipfs files write --create /fun + ' + + test_expect_success "write to offset past end works" ' + echo blah | ipfs files write --offset 50 /fun + ' + + test_expect_success "can read file" ' + ipfs files read /fun > sparse_output + ' + + test_expect_success "output looks good" ' + echo foobar > sparse_expected && + echo blah | dd of=sparse_expected bs=50 seek=1 && + test_cmp sparse_expected sparse_output + ' + + test_expect_success "cleanup" ' + ipfs files rm /fun + ' + + test_expect_success "cannot write to directory" ' + ipfs files stat /cats | head -n1 > dirhash && + test_expect_code 1 ipfs files write /cats < output + ' + + test_expect_success "verify dir 
was not changed" ' + ipfs files stat /cats | head -n1 > afterdirhash && + test_cmp dirhash afterdirhash + ' + + test_expect_success "cannot write to nonexistant path" ' + test_expect_code 1 ipfs files write /cats/bar/ < output + ' + + test_expect_success "no new paths were created" ' + verify_dir_contents /cats file1 ipfs this ' # test mv diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go index d2ad2fd8ff0..40cee0995c2 100644 --- a/unixfs/mod/dagmodifier.go +++ b/unixfs/mod/dagmodifier.go @@ -368,19 +368,31 @@ func (dm *DagModifier) Seek(offset int64, whence int) (int64, error) { return 0, err } + fisize, err := dm.Size() + if err != nil { + return 0, err + } + + var newoffset uint64 switch whence { case os.SEEK_CUR: - dm.curWrOff += uint64(offset) - dm.writeStart = dm.curWrOff + newoffset = dm.curWrOff + uint64(offset) case os.SEEK_SET: - dm.curWrOff = uint64(offset) - dm.writeStart = uint64(offset) + newoffset = uint64(offset) case os.SEEK_END: return 0, ErrSeekEndNotImpl default: return 0, ErrUnrecognizedWhence } + if offset > fisize { + if err := dm.expandSparse(offset - fisize); err != nil { + return 0, err + } + } + dm.curWrOff = newoffset + dm.writeStart = newoffset + if dm.read != nil { _, err = dm.read.Seek(offset, whence) if err != nil { diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index 6f53a90d1eb..f3341690c08 100644 --- a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -487,6 +487,53 @@ func TestSparseWrite(t *testing.T) { } } +func TestSeekPastEndWrite(t *testing.T) { + dserv := getMockDagServ(t) + _, n := getNode(t, dserv, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dagmod, err := NewDagModifier(ctx, n, dserv, sizeSplitterGen(512)) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 5000) + u.NewTimeSeededRand().Read(buf[2500:]) + + nseek, err := dagmod.Seek(2500, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + + if nseek != 2500 { + t.Fatal("failed to seek") + } + + wrote, err := dagmod.Write(buf[2500:]) + if err != nil { + t.Fatal(err) + } + + if wrote != 2500 { + t.Fatal("incorrect write amount") + } + + _, err = dagmod.Seek(0, os.SEEK_SET) + if err != nil { + t.Fatal(err) + } + + out, err := ioutil.ReadAll(dagmod) + if err != nil { + t.Fatal(err) + } + + if err = arrComp(out, buf); err != nil { + t.Fatal(err) + } +} + func BenchmarkDagmodWrite(b *testing.B) { b.StopTimer() dserv := getMockDagServ(b)