From cbe6c3408353b6274afe2b6cfd2a72b812b4f8a8 Mon Sep 17 00:00:00 2001 From: Brian Tiger Chow Date: Sun, 14 Sep 2014 04:24:34 -0700 Subject: [PATCH 1/6] docs(godoc) add doc.go in project root --- doc.go | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 doc.go diff --git a/doc.go b/doc.go new file mode 100644 index 00000000000..4dadc4e6a62 --- /dev/null +++ b/doc.go @@ -0,0 +1,2 @@ +// IPFS is a global, versioned, peer-to-peer filesystem +package ipfs From 1a7c083850d2d34031304cce7286e3bd4eca5ef4 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 15 Sep 2014 02:04:55 +0000 Subject: [PATCH 2/6] implement something like rabin fingerprinting --- importer/importer_test.go | 93 +++++++++++++++++++++---- importer/rabin.go | 143 ++++++++++++++++++++++++++++++++++++++ importer/split_test.go | 48 ------------- importer/splitting.go | 50 ------------- 4 files changed, 221 insertions(+), 113 deletions(-) create mode 100644 importer/rabin.go delete mode 100644 importer/split_test.go diff --git a/importer/importer_test.go b/importer/importer_test.go index 534bce262a7..dbd91770819 100644 --- a/importer/importer_test.go +++ b/importer/importer_test.go @@ -3,42 +3,66 @@ package importer import ( "bytes" "crypto/rand" + "fmt" "io" "io/ioutil" + "os" "testing" dag "github.com/jbenet/go-ipfs/merkledag" ) -func TestFileConsistency(t *testing.T) { - buf := new(bytes.Buffer) - io.CopyN(buf, rand.Reader, 512*32) - should := buf.Bytes() - nd, err := NewDagFromReaderWithSplitter(buf, SplitterBySize(512)) +func TestBuildDag(t *testing.T) { + td := os.TempDir() + fi, err := os.Create(td + "/tmpfi") if err != nil { t.Fatal(err) } - r, err := dag.NewDagReader(nd, nil) + + _, err = io.CopyN(fi, rand.Reader, 1024*1024) if err != nil { t.Fatal(err) } - out, err := ioutil.ReadAll(r) + fi.Close() + + _, err = NewDagFromFile(td + "/tmpfi") if err != nil { t.Fatal(err) } +} - if !bytes.Equal(out, should) { - t.Fatal("Output not the same as input.") +//Test where calls to read are smaller than the chunk size +func TestSizeBasedSplit(t *testing.T) { + bs := SplitterBySize(512) + testFileConsistency(t, bs, 32*512) + bs = SplitterBySize(4096) + testFileConsistency(t, bs, 32*4096) + + // Uneven offset + testFileConsistency(t, bs, 31*4095) +} + +func TestOtherSplit(t *testing.T) { + //split := WhyrusleepingCantImplementRabin + //testFileConsistency(t, split, 4096*64) +} + +type testData struct{ n uint64 } + +func (t *testData) Read(b []byte) (int, error) { + for i, _ := range b { + b[i] = byte(t.n % 256) + t.n++ } + return len(b), nil } -//Test where calls to read are smaller than the chunk size -func TestFileConsistencyLargeBlocks(t *testing.T) { +func testFileConsistency(t *testing.T, bs BlockSplitter, nbytes int) { buf := new(bytes.Buffer) - io.CopyN(buf, rand.Reader, 4096*32) + io.CopyN(buf, rand.Reader, int64(nbytes)) should := buf.Bytes() - nd, err := NewDagFromReaderWithSplitter(buf, SplitterBySize(4096)) + nd, err := NewDagFromReaderWithSplitter(buf, bs) if err != nil { t.Fatal(err) } @@ -52,7 +76,46 @@ func TestFileConsistencyLargeBlocks(t *testing.T) { t.Fatal(err) } - if !bytes.Equal(out, should) { - t.Fatal("Output not the same as input.") + err = arrComp(out, should) + if err != nil { + t.Fatal(err) + } +} + +func arrComp(a, b []byte) error { + if len(a) != len(b) { + return fmt.Errorf("Arrays differ in length. 
%d != %d", len(a), len(b)) + } + for i, v := range a { + if v != b[i] { + return fmt.Errorf("Arrays differ at index: %d", i) + } + } + return nil +} + +func TestMaybeRabinConsistency(t *testing.T) { + testFileConsistency(t, ThisMightBeRabin, 256*4096) +} + +func TestRabinSplit(t *testing.T) { + + //Generate some random data + nbytes := 256 * 4096 + buf := new(bytes.Buffer) + io.CopyN(buf, rand.Reader, int64(nbytes)) + good := buf.Bytes() + + // Get block generator for random data + ch := ThisMightBeRabin(buf) + + i := 0 + var blocks [][]byte + for blk := range ch { + if !bytes.Equal(blk, good[i:len(blk)+i]) { + t.Fatalf("bad block! %v", blk[:32]) + } + i += len(blk) + blocks = append(blocks, blk) } } diff --git a/importer/rabin.go b/importer/rabin.go new file mode 100644 index 00000000000..4348c93b3c2 --- /dev/null +++ b/importer/rabin.go @@ -0,0 +1,143 @@ +package importer + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +//pseudocode stolen from the internet +func rollhash(S []byte) { + a := 10 + mask := 0xfff + MOD := 33554383 //randomly chosen + windowSize := 16 + an := 1 + rollingHash := 0 + for i := 0; i < windowSize; i++ { + rollingHash = (rollingHash*a + int(S[i])) % MOD + an = (an * a) % MOD + } + if rollingHash&mask == mask { + // "match" + fmt.Println("match") + } + for i := 1; i < len(S)-windowSize; i++ { + rollingHash = (rollingHash*a + int(S[i+windowSize-1]) - an*int(S[i-1])) % MOD + if rollingHash&mask == mask { + //print "match" + fmt.Println("match") + } + } +} + +func ThisMightBeRabin(r io.Reader) chan []byte { + out := make(chan []byte) + go func() { + inbuf := bufio.NewReader(r) + blkbuf := new(bytes.Buffer) + + // some bullshit numbers + a := 10 + mask := 0xfff //make this smaller for smaller blocks + MOD := 33554383 //randomly chosen + windowSize := 16 + an := 1 + rollingHash := 0 + + window := make([]byte, windowSize) + get := func(i int) int { return int(window[i%len(window)]) } + set := func(i int, val byte) { window[i%len(window)] = val } + dup := func(b []byte) []byte { + d := make([]byte, len(b)) + copy(d, b) + return d + } + + i := 0 + for ; i < windowSize; i++ { + b, err := inbuf.ReadByte() + if err != nil { + fmt.Println(err) + return + } + blkbuf.WriteByte(b) + window[i] = b + rollingHash = (rollingHash*a + int(b)) % MOD + an = (an * a) % MOD + } + /* This is too short for a block + if rollingHash&mask == mask { + // "match" + fmt.Println("match") + } + */ + for ; true; i++ { + b, err := inbuf.ReadByte() + if err != nil { + break + } + outval := get(i) + set(i, b) + blkbuf.WriteByte(b) + rollingHash = (rollingHash*a + get(i) - an*outval) % MOD + if rollingHash&mask == mask { + //print "match" + out <- dup(blkbuf.Bytes()) + blkbuf.Reset() + } + peek, err := inbuf.Peek(windowSize) + if err != nil { + break + } + if len(peek) != windowSize { + break + } + } + io.Copy(blkbuf, inbuf) + out <- blkbuf.Bytes() + close(out) + }() + return out +} + +/* +func WhyrusleepingCantImplementRabin(r io.Reader) chan []byte { + out := make(chan []byte, 4) + go func() { + buf := bufio.NewReader(r) + blkbuf := new(bytes.Buffer) + window := make([]byte, 16) + var val uint64 + prime := uint64(61) + + get := func(i int) uint64 { + return uint64(window[i%len(window)]) + } + + set := func(i int, val byte) { + window[i%len(window)] = val + } + + for i := 0; ; i++ { + curb, err := buf.ReadByte() + if err != nil { + break + } + set(i, curb) + blkbuf.WriteByte(curb) + + hash := md5.Sum(window) + if hash[0] == 0 && hash[1] == 0 { + out <- blkbuf.Bytes() + blkbuf.Reset() + } + } + out <- 
blkbuf.Bytes() + close(out) + }() + + return out +} +*/ diff --git a/importer/split_test.go b/importer/split_test.go deleted file mode 100644 index fe3a4d0e549..00000000000 --- a/importer/split_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package importer - -import ( - "bytes" - "crypto/rand" - "testing" -) - -func TestDataSplitting(t *testing.T) { - buf := make([]byte, 16*1024*1024) - rand.Read(buf) - split := Rabin(buf) - - if len(split) == 1 { - t.Fatal("No split occurred!") - } - - min := 2 << 15 - max := 0 - - mxcount := 0 - - n := 0 - for _, b := range split { - if !bytes.Equal(b, buf[n:n+len(b)]) { - t.Fatal("Split lost data!") - } - n += len(b) - - if len(b) < min { - min = len(b) - } - - if len(b) > max { - max = len(b) - } - - if len(b) == 16384 { - mxcount++ - } - } - - if n != len(buf) { - t.Fatal("missing some bytes!") - } - t.Log(len(split)) - t.Log(min, max, mxcount) -} diff --git a/importer/splitting.go b/importer/splitting.go index d2690c7841d..a4c19bf20c1 100644 --- a/importer/splitting.go +++ b/importer/splitting.go @@ -32,53 +32,3 @@ func SplitterBySize(n int) BlockSplitter { return out } } - -// TODO: this should take a reader, not a byte array. what if we're splitting a 3TB file? -//Rabin Fingerprinting for file chunking -func Rabin(b []byte) [][]byte { - var out [][]byte - windowsize := uint64(48) - chunkMax := 1024 * 16 - minBlkSize := 2048 - blkBegI := 0 - prime := uint64(61) - - var poly uint64 - var curchecksum uint64 - - // Smaller than a window? Get outa here! - if len(b) <= int(windowsize) { - return [][]byte{b} - } - - i := 0 - for n := i; i < n+int(windowsize); i++ { - cur := uint64(b[i]) - curchecksum = (curchecksum * prime) + cur - poly = (poly * prime) + cur - } - - for ; i < len(b); i++ { - cur := uint64(b[i]) - curchecksum = (curchecksum * prime) + cur - poly = (poly * prime) + cur - curchecksum -= (uint64(b[i-1]) * prime) - - if i-blkBegI >= chunkMax { - // push block - out = append(out, b[blkBegI:i]) - blkBegI = i - } - - // first 13 bits of polynomial are 0 - if poly%8192 == 0 && i-blkBegI >= minBlkSize { - // push block - out = append(out, b[blkBegI:i]) - blkBegI = i - } - } - if i > blkBegI { - out = append(out, b[blkBegI:]) - } - return out -} From 78454884db316d58c37e0a6addd3a7ed155a4c69 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 15 Sep 2014 04:17:29 +0000 Subject: [PATCH 3/6] clean up code, make it have a nicer interface --- importer/importer.go | 4 +- importer/importer_test.go | 43 ++----------------- importer/rabin.go | 88 ++++++++++++++++----------------------- importer/splitting.go | 46 ++++++++++---------- 4 files changed, 67 insertions(+), 114 deletions(-) diff --git a/importer/importer.go b/importer/importer.go index bdc5a30e038..607a989a65c 100644 --- a/importer/importer.go +++ b/importer/importer.go @@ -20,11 +20,11 @@ var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded") // NewDagFromReader constructs a Merkle DAG from the given io.Reader. // size required for block construction. 
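 // By default a SizeSplitter producing fixed 512 KiB blocks is used; callers
 // that want content-defined chunking can pass their own BlockSplitter to
 // NewDagFromReaderWithSplitter below.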
func NewDagFromReader(r io.Reader) (*dag.Node, error) { - return NewDagFromReaderWithSplitter(r, SplitterBySize(1024*512)) + return NewDagFromReaderWithSplitter(r, &SizeSplitter{1024 * 512}) } func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, error) { - blkChan := spl(r) + blkChan := spl.Split(r) root := &dag.Node{Data: dag.FilePBData()} for blk := range blkChan { diff --git a/importer/importer_test.go b/importer/importer_test.go index dbd91770819..cefe4a9b49b 100644 --- a/importer/importer_test.go +++ b/importer/importer_test.go @@ -34,30 +34,15 @@ func TestBuildDag(t *testing.T) { //Test where calls to read are smaller than the chunk size func TestSizeBasedSplit(t *testing.T) { - bs := SplitterBySize(512) + bs := &SizeSplitter{512} testFileConsistency(t, bs, 32*512) - bs = SplitterBySize(4096) + bs = &SizeSplitter{4096} testFileConsistency(t, bs, 32*4096) // Uneven offset testFileConsistency(t, bs, 31*4095) } -func TestOtherSplit(t *testing.T) { - //split := WhyrusleepingCantImplementRabin - //testFileConsistency(t, split, 4096*64) -} - -type testData struct{ n uint64 } - -func (t *testData) Read(b []byte) (int, error) { - for i, _ := range b { - b[i] = byte(t.n % 256) - t.n++ - } - return len(b), nil -} - func testFileConsistency(t *testing.T, bs BlockSplitter, nbytes int) { buf := new(bytes.Buffer) io.CopyN(buf, rand.Reader, int64(nbytes)) @@ -95,27 +80,5 @@ func arrComp(a, b []byte) error { } func TestMaybeRabinConsistency(t *testing.T) { - testFileConsistency(t, ThisMightBeRabin, 256*4096) -} - -func TestRabinSplit(t *testing.T) { - - //Generate some random data - nbytes := 256 * 4096 - buf := new(bytes.Buffer) - io.CopyN(buf, rand.Reader, int64(nbytes)) - good := buf.Bytes() - - // Get block generator for random data - ch := ThisMightBeRabin(buf) - - i := 0 - var blocks [][]byte - for blk := range ch { - if !bytes.Equal(blk, good[i:len(blk)+i]) { - t.Fatalf("bad block! %v", blk[:32]) - } - i += len(blk) - blocks = append(blocks, blk) - } + testFileConsistency(t, NewMaybeRabin(4096), 256*4096) } diff --git a/importer/rabin.go b/importer/rabin.go index 4348c93b3c2..4671239acc2 100644 --- a/importer/rabin.go +++ b/importer/rabin.go @@ -5,93 +5,79 @@ import ( "bytes" "fmt" "io" + "math" ) -//pseudocode stolen from the internet -func rollhash(S []byte) { - a := 10 - mask := 0xfff - MOD := 33554383 //randomly chosen - windowSize := 16 - an := 1 - rollingHash := 0 - for i := 0; i < windowSize; i++ { - rollingHash = (rollingHash*a + int(S[i])) % MOD - an = (an * a) % MOD - } - if rollingHash&mask == mask { - // "match" - fmt.Println("match") - } - for i := 1; i < len(S)-windowSize; i++ { - rollingHash = (rollingHash*a + int(S[i+windowSize-1]) - an*int(S[i-1])) % MOD - if rollingHash&mask == mask { - //print "match" - fmt.Println("match") - } - } +type MaybeRabin struct { + mask int + windowSize int +} + +func NewMaybeRabin(avgBlkSize int) *MaybeRabin { + blkbits := uint(math.Log2(float64(avgBlkSize))) + rb := new(MaybeRabin) + rb.mask = (1 << blkbits) - 1 + rb.windowSize = 16 // probably a good number... 
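+	// mask keeps the low blkbits bits of the rolling hash; a chunk boundary
+	// fires when they are all ones, i.e. on average once every avgBlkSize
+	// bytes of input.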
+ return rb } -func ThisMightBeRabin(r io.Reader) chan []byte { - out := make(chan []byte) +func (mr *MaybeRabin) Split(r io.Reader) chan []byte { + out := make(chan []byte, 16) go func() { inbuf := bufio.NewReader(r) blkbuf := new(bytes.Buffer) - // some bullshit numbers - a := 10 - mask := 0xfff //make this smaller for smaller blocks - MOD := 33554383 //randomly chosen - windowSize := 16 + // some bullshit numbers i made up + a := 10 // honestly, no idea what this is + MOD := 33554383 // randomly chosen (seriously) an := 1 rollingHash := 0 - window := make([]byte, windowSize) - get := func(i int) int { return int(window[i%len(window)]) } - set := func(i int, val byte) { window[i%len(window)] = val } + // Window is a circular buffer + window := make([]byte, mr.windowSize) + push := func(i int, val byte) (outval int) { + outval = int(window[i%len(window)]) + window[i%len(window)] = val + return + } + + // Duplicate byte slice dup := func(b []byte) []byte { d := make([]byte, len(b)) copy(d, b) return d } + // Fill up the window i := 0 - for ; i < windowSize; i++ { + for ; i < mr.windowSize; i++ { b, err := inbuf.ReadByte() if err != nil { fmt.Println(err) return } blkbuf.WriteByte(b) - window[i] = b + push(i, b) rollingHash = (rollingHash*a + int(b)) % MOD an = (an * a) % MOD } - /* This is too short for a block - if rollingHash&mask == mask { - // "match" - fmt.Println("match") - } - */ + for ; true; i++ { b, err := inbuf.ReadByte() if err != nil { break } - outval := get(i) - set(i, b) + outval := push(i, b) blkbuf.WriteByte(b) - rollingHash = (rollingHash*a + get(i) - an*outval) % MOD - if rollingHash&mask == mask { - //print "match" + rollingHash = (rollingHash*a + int(b) - an*outval) % MOD + if rollingHash&mr.mask == mr.mask { out <- dup(blkbuf.Bytes()) blkbuf.Reset() } - peek, err := inbuf.Peek(windowSize) - if err != nil { - break - } - if len(peek) != windowSize { + + // Check if there are enough remaining + peek, err := inbuf.Peek(mr.windowSize) + if err != nil || len(peek) != mr.windowSize { break } } diff --git a/importer/splitting.go b/importer/splitting.go index a4c19bf20c1..30ecc1e3e51 100644 --- a/importer/splitting.go +++ b/importer/splitting.go @@ -6,29 +6,33 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -type BlockSplitter func(io.Reader) chan []byte +type BlockSplitter interface { + Split(io.Reader) chan []byte +} + +type SizeSplitter struct { + Size int +} -func SplitterBySize(n int) BlockSplitter { - return func(r io.Reader) chan []byte { - out := make(chan []byte) - go func(n int) { - defer close(out) - for { - chunk := make([]byte, n) - nread, err := r.Read(chunk) - if err != nil { - if err == io.EOF { - return - } - u.PErr("block split error: %v\n", err) +func (ss *SizeSplitter) Split(r io.Reader) chan []byte { + out := make(chan []byte) + go func() { + defer close(out) + for { + chunk := make([]byte, ss.Size) + nread, err := r.Read(chunk) + if err != nil { + if err == io.EOF { return } - if nread < n { - chunk = chunk[:nread] - } - out <- chunk + u.PErr("block split error: %v\n", err) + return + } + if nread < ss.Size { + chunk = chunk[:nread] } - }(n) - return out - } + out <- chunk + } + }() + return out } From fdcd015eedcdadbdb9d2b3ea8f03d01afe6dccc2 Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 15 Sep 2014 05:35:31 +0000 Subject: [PATCH 4/6] move first data block into top level dag node --- importer/importer.go | 3 ++- importer/importer_test.go | 16 ++++++++++++++++ importer/rabin.go | 11 ++++++++--- merkledag/dagreader.go | 1 + merkledag/merkledag.go | 3 
++- 5 files changed, 29 insertions(+), 5 deletions(-) diff --git a/importer/importer.go b/importer/importer.go index 607a989a65c..197eaef19fe 100644 --- a/importer/importer.go +++ b/importer/importer.go @@ -25,7 +25,8 @@ func NewDagFromReader(r io.Reader) (*dag.Node, error) { func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, error) { blkChan := spl.Split(r) - root := &dag.Node{Data: dag.FilePBData()} + first := <-blkChan + root := &dag.Node{Data: dag.FilePBData(first)} for blk := range blkChan { child := &dag.Node{Data: dag.WrapData(blk)} diff --git a/importer/importer_test.go b/importer/importer_test.go index cefe4a9b49b..9fb1afa0809 100644 --- a/importer/importer_test.go +++ b/importer/importer_test.go @@ -82,3 +82,19 @@ func arrComp(a, b []byte) error { func TestMaybeRabinConsistency(t *testing.T) { testFileConsistency(t, NewMaybeRabin(4096), 256*4096) } + +func TestRabinBlockSize(t *testing.T) { + buf := new(bytes.Buffer) + nbytes := 1024 * 1024 + io.CopyN(buf, rand.Reader, int64(nbytes)) + rab := NewMaybeRabin(4096) + blkch := rab.Split(buf) + + var blocks [][]byte + for b := range blkch { + blocks = append(blocks, b) + } + + fmt.Printf("Avg block size: %d\n", nbytes/len(blocks)) + +} diff --git a/importer/rabin.go b/importer/rabin.go index 4671239acc2..3eab5bc9cf0 100644 --- a/importer/rabin.go +++ b/importer/rabin.go @@ -9,8 +9,10 @@ import ( ) type MaybeRabin struct { - mask int - windowSize int + mask int + windowSize int + MinBlockSize int + MaxBlockSize int } func NewMaybeRabin(avgBlkSize int) *MaybeRabin { @@ -18,6 +20,8 @@ func NewMaybeRabin(avgBlkSize int) *MaybeRabin { rb := new(MaybeRabin) rb.mask = (1 << blkbits) - 1 rb.windowSize = 16 // probably a good number... + rb.MinBlockSize = avgBlkSize / 2 + rb.MaxBlockSize = (avgBlkSize / 2) * 3 return rb } @@ -70,7 +74,8 @@ func (mr *MaybeRabin) Split(r io.Reader) chan []byte { outval := push(i, b) blkbuf.WriteByte(b) rollingHash = (rollingHash*a + int(b) - an*outval) % MOD - if rollingHash&mr.mask == mr.mask { + if (rollingHash&mr.mask == mr.mask && blkbuf.Len() > mr.MinBlockSize) || + blkbuf.Len() >= mr.MaxBlockSize { out <- dup(blkbuf.Bytes()) blkbuf.Reset() } diff --git a/merkledag/dagreader.go b/merkledag/dagreader.go index 967ec63a4e2..5cf4e238ef9 100644 --- a/merkledag/dagreader.go +++ b/merkledag/dagreader.go @@ -34,6 +34,7 @@ func NewDagReader(n *Node, serv *DAGService) (io.Reader, error) { node: n, thisData: pb.GetData(), serv: serv, + buf: bytes.NewBuffer(pb.GetData()), }, nil case PBData_Raw: return bytes.NewBuffer(pb.GetData()), nil diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index accebb70856..79530df6d91 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -157,10 +157,11 @@ func (n *DAGService) Get(k u.Key) (*Node, error) { return Decoded(b.Data) } -func FilePBData() []byte { +func FilePBData(data []byte) []byte { pbfile := new(PBData) typ := PBData_File pbfile.Type = &typ + pbfile.Data = data data, err := proto.Marshal(pbfile) if err != nil { From 8a21af092af63ef1ea4609ebeb3d60b7c762f3ad Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 15 Sep 2014 06:07:03 +0000 Subject: [PATCH 5/6] add basic test for blocks package #59 --- blocks/blocks.go | 3 +-- merkledag/dagreader.go | 12 +++++------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/blocks/blocks.go b/blocks/blocks.go index b514f85d999..c58ab9f209e 100644 --- a/blocks/blocks.go +++ b/blocks/blocks.go @@ -5,8 +5,7 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -// Block is the ipfs 
blocks service. It is the way -// to retrieve blocks by the higher level ipfs modules +// Block is a singular block of data in ipfs type Block struct { Multihash mh.Multihash Data []byte diff --git a/merkledag/dagreader.go b/merkledag/dagreader.go index 5cf4e238ef9..1e8a0c8b9ad 100644 --- a/merkledag/dagreader.go +++ b/merkledag/dagreader.go @@ -17,7 +17,6 @@ type DagReader struct { node *Node position int buf *bytes.Buffer - thisData []byte } func NewDagReader(n *Node, serv *DAGService) (io.Reader, error) { @@ -31,10 +30,9 @@ func NewDagReader(n *Node, serv *DAGService) (io.Reader, error) { return nil, ErrIsDir case PBData_File: return &DagReader{ - node: n, - thisData: pb.GetData(), - serv: serv, - buf: bytes.NewBuffer(pb.GetData()), + node: n, + serv: serv, + buf: bytes.NewBuffer(pb.GetData()), }, nil case PBData_Raw: return bytes.NewBuffer(pb.GetData()), nil @@ -63,12 +61,12 @@ func (dr *DagReader) precalcNextBuf() error { } dr.position++ - // TODO: dont assume a single layer of indirection switch pb.GetType() { case PBData_Directory: panic("Why is there a directory under a file?") case PBData_File: - //TODO: maybe have a PBData_Block type for indirect blocks? + //TODO: this *should* work, needs testing first + //return NewDagReader(nxt, dr.serv) panic("Not yet handling different layers of indirection!") case PBData_Raw: dr.buf = bytes.NewBuffer(pb.GetData()) From 782976f473c689e54c1f83c845a782f55f5c610b Mon Sep 17 00:00:00 2001 From: Jeromy Date: Mon, 15 Sep 2014 06:08:49 +0000 Subject: [PATCH 6/6] add basic test for blocks package #59 (actually add file) --- blocks/blocks_test.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 blocks/blocks_test.go diff --git a/blocks/blocks_test.go b/blocks/blocks_test.go new file mode 100644 index 00000000000..915d84c023f --- /dev/null +++ b/blocks/blocks_test.go @@ -0,0 +1,25 @@ +package blocks + +import "testing" + +func TestBlocksBasic(t *testing.T) { + + // Test empty data + empty := []byte{} + _, err := NewBlock(empty) + if err != nil { + t.Fatal(err) + } + + // Test nil case + _, err = NewBlock(nil) + if err != nil { + t.Fatal(err) + } + + // Test some data + _, err = NewBlock([]byte("Hello world!")) + if err != nil { + t.Fatal(err) + } +}
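---

A minimal usage sketch for the BlockSplitter interface introduced in patches
2-4 (assuming the import path used throughout these patches; the input file
name is illustrative):

    package main

    import (
        "log"
        "os"

        "github.com/jbenet/go-ipfs/importer"
    )

    func main() {
        f, err := os.Open("example.dat") // any local file
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // Content-defined chunking with a ~4096-byte average block size,
        // as exercised by TestMaybeRabinConsistency.
        nd, err := importer.NewDagFromReaderWithSplitter(f, importer.NewMaybeRabin(4096))
        if err != nil {
            log.Fatal(err)
        }
        _ = nd // root *dag.Node of the resulting Merkle DAG
    }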