Skip to content

Commit

Permalink
Make Golint happy in the blocks submodule.
Browse files Browse the repository at this point in the history
This has required changing the order of some parameters and
adding HashOnRead to the Blockstore interface (which I have in turn
added to all the wrapper implementations).

License: MIT
Signed-off-by: Hector Sanjuan <[email protected]>
  • Loading branch information
hsanjuan committed Mar 24, 2017
1 parent ff997c1 commit 63d326c
Show file tree
Hide file tree
Showing 15 changed files with 147 additions and 54 deletions.
22 changes: 16 additions & 6 deletions blocks/blocks.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
// package blocks contains the lowest level of IPFS data structures,
// the raw block with a checksum.
// Package blocks contains the lowest level of IPFS data structures.
// A block is raw data accompanied by a CID. The CID contains the multihash
// corresponding to the block.
package blocks

import (
Expand All @@ -11,16 +12,20 @@ import (
mh "gx/ipfs/QmbZ6Cee2uHjG7hf19qLHppgKDRtaG4CVtMzdmK9VCVqLu/go-multihash"
)

var ErrWrongHash = errors.New("data did not match given hash!")
// ErrWrongHash is returned when the Cid of a block is not the expected
// according to the contents. It is currently used only when debugging.
var ErrWrongHash = errors.New("data did not match given hash")

// Block provides an abstraction for block implementations.
type Block interface {
	// RawData returns the raw contents of the block.
	RawData() []byte
	// Cid returns the content identifier of the block.
	Cid() *cid.Cid
	// String returns a human-readable representation of the block.
	String() string
	// Loggable returns a go-log loggable item.
	Loggable() map[string]interface{}
}

// Block is a singular block of data in ipfs
// A BasicBlock is a singular block of data in ipfs. It implements the Block
// interface.
type BasicBlock struct {
cid *cid.Cid
data []byte
Expand All @@ -32,9 +37,9 @@ func NewBlock(data []byte) *BasicBlock {
return &BasicBlock{data: data, cid: cid.NewCidV0(u.Hash(data))}
}

// NewBlockWithHash creates a new block when the hash of the data
// NewBlockWithCid creates a new block when the hash of the data
// is already known, this is used to save time in situations where
// we are able to be confident that the data is correct
// we are able to be confident that the data is correct.
func NewBlockWithCid(data []byte, c *cid.Cid) (*BasicBlock, error) {
if u.Debug {
chkc, err := c.Prefix().Sum(data)
Expand All @@ -49,22 +54,27 @@ func NewBlockWithCid(data []byte, c *cid.Cid) (*BasicBlock, error) {
return &BasicBlock{data: data, cid: c}, nil
}

// Multihash returns the multihash contained in the block CID
// (the hash portion of the Cid, via cid.Hash()).
func (b *BasicBlock) Multihash() mh.Multihash {
	return b.cid.Hash()
}

// RawData returns the block raw contents as a byte slice.
// The slice is not copied; callers must not modify it.
func (b *BasicBlock) RawData() []byte {
	return b.data
}

// Cid returns the content identifier of the block.
func (b *BasicBlock) Cid() *cid.Cid {
	return b.cid
}

// String returns a short, human-readable description of the block
// containing its CID, e.g. "[Block <cid>]".
func (b *BasicBlock) String() string {
	c := b.Cid()
	return fmt.Sprintf("[Block %s]", c)
}

// Loggable returns a go-log loggable item.
func (b *BasicBlock) Loggable() map[string]interface{} {
return map[string]interface{}{
"block": b.Cid().String(),
Expand Down
7 changes: 7 additions & 0 deletions blocks/blockstore/arc_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ import (
lru "gx/ipfs/QmVYxfoJQiZijTgPNHCHgHELvQpbsJNTg6Crmc3dQkj3yy/golang-lru"
)

// arccache wraps a Blockstore with an Adaptive Replacement Cache (ARC) of
// block Cids. This provides block access-time improvements, allowing many
// searches to be short-cut without querying the underlying datastore.
type arccache struct {
arc *lru.ARCCache
blockstore Blockstore
Expand Down Expand Up @@ -128,6 +131,10 @@ func (b *arccache) PutMany(bs []blocks.Block) error {
return nil
}

// HashOnRead passes the setting through to the wrapped blockstore;
// the ARC cache layer itself never rehashes block contents.
func (b *arccache) HashOnRead(enabled bool) {
	b.blockstore.HashOnRead(enabled)
}

// addCache records in the ARC cache whether a block with the given
// Cid is present in the underlying blockstore.
func (b *arccache) addCache(c *cid.Cid, has bool) {
	key := c.KeyString()
	b.arc.Add(key, has)
}
Expand Down
11 changes: 5 additions & 6 deletions blocks/blockstore/arc_cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,25 +13,24 @@ import (

var exampleBlock = blocks.NewBlock([]byte("foo"))

func testArcCached(bs Blockstore, ctx context.Context) (*arccache, error) {
func testArcCached(ctx context.Context, bs Blockstore) (*arccache, error) {
if ctx == nil {
ctx = context.TODO()
}
opts := DefaultCacheOpts()
opts.HasBloomFilterSize = 0
opts.HasBloomFilterHashes = 0
bbs, err := CachedBlockstore(bs, ctx, opts)
bbs, err := CachedBlockstore(ctx, bs, opts)
if err == nil {
return bbs.(*arccache), nil
} else {
return nil, err
}
return nil, err
}

func createStores(t *testing.T) (*arccache, *blockstore, *callbackDatastore) {
func createStores(t *testing.T) (*arccache, Blockstore, *callbackDatastore) {
cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()}
bs := NewBlockstore(syncds.MutexWrap(cd))
arc, err := testArcCached(bs, nil)
arc, err := testArcCached(nil, bs)
if err != nil {
t.Fatal(err)
}
Expand Down
47 changes: 35 additions & 12 deletions blocks/blockstore/blockstore.go
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// package blockstore implements a thin wrapper over a datastore, giving a
// Package blockstore implements a thin wrapper over a datastore, giving a
// clean interface for Getting and Putting block objects.
package blockstore

Expand All @@ -23,22 +23,36 @@ var log = logging.Logger("blockstore")
// BlockPrefix namespaces blockstore datastores
var BlockPrefix = ds.NewKey("blocks")

var ValueTypeMismatch = errors.New("the retrieved value is not a Block")
// ErrValueTypeMismatch is an error returned when the item retrieved from
// the datastore is not a block.
var ErrValueTypeMismatch = errors.New("the retrieved value is not a Block")

// ErrHashMismatch is an error returned when the hash of a block
// is different than expected.
var ErrHashMismatch = errors.New("block in storage has different hash than requested")

// ErrNotFound is an error returned when a block is not found.
var ErrNotFound = errors.New("blockstore: block not found")

// Blockstore wraps a Datastore
// Blockstore wraps a Datastore with block-centered methods and
// provides a layer of abstraction which allows different caching
// strategies to be added.
type Blockstore interface {
	// DeleteBlock removes the block with the given Cid.
	DeleteBlock(*cid.Cid) error
	// Has reports whether a block with the given Cid is stored.
	Has(*cid.Cid) (bool, error)
	// Get retrieves the block with the given Cid.
	Get(*cid.Cid) (blocks.Block, error)
	// Put stores the given block.
	Put(blocks.Block) error
	// PutMany stores the given blocks.
	PutMany([]blocks.Block) error

	// AllKeysChan returns a channel from which
	// the CIDs in the Blockstore can be read. It should respect
	// the given context, closing the channel if it becomes Done.
	AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error)
	// HashOnRead specifies if every read block should be
	// rehashed to make sure it matches its CID.
	HashOnRead(enabled bool)
}

// GCLocker abstracts the functionality to lock a blockstore when
// performing garbage-collection operations.
type GCLocker interface {
// GCLock locks the blockstore for garbage collection. No operations
that expect to finish with a pin should occur simultaneously.
Expand All @@ -56,11 +70,15 @@ type GCLocker interface {
GCRequested() bool
}

// GCBlockstore is a blockstore that can safely run garbage-collection
// operations. It combines the Blockstore and GCLocker interfaces.
type GCBlockstore interface {
	Blockstore
	GCLocker
}

// NewGCBlockstore returns a default implementation of GCBlockstore
// using the given Blockstore and GCLocker; both are embedded in the
// returned value, which delegates to them directly.
func NewGCBlockstore(bs Blockstore, gcl GCLocker) GCBlockstore {
	return gcBlockstore{
		Blockstore: bs,
		GCLocker:   gcl,
	}
}
Expand All @@ -70,7 +88,9 @@ type gcBlockstore struct {
GCLocker
}

func NewBlockstore(d ds.Batching) *blockstore {
// NewBlockstore returns a default Blockstore implementation
// using the provided datastore.Batching backend.
func NewBlockstore(d ds.Batching) Blockstore {
var dsb ds.Batching
dd := dsns.Wrap(d, BlockPrefix)
dsb = dd
Expand Down Expand Up @@ -108,7 +128,7 @@ func (bs *blockstore) Get(k *cid.Cid) (blocks.Block, error) {
}
bdata, ok := maybeData.([]byte)
if !ok {
return nil, ValueTypeMismatch
return nil, ErrValueTypeMismatch
}

if bs.rehash {
Expand All @@ -122,9 +142,8 @@ func (bs *blockstore) Get(k *cid.Cid) (blocks.Block, error) {
}

return blocks.NewBlockWithCid(bdata, rbcid)
} else {
return blocks.NewBlockWithCid(bdata, k)
}
return blocks.NewBlockWithCid(bdata, k)
}

func (bs *blockstore) Put(block blocks.Block) error {
Expand Down Expand Up @@ -162,8 +181,8 @@ func (bs *blockstore) Has(k *cid.Cid) (bool, error) {
return bs.datastore.Has(dshelp.CidToDsKey(k))
}

func (s *blockstore) DeleteBlock(k *cid.Cid) error {
err := s.datastore.Delete(dshelp.CidToDsKey(k))
func (bs *blockstore) DeleteBlock(k *cid.Cid) error {
err := bs.datastore.Delete(dshelp.CidToDsKey(k))
if err == ds.ErrNotFound {
return ErrNotFound
}
Expand All @@ -173,7 +192,7 @@ func (s *blockstore) DeleteBlock(k *cid.Cid) error {
// AllKeysChan runs a query for keys from the blockstore.
// this is very simplistic, in the future, take dsq.Query as a param?
//
// AllKeysChan respects context
// AllKeysChan respects context.
func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error) {

// KeysOnly, because that would be _a lot_ of data.
Expand Down Expand Up @@ -220,7 +239,9 @@ func (bs *blockstore) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error)
return output, nil
}

func NewGCLocker() *gclocker {
// NewGCLocker returns a default implementation of
// GCLocker using standard [RW] mutexes.
func NewGCLocker() GCLocker {
	return new(gclocker)
}

Expand All @@ -230,6 +251,8 @@ type gclocker struct {
gcreqlk sync.Mutex
}

// Unlocker represents an object which can release (Unlock) a
// previously acquired lock.
type Unlocker interface {
	Unlock()
}
Expand Down
4 changes: 2 additions & 2 deletions blocks/blockstore/blockstore_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ func TestAllKeysRespectsContext(t *testing.T) {

}

func TestValueTypeMismatch(t *testing.T) {
func TestErrValueTypeMismatch(t *testing.T) {
block := blocks.NewBlock([]byte("some data"))

datastore := ds.NewMapDatastore()
Expand All @@ -196,7 +196,7 @@ func TestValueTypeMismatch(t *testing.T) {
blockstore := NewBlockstore(ds_sync.MutexWrap(datastore))

_, err := blockstore.Get(block.Cid())
if err != ValueTypeMismatch {
if err != ErrValueTypeMismatch {
t.Fatal(err)
}
}
Expand Down
11 changes: 8 additions & 3 deletions blocks/blockstore/bloom_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,10 @@ import (
bloom "gx/ipfs/QmeiMCBkYHxkDkDfnDadzz4YxY5ruL5Pj499essE4vRsGM/bbloom"
)

// bloomCached returns Blockstore that caches Has requests using Bloom filter
// Size is size of bloom filter in bytes
func bloomCached(bs Blockstore, ctx context.Context, bloomSize, hashCount int) (*bloomcache, error) {
// bloomCached returns a Blockstore that caches Has requests using a Bloom
// filter. bloomSize is the size of the bloom filter in bytes. hashCount
// specifies the number of hashing functions in the bloom filter (usually
// known as k).
func bloomCached(ctx context.Context, bs Blockstore, bloomSize, hashCount int) (*bloomcache, error) {
bl, err := bloom.New(float64(bloomSize), float64(hashCount))
if err != nil {
return nil, err
Expand Down Expand Up @@ -165,6 +166,10 @@ func (b *bloomcache) PutMany(bs []blocks.Block) error {
return nil
}

// HashOnRead passes the setting through to the wrapped blockstore;
// the bloom cache layer itself never rehashes block contents.
func (b *bloomcache) HashOnRead(enabled bool) {
	b.blockstore.HashOnRead(enabled)
}

// AllKeysChan delegates directly to the wrapped blockstore;
// the bloom filter plays no role in key enumeration.
func (b *bloomcache) AllKeysChan(ctx context.Context) (<-chan *cid.Cid, error) {
	return b.blockstore.AllKeysChan(ctx)
}
Expand Down
13 changes: 6 additions & 7 deletions blocks/blockstore/bloom_cache_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,18 +14,17 @@ import (
syncds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync"
)

func testBloomCached(bs Blockstore, ctx context.Context) (*bloomcache, error) {
func testBloomCached(ctx context.Context, bs Blockstore) (*bloomcache, error) {
if ctx == nil {
ctx = context.TODO()
}
opts := DefaultCacheOpts()
opts.HasARCCacheSize = 0
bbs, err := CachedBlockstore(bs, ctx, opts)
bbs, err := CachedBlockstore(ctx, bs, opts)
if err == nil {
return bbs.(*bloomcache), nil
} else {
return nil, err
}
return nil, err
}

func TestPutManyAddsToBloom(t *testing.T) {
Expand All @@ -34,7 +33,7 @@ func TestPutManyAddsToBloom(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

cachedbs, err := testBloomCached(bs, ctx)
cachedbs, err := testBloomCached(ctx, bs)

select {
case <-cachedbs.rebuildChan:
Expand Down Expand Up @@ -65,7 +64,7 @@ func TestPutManyAddsToBloom(t *testing.T) {

func TestReturnsErrorWhenSizeNegative(t *testing.T) {
bs := NewBlockstore(syncds.MutexWrap(ds.NewMapDatastore()))
_, err := bloomCached(bs, context.TODO(), -1, 1)
_, err := bloomCached(context.TODO(), bs, -1, 1)
if err == nil {
t.Fail()
}
Expand All @@ -80,7 +79,7 @@ func TestHasIsBloomCached(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

cachedbs, err := testBloomCached(bs, ctx)
cachedbs, err := testBloomCached(ctx, bs)
if err != nil {
t.Fatal(err)
}
Expand Down
12 changes: 9 additions & 3 deletions blocks/blockstore/caching.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,15 @@ import (
"gx/ipfs/QmRg1gKTHzc3CZXSKzem8aR4E3TubFhbgXwfVuWnSK5CC5/go-metrics-interface"
)

// CacheOpts wraps options for CachedBlockstore().
// Next to each option is its approximate memory usage per unit.
type CacheOpts struct {
	HasBloomFilterSize   int // 1 byte
	HasBloomFilterHashes int // No size, 7 is usually best, consult bloom papers
	HasARCCacheSize      int // 32 bytes
}

// DefaultCacheOpts returns a CacheOpts initialized with default values.
func DefaultCacheOpts() CacheOpts {
return CacheOpts{
HasBloomFilterSize: 512 << 10,
Expand All @@ -22,8 +24,12 @@ func DefaultCacheOpts() CacheOpts {
}
}

func CachedBlockstore(bs Blockstore,
ctx context.Context, opts CacheOpts) (cbs Blockstore, err error) {
// CachedBlockstore returns a blockstore wrapped in an ARCCache and
// then in a bloom filter cache, if the options indicate it.
func CachedBlockstore(
ctx context.Context,
bs Blockstore,
opts CacheOpts) (cbs Blockstore, err error) {
cbs = bs

if opts.HasBloomFilterSize < 0 || opts.HasBloomFilterHashes < 0 ||
Expand All @@ -42,7 +48,7 @@ func CachedBlockstore(bs Blockstore,
}
if opts.HasBloomFilterSize != 0 {
// *8 because of bytes to bits conversion
cbs, err = bloomCached(cbs, ctx, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
cbs, err = bloomCached(ctx, cbs, opts.HasBloomFilterSize*8, opts.HasBloomFilterHashes)
}

return cbs, err
Expand Down
Loading

0 comments on commit 63d326c

Please sign in to comment.