diff --git a/api/http/test_server.go b/api/http/test_server.go
index 6a6d5b1bc1..5a1412dc04 100644
--- a/api/http/test_server.go
+++ b/api/http/test_server.go
@@ -57,7 +57,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API, *pin.API) TestSe
}
tags := chunk.NewTags()
- fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams(), tags)
+ fileStore := storage.NewFileStore(localStore, localStore, storage.NewFileStoreParams(), tags)
// Swarm feeds test setup
feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
diff --git a/api/inspector.go b/api/inspector.go
index 5fffc16b25..5b56eb40e6 100644
--- a/api/inspector.go
+++ b/api/inspector.go
@@ -18,6 +18,7 @@ package api
import (
"context"
+ "encoding/json"
"fmt"
"strings"
"time"
@@ -25,17 +26,21 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/network"
+ stream "github.com/ethersphere/swarm/network/stream/v2"
"github.com/ethersphere/swarm/storage"
)
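+// InspectorIsPullSyncingTolerance is the time window after the last received
+// chunk message within which the node is still considered to be pull syncing.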
+const InspectorIsPullSyncingTolerance = 15 * time.Second
+
type Inspector struct {
api *API
hive *network.Hive
netStore *storage.NetStore
+ stream *stream.Registry
}
-func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore) *Inspector {
- return &Inspector{api, hive, netStore}
+func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore, pullSyncer *stream.Registry) *Inspector {
+ return &Inspector{api, hive, netStore, pullSyncer}
}
// Hive prints the kademlia table
@@ -49,15 +54,12 @@ func (i *Inspector) KademliaInfo() network.KademliaInfo {
}
func (i *Inspector) IsPullSyncing() bool {
- lastReceivedChunksMsg := metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
-
- // last received chunks msg time
- lrct := time.Unix(0, lastReceivedChunksMsg.Value())
+ t := i.stream.LastReceivedChunkTime()
// if the last received chunks msg time is within the last InspectorIsPullSyncingTolerance, then we say that the node is still syncing
// technically this is not correct, because this might have been a retrieve request, but for the time being it works for our purposes
// because we know we are not making retrieve requests on the node while checking this
- return lrct.After(time.Now().Add(-15 * time.Second))
+ return t.After(time.Now().Add(-InspectorIsPullSyncingTolerance))
}
// DeliveriesPerPeer returns the sum of chunks we received from a given peer
@@ -96,3 +98,15 @@ func (i *Inspector) Has(chunkAddresses []storage.Address) string {
return strings.Join(hostChunks, "")
}
+
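+// PeerStreams returns the stream registry's per-peer information, marshaled
+// as a JSON string.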
+func (i *Inspector) PeerStreams() (string, error) {
+ peerInfo, err := i.stream.PeerInfo()
+ if err != nil {
+ return "", err
+ }
+ v, err := json.Marshal(peerInfo)
+ if err != nil {
+ return "", err
+ }
+ return string(v), nil
+}
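As a usage illustration only (not part of the change): the two inspector endpoints above can be queried over RPC. The method names follow go-ethereum's RPC naming convention (namespace_methodName); the IPC path below is hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// hypothetical IPC endpoint of a running node
	client, err := rpc.Dial("bzzd.ipc")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// true if a chunk message arrived within the last
	// InspectorIsPullSyncingTolerance window
	var syncing bool
	if err := client.Call(&syncing, "inspector_isPullSyncing"); err != nil {
		log.Fatal(err)
	}

	// JSON-encoded per-peer stream state from the registry
	var peerStreams string
	if err := client.Call(&peerStreams, "inspector_peerStreams"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(syncing, peerStreams)
}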
diff --git a/api/inspector_test.go b/api/inspector_test.go
new file mode 100644
index 0000000000..2caefc3cb9
--- /dev/null
+++ b/api/inspector_test.go
@@ -0,0 +1,66 @@
+package api
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ethersphere/swarm/network"
+ stream "github.com/ethersphere/swarm/network/stream/v2"
+ "github.com/ethersphere/swarm/storage"
+ "github.com/ethersphere/swarm/storage/localstore"
+
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethersphere/swarm/state"
+)
+
+// TestInspectorPeerStreams validates that the response from the RPC method
+// inspector_peerStreams contains at least some expected data.
+func TestInspectorPeerStreams(t *testing.T) {
+ dir, err := ioutil.TempDir("", "swarm-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ baseKey := make([]byte, 32)
+ _, err = rand.Read(baseKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ localStore, err := localstore.New(dir, baseKey, &localstore.Options{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ netStore := storage.NewNetStore(localStore, baseKey, enode.ID{})
+
+ i := NewInspector(nil, nil, netStore, stream.New(state.NewInmemoryStore(), baseKey, stream.NewSyncProvider(netStore, network.NewKademlia(
+ baseKey,
+ network.NewKadParams(),
+ ), false, false)))
+
+ server := rpc.NewServer()
+ if err := server.RegisterName("inspector", i); err != nil {
+ t.Fatal(err)
+ }
+
+ client := rpc.DialInProc(server)
+
+ var peerInfo string
+
+ err = client.Call(&peerInfo, "inspector_peerStreams")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !strings.Contains(peerInfo, `"base":"`+hex.EncodeToString(baseKey)[:16]+`"`) {
+ t.Error("missing base key in response")
+ }
+
+ t.Log(peerInfo)
+}
diff --git a/api/manifest_test.go b/api/manifest_test.go
index f7664fab0d..6908d1bf30 100644
--- a/api/manifest_test.go
+++ b/api/manifest_test.go
@@ -43,7 +43,7 @@ func manifest(paths ...string) (manifestReader storage.LazySectionReader) {
func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
quitC := make(chan bool)
- fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(nil, nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt)
if err != nil {
@@ -100,7 +100,7 @@ func TestGetEntry(t *testing.T) {
func TestExactMatch(t *testing.T) {
quitC := make(chan bool)
mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
- fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(nil, nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(mf, ref, fileStore, false, quitC, nil)
if err != nil {
@@ -133,7 +133,7 @@ func TestAddFileWithManifestPath(t *testing.T) {
reader := &storage.LazyTestSectionReader{
SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
}
- fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(nil, nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt)
if err != nil {
diff --git a/cmd/swarm-smoke/upload_and_sync.go b/cmd/swarm-smoke/upload_and_sync.go
index 8961ad8af1..5f0a045da6 100644
--- a/cmd/swarm-smoke/upload_and_sync.go
+++ b/cmd/swarm-smoke/upload_and_sync.go
@@ -156,6 +156,7 @@ func trackChunks(testData []byte) error {
wg.Wait()
checkChunksVsMostProxHosts(addrs, allHostChunks, bzzAddrs)
+ metrics.GetOrRegisterGauge("deployment.nodes", nil).Update(int64(len(hosts)))
if !hasErr {
// remove the chunks stored on the uploader node
@@ -207,11 +208,10 @@ func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[strin
}
}
- log.Debug("sync mode", "sync mode", syncMode)
-
if syncMode == "pullsync" || syncMode == "both" {
for _, maxProxHost := range maxProxHosts {
if allHostChunks[maxProxHost][i] == '0' {
+ metrics.GetOrRegisterCounter("upload-and-sync.pull-sync.chunk-not-max-prox", nil).Inc(1)
log.Error("chunk not found at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
} else {
log.Trace("chunk present at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
@@ -220,6 +220,7 @@ func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[strin
// if the chunk is found at fewer than 2 hosts, which is less than the min size of a NN
if foundAt < 2 {
+ metrics.GetOrRegisterCounter("upload-and-sync.pull-sync.chunk-less-nn", nil).Inc(1)
log.Error("chunk found at less than two hosts", "foundAt", foundAt, "ref", addrs[i])
}
}
@@ -235,6 +236,7 @@ func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[strin
if !found {
for _, maxProxHost := range maxProxHosts {
+ metrics.GetOrRegisterCounter("upload-and-sync.push-sync.chunk-not-max-prox", nil).Inc(1)
log.Error("chunk not found at any max prox host", "ref", addrs[i], "hosts", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
}
}
@@ -270,6 +272,8 @@ func uploadAndSync(c *cli.Context, randomBytes []byte) error {
}
t2 := time.Since(t1)
metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).Update(t2)
+ uploadSpeed := float64(len(randomBytes)) / t2.Seconds() // bytes per second
+ metrics.GetOrRegisterGauge("upload-and-sync.upload-speed", nil).Update(int64(uploadSpeed))
fhash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
@@ -315,6 +319,9 @@ func uploadAndSync(c *cli.Context, randomBytes []byte) error {
ended := time.Since(start)
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
+ downloadSpeed := float64(len(randomBytes)) / ended.Seconds() // bytes per second
+ metrics.GetOrRegisterGauge("upload-and-sync.download-speed", nil).Update(int64(downloadSpeed))
+
log.Info("fetch successful", "took", ended, "endpoint", httpEndpoint(hosts[randIndex]))
break
}
diff --git a/cmd/swarm-snapshot/create.go b/cmd/swarm-snapshot/create.go
index b31e037bf9..7d4cdcc0f8 100644
--- a/cmd/swarm-snapshot/create.go
+++ b/cmd/swarm-snapshot/create.go
@@ -74,7 +74,7 @@ func createSnapshot(filename string, nodes int, services []string) (err error) {
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
- return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil, nil
},
})
defer sim.Close()
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index e68d3748ca..63867bc280 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -34,10 +34,11 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm"
"github.com/ethersphere/swarm/api"
+ "github.com/ethersphere/swarm/testutil"
)
func TestConfigDump(t *testing.T) {
- swarm := runSwarm(t, "dumpconfig")
+ swarm := runSwarm(t, "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel), "dumpconfig")
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
@@ -53,6 +54,7 @@ func TestConfigFailsSwapEnabledNoBackendURL(t *testing.T) {
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", SwarmSwapEnabledFlag.Name),
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
swarm := runSwarm(t, flags...)
@@ -89,6 +91,7 @@ func TestBzzKeyFlag(t *testing.T) {
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
fmt.Sprintf("--%s", SwarmBzzKeyHexFlag.Name), hexKey,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
@@ -137,6 +140,7 @@ func TestEmptyBzzAccountFlagMultipleAccounts(t *testing.T) {
fmt.Sprintf("--%s", SwarmPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
@@ -160,6 +164,7 @@ func TestEmptyBzzAccountFlagSingleAccount(t *testing.T) {
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
@@ -205,6 +210,7 @@ func TestEmptyBzzAccountFlagNoAccountWrongPassword(t *testing.T) {
fmt.Sprintf("--%s", SwarmPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
@@ -244,6 +250,7 @@ func TestConfigCmdLineOverrides(t *testing.T) {
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -342,6 +349,7 @@ func TestConfigFileOverrides(t *testing.T) {
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
@@ -420,6 +428,7 @@ func TestConfigEnvVars(t *testing.T) {
"--ens-api", "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
//node.Cmd = runSwarm(t,flags...)
@@ -551,6 +560,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
+ "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
diff --git a/cmd/swarm/explore.go b/cmd/swarm/explore.go
index 7e7fd6757e..9223f92242 100644
--- a/cmd/swarm/explore.go
+++ b/cmd/swarm/explore.go
@@ -48,7 +48,7 @@ func hashes(ctx *cli.Context) {
}
defer f.Close()
- fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, &storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
refs, err := fileStore.GetAllReferences(context.TODO(), f)
if err != nil {
utils.Fatalf("%v\n", err)
diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go
index 4dfadf771c..a85a688d5e 100644
--- a/cmd/swarm/hash.go
+++ b/cmd/swarm/hash.go
@@ -78,7 +78,7 @@ func hash(ctx *cli.Context) {
defer f.Close()
stat, _ := f.Stat()
- fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, &storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
if err != nil {
diff --git a/network/networkid_test.go b/network/networkid_test.go
index 217c50a3e6..cd60e89849 100644
--- a/network/networkid_test.go
+++ b/network/networkid_test.go
@@ -216,7 +216,7 @@ func newServices() adapters.Services {
HiveParams: hp,
NetworkID: uint64(currentNetworkID),
}
- return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil), nil
+ return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil, nil, nil), nil
},
}
}
diff --git a/network/newstream/common_test.go b/network/newstream/common_test.go
deleted file mode 100644
index c479b28dc6..0000000000
--- a/network/newstream/common_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package newstream
-
-import (
- "io/ioutil"
- "os"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/storage/localstore"
- "github.com/ethersphere/swarm/storage/mock"
- "github.com/ethersphere/swarm/testutil"
-)
-
-func init() {
- testutil.Init()
-}
-
-func newTestLocalStore(id enode.ID, addr *network.BzzAddr, globalStore mock.GlobalStorer) (localStore *localstore.DB, cleanup func(), err error) {
- dir, err := ioutil.TempDir("", "swarm-stream-")
- if err != nil {
- return nil, nil, err
- }
- cleanup = func() {
- os.RemoveAll(dir)
- }
-
- var mockStore *mock.NodeStore
- if globalStore != nil {
- mockStore = globalStore.NewNodeStore(common.BytesToAddress(id.Bytes()))
- }
-
- localStore, err = localstore.New(dir, addr.Over(), &localstore.Options{
- MockStore: mockStore,
- })
- if err != nil {
- cleanup()
- return nil, nil, err
- }
- return localStore, cleanup, nil
-}
diff --git a/network/newstream/peer.go b/network/newstream/peer.go
deleted file mode 100644
index ed814fadc8..0000000000
--- a/network/newstream/peer.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package newstream
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/bitvector"
- "github.com/ethersphere/swarm/state"
-)
-
-var ErrEmptyBatch = errors.New("empty batch")
-
-const (
- HashSize = 32
- BatchSize = 16
-)
-
-// Peer is the Peer extension for the streaming protocol
-type Peer struct {
- *network.BzzPeer
- mtx sync.Mutex
- providers map[string]StreamProvider
- intervalsStore state.Store
-
- streamCursorsMu sync.Mutex
- streamCursors map[string]uint64 // key: Stream ID string representation, value: session cursor. Keeps cursors for all streams. when unset - we are not interested in that bin
- dirtyStreams map[string]bool // key: stream ID, value: whether cursors for a stream should be updated
- activeBoundedGets map[string]chan struct{}
- openWants map[uint]*want // maintain open wants on the client side
- openOffers map[uint]offer // maintain open offers on the server side
- quit chan struct{} // closed when peer is going offline
-}
-
-// NewPeer is the constructor for Peer
-func NewPeer(peer *network.BzzPeer, i state.Store, providers map[string]StreamProvider) *Peer {
- p := &Peer{
- BzzPeer: peer,
- providers: providers,
- intervalsStore: i,
- streamCursors: make(map[string]uint64),
- dirtyStreams: make(map[string]bool),
- openWants: make(map[uint]*want),
- openOffers: make(map[uint]offer),
- quit: make(chan struct{}),
- }
- return p
-}
-func (p *Peer) Left() {
- close(p.quit)
-}
-
-// HandleMsg is the message handler that delegates incoming messages
-func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
- switch msg := msg.(type) {
- default:
- return fmt.Errorf("unknown message type: %T", msg)
- }
-}
-
-type offer struct {
- ruid uint
- stream ID
- hashes []byte
- requested time.Time
-}
-
-type want struct {
- ruid uint
- from uint64
- to uint64
- stream ID
- hashes map[string]bool
- bv *bitvector.BitVector
- requested time.Time
- remaining uint64
- chunks chan chunk.Chunk
- done chan error
-}
diff --git a/network/newstream/stream.go b/network/newstream/stream.go
deleted file mode 100644
index 77b1d8ca7d..0000000000
--- a/network/newstream/stream.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2019 The Swarm Authors
-// This file is part of the Swarm library.
-//
-// The Swarm library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The Swarm library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-
-package newstream
-
-import (
- "sync"
-
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/rpc"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/p2p/protocols"
- "github.com/ethersphere/swarm/state"
-)
-
-// SlipStream implements node.Service
-var _ node.Service = (*SlipStream)(nil)
-
-var SyncerSpec = &protocols.Spec{
- Name: "bzz-stream",
- Version: 8,
- MaxMsgSize: 10 * 1024 * 1024,
- Messages: []interface{}{
- StreamInfoReq{},
- StreamInfoRes{},
- GetRange{},
- OfferedHashes{},
- ChunkDelivery{},
- WantedHashes{},
- },
-}
-
-// SlipStream is the base type that handles all client/server operations on a node
-// it is instantiated once per stream protocol instance, that is, it should have
-// one instance per node
-type SlipStream struct {
- mtx sync.RWMutex
- intervalsStore state.Store //every protocol would make use of this
- peers map[enode.ID]*Peer
- kad *network.Kademlia
-
- providers map[string]StreamProvider
-
- spec *protocols.Spec //this protocol's spec
- balance protocols.Balance //implements protocols.Balance, for accounting
- prices protocols.Prices //implements protocols.Prices, provides prices to accounting
-
- quit chan struct{} // terminates registry goroutines
-}
-
-func NewSlipStream(intervalsStore state.Store, kad *network.Kademlia, providers ...StreamProvider) *SlipStream {
- slipStream := &SlipStream{
- intervalsStore: intervalsStore,
- kad: kad,
- peers: make(map[enode.ID]*Peer),
- providers: make(map[string]StreamProvider),
- quit: make(chan struct{}),
- }
-
- for _, p := range providers {
- slipStream.providers[p.StreamName()] = p
- }
-
- slipStream.spec = SyncerSpec
-
- return slipStream
-}
-
-func (s *SlipStream) getPeer(id enode.ID) *Peer {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- p := s.peers[id]
- return p
-}
-
-func (s *SlipStream) addPeer(p *Peer) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- s.peers[p.ID()] = p
-}
-
-func (s *SlipStream) removePeer(p *Peer) {
- s.mtx.Lock()
- defer s.mtx.Unlock()
- if _, found := s.peers[p.ID()]; found {
- log.Error("removing peer", "id", p.ID())
- delete(s.peers, p.ID())
- p.Left()
- } else {
- log.Warn("peer was marked for removal but not found", "peer", p.ID())
- }
-}
-
-// Run is being dispatched when 2 nodes connect
-func (s *SlipStream) Run(p *p2p.Peer, rw p2p.MsgReadWriter) error {
- peer := protocols.NewPeer(p, rw, s.spec)
- bp := network.NewBzzPeer(peer)
-
- np := network.NewPeer(bp, s.kad)
- s.kad.On(np)
- defer s.kad.Off(np)
-
- sp := NewPeer(bp, s.intervalsStore, s.providers)
- s.addPeer(sp)
- defer s.removePeer(sp)
- return peer.Run(sp.HandleMsg)
-}
-
-func (s *SlipStream) Protocols() []p2p.Protocol {
- return []p2p.Protocol{
- {
- Name: "bzz-stream",
- Version: 1,
- Length: 10 * 1024 * 1024,
- Run: s.Run,
- },
- }
-}
-
-func (s *SlipStream) APIs() []rpc.API {
- return []rpc.API{
- {
- Namespace: "bzz-stream",
- Version: "1.0",
- Service: NewAPI(s),
- Public: false,
- },
- }
-}
-
-// Additional public methods accessible through API for pss
-type API struct {
- *SlipStream
-}
-
-func NewAPI(s *SlipStream) *API {
- return &API{SlipStream: s}
-}
-
-func (s *SlipStream) Start(server *p2p.Server) error {
- log.Info("slip stream starting")
- return nil
-}
-
-func (s *SlipStream) Stop() error {
- log.Info("slip stream closing")
- s.mtx.Lock()
- defer s.mtx.Unlock()
- close(s.quit)
- return nil
-}
diff --git a/network/protocol.go b/network/protocol.go
index f27dc6beda..be8cc0d024 100644
--- a/network/protocol.go
+++ b/network/protocol.go
@@ -119,13 +119,15 @@ type BzzConfig struct {
// Bzz is the swarm protocol bundle
type Bzz struct {
*Hive
- NetworkID uint64
- localAddr *BzzAddr
- mtx sync.Mutex
- handshakes map[enode.ID]*HandshakeMsg
- streamerSpec *protocols.Spec
- streamerRun func(*BzzPeer) error
- capabilities *capability.Capabilities // capabilities control and state
+ NetworkID uint64
+ localAddr *BzzAddr
+ mtx sync.Mutex
+ handshakes map[enode.ID]*HandshakeMsg
+ streamerSpec *protocols.Spec
+ streamerRun func(*BzzPeer) error
+ capabilities *capability.Capabilities // capabilities control and state
+ retrievalSpec *protocols.Spec
+ retrievalRun func(*BzzPeer) error
}
// NewBzz is the swarm protocol constructor
@@ -133,20 +135,24 @@ type Bzz struct {
// * bzz config
// * overlay driver
// * peer store
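+// * streamer and retrieval protocol specs with their run functions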
-func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz {
+func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec, retrievalSpec *protocols.Spec, streamerRun, retrievalRun func(*BzzPeer) error) *Bzz {
bzz := &Bzz{
- Hive: NewHive(config.HiveParams, kad, store),
- NetworkID: config.NetworkID,
- localAddr: &BzzAddr{config.OverlayAddr, config.UnderlayAddr},
- handshakes: make(map[enode.ID]*HandshakeMsg),
- streamerRun: streamerRun,
- streamerSpec: streamerSpec,
- capabilities: capability.NewCapabilities(),
+ Hive: NewHive(config.HiveParams, kad, store),
+ NetworkID: config.NetworkID,
+ localAddr: &BzzAddr{config.OverlayAddr, config.UnderlayAddr},
+ handshakes: make(map[enode.ID]*HandshakeMsg),
+ streamerRun: streamerRun,
+ streamerSpec: streamerSpec,
+ retrievalRun: retrievalRun,
+ retrievalSpec: retrievalSpec,
+ capabilities: capability.NewCapabilities(),
}
if config.BootnodeMode {
bzz.streamerRun = nil
bzz.streamerSpec = nil
+ bzz.retrievalRun = nil
+ bzz.retrievalSpec = nil
}
// temporary soon-to-be-legacy light/full, as above
@@ -209,6 +215,14 @@ func (b *Bzz) Protocols() []p2p.Protocol {
Run: b.RunProtocol(b.streamerSpec, b.streamerRun),
})
}
+ if b.retrievalSpec != nil && b.retrievalRun != nil {
+ protocol = append(protocol, p2p.Protocol{
+ Name: b.retrievalSpec.Name,
+ Version: b.retrievalSpec.Version,
+ Length: b.retrievalSpec.Length(),
+ Run: b.RunProtocol(b.retrievalSpec, b.retrievalRun),
+ })
+ }
return protocol
}
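A sketch of a call site that actually wires the retrieval protocol in, rather than disabling it with nils as the test and simulation call sites do; the surrounding setup (config, kad, netStore, store) is assumed, and the real wiring lives in the swarm service constructor, which is not part of this diff.

package wiring

import (
	"github.com/ethersphere/swarm/network"
	"github.com/ethersphere/swarm/network/retrieval"
	"github.com/ethersphere/swarm/state"
	"github.com/ethersphere/swarm/storage"
)

// newBzzWithRetrieval is illustrative only.
func newBzzWithRetrieval(config *network.BzzConfig, kad *network.Kademlia, netStore *storage.NetStore, store state.Store) *network.Bzz {
	r := retrieval.New(kad, netStore, kad.BaseAddr())
	netStore.RemoteGet = r.RequestFromPeers
	// the stream protocol is left disabled here (nil spec and run function),
	// as in the simulation call sites above
	return network.NewBzz(config, kad, store, nil, retrieval.Spec, nil, r.Run)
}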
diff --git a/network/protocol_test.go b/network/protocol_test.go
index 486b948774..6e36515b52 100644
--- a/network/protocol_test.go
+++ b/network/protocol_test.go
@@ -175,7 +175,7 @@ func newBzz(addr *BzzAddr, lightNode bool) *Bzz {
LightNode: lightNode,
}
kad := NewKademlia(addr.OAddr, NewKadParams())
- bzz := NewBzz(config, kad, nil, nil, nil)
+ bzz := NewBzz(config, kad, nil, nil, nil, nil, nil)
return bzz
}
diff --git a/network/retrieval/retrieve.go b/network/retrieval/retrieve.go
index 79f61f9bd6..ce8bb08488 100644
--- a/network/retrieval/retrieve.go
+++ b/network/retrieval/retrieve.go
@@ -19,18 +19,20 @@ package retrieval
import (
"bytes"
"context"
+ "encoding/hex"
"errors"
"fmt"
+ "reflect"
"sync"
"time"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/network"
"github.com/ethersphere/swarm/network/timeouts"
"github.com/ethersphere/swarm/p2p/protocols"
@@ -45,14 +47,13 @@ var (
_ node.Service = &Retrieval{}
// Metrics
- processReceivedChunksCount = metrics.NewRegisteredCounter("network.retrieve.received_chunks.count", nil)
- handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.retrieve.handle_retrieve_request_msg.count", nil)
- retrieveChunkFail = metrics.NewRegisteredCounter("network.retrieve.retrieve_chunks_fail.count", nil)
+ processReceivedChunksCount = metrics.NewRegisteredCounter("network.retrieve.received_chunks_handled", nil)
+ handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.retrieve.handle_retrieve_request_msg", nil)
+ retrieveChunkFail = metrics.NewRegisteredCounter("network.retrieve.retrieve_chunks_fail", nil)
- lastReceivedRetrieveChunksMsg = metrics.GetOrRegisterGauge("network.retrieve.received_chunks", nil)
+ retrievalPeers = metrics.GetOrRegisterGauge("network.retrieve.peers", nil)
-	// Protocol spec
-	spec = &protocols.Spec{
+	// Spec is the protocol spec of the bzz-retrieve protocol
+	Spec = &protocols.Spec{
Name: "bzz-retrieve",
Version: 1,
MaxMsgSize: 10 * 1024 * 1024,
@@ -65,61 +66,105 @@ var (
ErrNoPeerFound = errors.New("no peer found")
)
+// RetrievalPrices defines the price matrix that correlates a message
+// type to a certain price (or price function)
+type RetrievalPrices struct {
+ priceMatrix map[reflect.Type]*protocols.Price
+}
+
+// Price implements the protocols.Price interface and returns the price for a specific message
+func (p *RetrievalPrices) Price(msg interface{}) *protocols.Price {
+ t := reflect.TypeOf(msg).Elem()
+ return p.priceMatrix[t]
+}
+
+func (p *RetrievalPrices) retrieveRequestPrice() uint64 {
+ return uint64(1)
+}
+
+func (p *RetrievalPrices) chunkDeliveryPrice() uint64 {
+ return uint64(1)
+}
+
+// createPriceOracle sets up a matrix which can be queried to get
+// the price for a message via the Price method
+func (r *Retrieval) createPriceOracle() {
+ p := &RetrievalPrices{}
+ p.priceMatrix = map[reflect.Type]*protocols.Price{
+ reflect.TypeOf(ChunkDelivery{}): {
+ Value: p.chunkDeliveryPrice(),
+ PerByte: true,
+ Payer: protocols.Receiver,
+ },
+ reflect.TypeOf(RetrieveRequest{}): {
+ Value: p.retrieveRequestPrice(),
+ PerByte: false,
+ Payer: protocols.Sender,
+ },
+ }
+ r.prices = p
+}
+
// Retrieval holds state and handles protocol messages for the `bzz-retrieve` protocol
type Retrieval struct {
- mtx sync.Mutex
+ mtx sync.RWMutex
netStore *storage.NetStore
kad *network.Kademlia
peers map[enode.ID]*Peer
- spec *protocols.Spec //this protocol's spec
-
- quit chan struct{} // termination
+ prices protocols.Prices
+ logger log.Logger
+ quit chan struct{}
}
-// NewRetrieval returns a new instance of the retrieval protocol handler
-func New(kad *network.Kademlia, ns *storage.NetStore) *Retrieval {
- return &Retrieval{
+// New returns a new instance of the retrieval protocol handler
+func New(kad *network.Kademlia, ns *storage.NetStore, baseKey []byte) *Retrieval {
+ r := &Retrieval{
kad: kad,
peers: make(map[enode.ID]*Peer),
netStore: ns,
+ logger: log.New("base", hex.EncodeToString(baseKey)[:16]),
quit: make(chan struct{}),
- spec: spec,
}
+ r.createPriceOracle()
+ return r
}
func (r *Retrieval) addPeer(p *Peer) {
r.mtx.Lock()
defer r.mtx.Unlock()
r.peers[p.ID()] = p
+ retrievalPeers.Update(int64(len(r.peers)))
}
func (r *Retrieval) removePeer(p *Peer) {
r.mtx.Lock()
defer r.mtx.Unlock()
delete(r.peers, p.ID())
+ retrievalPeers.Update(int64(len(r.peers)))
}
func (r *Retrieval) getPeer(id enode.ID) *Peer {
- r.mtx.Lock()
- defer r.mtx.Unlock()
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
return r.peers[id]
}
-// Run protocol function
-func (r *Retrieval) Run(p *p2p.Peer, rw p2p.MsgReadWriter) error {
- peer := protocols.NewPeer(p, rw, r.spec)
- bp := network.NewBzzPeer(peer)
+// Run is executed when two nodes connect
+func (r *Retrieval) Run(bp *network.BzzPeer) error {
sp := NewPeer(bp)
r.addPeer(sp)
defer r.removePeer(sp)
- return peer.Run(r.handleMsg(sp))
+
+ return sp.Peer.Run(r.handleMsg(sp))
}
func (r *Retrieval) handleMsg(p *Peer) func(context.Context, interface{}) error {
return func(ctx context.Context, msg interface{}) error {
switch msg := msg.(type) {
case *RetrieveRequest:
+			// handle the request in a separate goroutine, otherwise parallel
+			// requests for other chunks from the same peer would get stuck in the queue
go r.handleRetrieveRequest(ctx, p, msg)
case *ChunkDelivery:
go r.handleChunkDelivery(ctx, p, msg)
@@ -134,7 +179,7 @@ func (r *Retrieval) handleMsg(p *Peer) func(context.Context, interface{}) error
// this is used only for tracing, and can probably be refactored so that we don't have to
// iterate over Kademlia
func (r *Retrieval) getOriginPo(req *storage.Request) int {
- log.Trace("retrieval.getOriginPo", "req.Addr", req.Addr)
+ r.logger.Trace("retrieval.getOriginPo", "req.Addr", req.Addr)
originPo := -1
r.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
@@ -154,7 +199,7 @@ func (r *Retrieval) getOriginPo(req *storage.Request) int {
// findPeer finds a peer to ask for a specific chunk, according to our kademlia
func (r *Retrieval) findPeer(ctx context.Context, req *storage.Request) (retPeer *network.Peer, err error) {
- log.Trace("retrieval.findPeer", "req.Addr", req.Addr)
+ r.logger.Trace("retrieval.findPeer", "req.Addr", req.Addr)
osp, _ := ctx.Value("remote.fetch").(opentracing.Span)
// originPo - proximity of the node that made the request; -1 if the request originator is our node;
@@ -193,33 +238,33 @@ func (r *Retrieval) findPeer(ctx context.Context, req *storage.Request) (retPeer
// skip peers that we have already tried
if req.SkipPeer(id.String()) {
- log.Trace("findpeer skip peer", "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer skip peer", "peer", id, "ref", req.Addr.String())
return true
}
if myPo < depth { // chunk is NOT within the neighbourhood
if po <= myPo { // always choose a peer strictly closer to chunk than us
- log.Trace("findpeer1a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer1a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
return false
} else {
- log.Trace("findpeer1b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer1b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
}
} else { // chunk IS WITHIN neighbourhood
if po < depth { // do not select peer outside the neighbourhood. But allows peers further from the chunk than us
- log.Trace("findpeer2a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer2a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
return false
} else if po <= originPo { // avoid loop in neighbourhood, so not forward when a request comes from the neighbourhood
- log.Trace("findpeer2b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer2b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
return false
} else {
- log.Trace("findpeer2c", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer2c", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
}
}
// if selected peer is not in the depth (2nd condition; if depth <= po, then peer is in nearest neighbourhood)
// and they have a lower po than ours, return error
if po < myPo && depth > po {
- log.Trace("findpeer4 skip peer because origin was closer", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer4 skip peer because origin was closer", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
err = fmt.Errorf("not asking peers further away from origin; ref=%s originpo=%v po=%v depth=%v myPo=%v", req.Addr.String(), originPo, po, depth, myPo)
return false
@@ -228,7 +273,7 @@ func (r *Retrieval) findPeer(ctx context.Context, req *storage.Request) (retPeer
// if chunk falls in our nearest neighbourhood (1st condition), but suggested peer is not in
// the nearest neighbourhood (2nd condition), don't forward the request to suggested peer
if depth <= myPo && depth > po {
- log.Trace("findpeer5 skip peer because depth", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
+ r.logger.Trace("findpeer5 skip peer because depth", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
err = fmt.Errorf("not going outside of depth; ref=%s originpo=%v po=%v depth=%v myPo=%v", req.Addr.String(), originPo, po, depth, myPo)
return false
@@ -289,7 +334,7 @@ func (r *Retrieval) handleRetrieveRequest(ctx context.Context, p *Peer, msg *Ret
chunk, err := r.netStore.Get(ctx, chunk.ModeGetRequest, req)
if err != nil {
retrieveChunkFail.Inc(1)
- p.logger.Debug("netstore.Get can not retrieve chunk", "ref", msg.Addr, "err", err)
+ p.logger.Error("netstore.Get can not retrieve chunk", "ref", msg.Addr, "err", err)
return
}
@@ -312,7 +357,7 @@ func (r *Retrieval) handleRetrieveRequest(ctx context.Context, p *Peer, msg *Ret
// handleChunkDelivery handles a ChunkDelivery message from a certain peer
// if the chunk proximity order in relation to our base address is within depth
// we treat the chunk as a chunk received in syncing
-func (r *Retrieval) handleChunkDelivery(ctx context.Context, p *Peer, msg *ChunkDelivery) error {
+func (r *Retrieval) handleChunkDelivery(ctx context.Context, p *Peer, msg *ChunkDelivery) {
p.logger.Debug("retrieval.handleChunkDelivery", "ref", msg.Addr)
var osp opentracing.Span
ctx, osp = spancontext.StartSpan(
@@ -321,11 +366,8 @@ func (r *Retrieval) handleChunkDelivery(ctx context.Context, p *Peer, msg *Chunk
processReceivedChunksCount.Inc(1)
- // record the last time we received a chunk delivery message
- lastReceivedRetrieveChunksMsg.Update(time.Now().UnixNano())
-
// count how many chunks we receive for retrieve requests per peer
- peermetric := fmt.Sprintf("chunk.delivery.%x", p.BzzAddr.Over()[:16])
+ peermetric := fmt.Sprintf("network.retrieve.chunk.delivery.%x", p.BzzAddr.Over()[:16])
metrics.GetOrRegisterCounter(peermetric, nil).Inc(1)
peerPO := chunk.Proximity(p.BzzAddr.Over(), msg.Addr)
@@ -340,26 +382,20 @@ func (r *Retrieval) handleChunkDelivery(ctx context.Context, p *Peer, msg *Chunk
// do not sync if the peer that is sending us a chunk is closer to the chunk than we are
mode = chunk.ModePutRequest
}
+ defer osp.Finish()
- p.logger.Trace("handle.chunk.delivery", "ref", msg.Addr)
-
- go func() {
- defer osp.Finish()
- p.logger.Trace("handle.chunk.delivery", "put", msg.Addr)
- _, err := r.netStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData))
- if err != nil {
- if err == storage.ErrChunkInvalid {
- p.Drop()
- }
+ _, err := r.netStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData))
+ if err != nil {
+ p.logger.Error("netstore error putting chunk to localstore", "err", err)
+ if err == storage.ErrChunkInvalid {
+ p.Drop()
}
- p.logger.Trace("handle.chunk.delivery", "done put", msg.Addr, "err", err)
- }()
- return nil
+ }
}
// RequestFromPeers sends a chunk retrieve request to the next found peer
func (r *Retrieval) RequestFromPeers(ctx context.Context, req *storage.Request, localID enode.ID) (*enode.ID, error) {
- log.Debug("retrieval.requestFromPeers", "req.Addr", req.Addr)
+ r.logger.Debug("retrieval.requestFromPeers", "req.Addr", req.Addr, "localID", localID)
metrics.GetOrRegisterCounter("network.retrieve.request_from_peers", nil).Inc(1)
const maxFindPeerRetries = 5
@@ -368,15 +404,17 @@ func (r *Retrieval) RequestFromPeers(ctx context.Context, req *storage.Request,
FINDPEER:
sp, err := r.findPeer(ctx, req)
if err != nil {
- log.Trace(err.Error())
+ r.logger.Error(err.Error())
return nil, err
}
protoPeer := r.getPeer(sp.ID())
if protoPeer == nil {
+ r.logger.Warn("findPeer returned a peer to skip", "peer", sp.String(), "retry", retries)
+ req.PeersToSkip.Store(sp.ID().String(), time.Now())
retries++
if retries == maxFindPeerRetries {
- log.Error("max find peer retries reached", "max retries", maxFindPeerRetries)
+ r.logger.Error("max find peer retries reached", "max retries", maxFindPeerRetries)
return nil, ErrNoPeerFound
}
@@ -398,12 +436,12 @@ FINDPEER:
}
func (r *Retrieval) Start(server *p2p.Server) error {
- log.Info("starting bzz-retrieve")
+ r.logger.Info("starting bzz-retrieve")
return nil
}
func (r *Retrieval) Stop() error {
- log.Info("shutting down bzz-retrieve")
+ r.logger.Info("shutting down bzz-retrieve")
close(r.quit)
return nil
}
@@ -411,14 +449,21 @@ func (r *Retrieval) Stop() error {
func (r *Retrieval) Protocols() []p2p.Protocol {
return []p2p.Protocol{
{
- Name: r.spec.Name,
- Version: r.spec.Version,
- Length: r.spec.Length(),
- Run: r.Run,
+ Name: Spec.Name,
+ Version: Spec.Version,
+ Length: Spec.Length(),
+ Run: r.runProtocol,
},
}
}
+func (r *Retrieval) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ peer := protocols.NewPeer(p, rw, Spec)
+ bp := network.NewBzzPeer(peer)
+
+ return r.Run(bp)
+}
+
func (r *Retrieval) APIs() []rpc.API {
return nil
}
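The price matrix above is keyed by the concrete message type, while handlers receive pointers, which is why Price calls reflect.TypeOf(msg).Elem() before the lookup. A minimal standalone sketch of the same pattern:

package main

import (
	"fmt"
	"reflect"
)

type ChunkDelivery struct{ SData []byte }

// the map key is the element type ChunkDelivery, not *ChunkDelivery
var priceMatrix = map[reflect.Type]uint64{
	reflect.TypeOf(ChunkDelivery{}): 1,
}

func price(msg interface{}) uint64 {
	// strip the pointer so &ChunkDelivery{} resolves to ChunkDelivery
	return priceMatrix[reflect.TypeOf(msg).Elem()]
}

func main() {
	fmt.Println(price(&ChunkDelivery{})) // prints 1
}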
diff --git a/network/retrieval/retrieve_test.go b/network/retrieval/retrieve_test.go
index 072fe6c158..f9a6f60417 100644
--- a/network/retrieval/retrieve_test.go
+++ b/network/retrieval/retrieve_test.go
@@ -242,7 +242,7 @@ func TestRequestFromPeers(t *testing.T) {
to.On(peer)
- s := New(to, nil)
+ s := New(to, nil, to.BaseAddr())
req := storage.NewRequest(storage.Address(hash0[:]))
id, err := s.findPeer(context.Background(), req)
@@ -273,7 +273,7 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
to.On(peer)
- r := New(to, nil)
+ r := New(to, nil, to.BaseAddr())
req := storage.NewRequest(storage.Address(hash0[:]))
// making a request which should return with "no peer found"
@@ -284,6 +284,31 @@ func TestRequestFromPeersWithLightNode(t *testing.T) {
}
}
+// TestHasPriceImplementation checks that Retrieval implements protocols.Prices
+func TestHasPriceImplementation(t *testing.T) {
+ addr := network.RandomAddr()
+ to := network.NewKademlia(addr.OAddr, network.NewKadParams())
+ r := New(to, nil, to.BaseAddr())
+
+ if r.prices == nil {
+ t.Fatal("No prices implementation available for retrieve protocol")
+ }
+
+ pricesInstance, ok := r.prices.(*RetrievalPrices)
+ if !ok {
+ t.Fatal("Retrieval does not have the expected Prices instance")
+ }
+ price := pricesInstance.Price(&ChunkDelivery{})
+ if price == nil || price.Value == 0 || price.Value != pricesInstance.chunkDeliveryPrice() {
+ t.Fatal("No prices set for chunk delivery msg")
+ }
+
+ price = pricesInstance.Price(&RetrieveRequest{})
+ if price == nil || price.Value == 0 || price.Value != pricesInstance.retrieveRequestPrice() {
+ t.Fatal("No prices set for retrieve requests")
+ }
+}
+
func newBzzRetrieveWithLocalstore(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
n := ctx.Config.Node()
addr := network.NewAddr(n)
@@ -301,9 +326,9 @@ func newBzzRetrieveWithLocalstore(ctx *adapters.ServiceContext, bucket *sync.Map
bucket.Store(simulation.BucketKeyKademlia, kad)
}
- netStore := storage.NewNetStore(localStore, n.ID())
+ netStore := storage.NewNetStore(localStore, kad.BaseAddr(), n.ID())
lnetStore := storage.NewLNetStore(netStore)
- fileStore := storage.NewFileStore(lnetStore, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(lnetStore, lnetStore, storage.NewFileStoreParams(), chunk.NewTags())
var store *state.DBStore
// Use on-disk DBStore to reduce memory consumption in race tests.
@@ -316,7 +341,7 @@ func newBzzRetrieveWithLocalstore(ctx *adapters.ServiceContext, bucket *sync.Map
return nil, nil, err
}
- r := New(kad, netStore)
+ r := New(kad, netStore, kad.BaseAddr())
netStore.RemoteGet = r.RequestFromPeers
bucket.Store(bucketKeyFileStore, fileStore)
bucket.Store(bucketKeyNetstore, netStore)
diff --git a/network/retrieval/wire.go b/network/retrieval/wire.go
index d92db6250f..58a5dd69e5 100644
--- a/network/retrieval/wire.go
+++ b/network/retrieval/wire.go
@@ -18,7 +18,7 @@ package retrieval
import "github.com/ethersphere/swarm/storage"
-// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
+// RetrieveRequest is the protocol msg for chunk retrieve requests
type RetrieveRequest struct {
Addr storage.Address
}
@@ -26,5 +26,5 @@ type RetrieveRequest struct {
// ChunkDelivery is the protocol msg for delivering a solicited chunk to a peer
type ChunkDelivery struct {
Addr storage.Address
- SData []byte // the stored chunk Data (incl size)
+ SData []byte
}
diff --git a/network/simulation/example_test.go b/network/simulation/example_test.go
index 9e2601219f..f8d126552f 100644
--- a/network/simulation/example_test.go
+++ b/network/simulation/example_test.go
@@ -48,7 +48,7 @@ func ExampleSimulation_WaitTillHealthy() {
// store kademlia in node's bucket under BucketKeyKademlia
// so that it can be found by WaitTillHealthy method.
b.Store(simulation.BucketKeyKademlia, kad)
- return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil, nil
},
})
defer sim.Close()
diff --git a/network/simulation/kademlia_test.go b/network/simulation/kademlia_test.go
index c69832d29e..73f8e71a8d 100644
--- a/network/simulation/kademlia_test.go
+++ b/network/simulation/kademlia_test.go
@@ -140,7 +140,7 @@ func createSimServiceMap(discovery bool) map[string]ServiceFunc {
// store kademlia in node's bucket under BucketKeyKademlia
// so that it can be found by WaitTillHealthy method.
b.Store(BucketKeyKademlia, kad)
- return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil, nil
},
}
}
diff --git a/network/simulation/node.go b/network/simulation/node.go
index 0a3774be50..2e2352c678 100644
--- a/network/simulation/node.go
+++ b/network/simulation/node.go
@@ -298,8 +298,19 @@ func (s *Simulation) StopNode(id enode.ID) (err error) {
}
-// StopRandomNode stops a random node.
-func (s *Simulation) StopRandomNode() (id enode.ID, err error) {
-	n := s.Net.GetRandomUpNode()
+// StopRandomNode stops a random node. Nodes whose IDs are given in the
+// protect list are never selected for stopping.
+func (s *Simulation) StopRandomNode(protect ...enode.ID) (id enode.ID, err error) {
+	found := false
+	var n *simulations.Node
+outer:
+	for !found {
+		n = s.Net.GetRandomUpNode()
+		if n == nil {
+			// no up nodes at all; fall through to the ErrNodeNotFound below
+			break
+		}
+		for _, v := range protect {
+			if bytes.Equal(n.ID().Bytes(), v.Bytes()) {
+				continue outer
+			}
+		}
+		found = true
+	}
if n == nil {
return id, ErrNodeNotFound
}
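A short usage sketch for the new protect parameter, assuming a simulation test where sim is a *simulation.Simulation and pivotID is the enode.ID of a node that must stay up:

	// stop any node except the pivot; the variadic list may name any
	// number of nodes that must not be selected
	stoppedID, err := sim.StopRandomNode(pivotID)
	if err != nil {
		t.Fatal(err)
	}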
diff --git a/network/simulation/node_test.go b/network/simulation/node_test.go
index 1435eae6cd..84699f1e62 100644
--- a/network/simulation/node_test.go
+++ b/network/simulation/node_test.go
@@ -290,7 +290,7 @@ func TestUploadSnapshot(t *testing.T) {
}
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
b.Store(BucketKeyKademlia, kad)
- return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil, nil
},
})
defer s.Close()
@@ -299,7 +299,7 @@ func TestUploadSnapshot(t *testing.T) {
log.Debug("Uploading snapshot")
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
- err := s.UploadSnapshot(ctx, fmt.Sprintf("../stream/testing/snapshot_%d.json", nodeCount))
+ err := s.UploadSnapshot(ctx, fmt.Sprintf("../stream/testdata/snapshot_%d.json", nodeCount))
if err != nil {
t.Fatalf("Error uploading snapshot to simulation network: %v", err)
}
diff --git a/network/simulation/simulation.go b/network/simulation/simulation.go
index 18a866c825..9b75f87087 100644
--- a/network/simulation/simulation.go
+++ b/network/simulation/simulation.go
@@ -103,6 +103,7 @@ func NewInProc(services map[string]ServiceFunc) (s *Simulation) {
func NewBzzInProc(services map[string]ServiceFunc) (s *Simulation) {
services["bzz"] = func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
addr := network.NewAddr(ctx.Config.Node())
+
hp := network.NewHiveParams()
hp.KeepAliveInterval = time.Duration(200) * time.Millisecond
hp.Discovery = false
@@ -121,7 +122,7 @@ func NewBzzInProc(services map[string]ServiceFunc) (s *Simulation) {
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
- return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil, nil
}
return NewInProc(services)
@@ -253,6 +254,19 @@ func (s *Simulation) Close() {
close(s.done)
sem := make(chan struct{}, maxParallelCleanups)
+ if s.httpSrv != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ err := s.httpSrv.Shutdown(ctx)
+ if err != nil {
+ log.Error("Error shutting down HTTP server!", "err", err)
+ }
+ close(s.runC)
+ }
+
+ s.shutdownWG.Wait()
+ s.Net.Shutdown()
+
s.mu.RLock()
cleanupFuncs := make([]func(), len(s.cleanupFuncs))
for i, f := range s.cleanupFuncs {
@@ -274,18 +288,6 @@ func (s *Simulation) Close() {
}
cleanupWG.Wait()
- if s.httpSrv != nil {
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- defer cancel()
- err := s.httpSrv.Shutdown(ctx)
- if err != nil {
- log.Error("Error shutting down HTTP server!", "err", err)
- }
- close(s.runC)
- }
-
- s.shutdownWG.Wait()
- s.Net.Shutdown()
if s.baseDir != "" {
os.RemoveAll(s.baseDir)
}
diff --git a/network/simulations/discovery/discovery_test.go b/network/simulations/discovery/discovery_test.go
index b908abb570..1a8f90b163 100644
--- a/network/simulations/discovery/discovery_test.go
+++ b/network/simulations/discovery/discovery_test.go
@@ -520,8 +520,8 @@ func newService(ctx *adapters.ServiceContext) (node.Service, error) {
if err != nil {
return nil, err
}
- return network.NewBzz(config, kad, store, nil, nil), nil
+ return network.NewBzz(config, kad, store, nil, nil, nil, nil), nil
}
- return network.NewBzz(config, kad, nil, nil, nil), nil
+ return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil
}
diff --git a/network/simulations/overlay.go b/network/simulations/overlay.go
index a2cc32f1a5..cae543d6ad 100644
--- a/network/simulations/overlay.go
+++ b/network/simulations/overlay.go
@@ -85,7 +85,7 @@ func (s *Simulation) NewService(ctx *adapters.ServiceContext) (node.Service, err
HiveParams: hp,
}
- return network.NewBzz(config, kad, store, nil, nil), nil
+ return network.NewBzz(config, kad, store, nil, nil, nil, nil), nil
}
//create the simulation network
diff --git a/network/stream/common_test.go b/network/stream/common_test.go
deleted file mode 100644
index cd688b6809..0000000000
--- a/network/stream/common_test.go
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "bytes"
- "context"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "os"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/simulation"
- p2ptest "github.com/ethersphere/swarm/p2p/testing"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/storage/localstore"
- "github.com/ethersphere/swarm/storage/mock"
- "github.com/ethersphere/swarm/testutil"
-)
-
-var (
- nodes = flag.Int("nodes", 0, "number of nodes")
- chunks = flag.Int("chunks", 0, "number of chunks")
- useMockStore = flag.Bool("mockstore", false, "disabled mock store (default: enabled)")
-
- bucketKeyStore = simulation.BucketKey("store")
- bucketKeyFileStore = simulation.BucketKey("filestore")
- bucketKeyDelivery = simulation.BucketKey("delivery")
- bucketKeyRegistry = simulation.BucketKey("registry")
-
- chunkSize = 4096
- pof = network.Pof
-)
-
-func init() {
- testutil.Init()
- rand.Seed(time.Now().UnixNano())
-}
-
-// newNetStoreAndDelivery is a default constructor for BzzAddr, NetStore and Delivery, used in Simulations
-func newNetStoreAndDelivery(ctx *adapters.ServiceContext, bucket *sync.Map) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
- addr := network.NewAddr(ctx.Config.Node())
-
- netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- netStore.RemoteGet = delivery.RequestFromPeers
-
- return addr, netStore, delivery, cleanup, nil
-}
-
-// newNetStoreAndDeliveryWithBzzAddr is a constructor for NetStore and Delivery, used in Simulations, accepting any BzzAddr
-func newNetStoreAndDeliveryWithBzzAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
- netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
- if err != nil {
- return nil, nil, nil, err
- }
-
- netStore.RemoteGet = delivery.RequestFromPeers
-
- return netStore, delivery, cleanup, nil
-}
-
-// newNetStoreAndDeliveryWithRequestFunc is a constructor for NetStore and Delivery, used in Simulations, accepting any NetStore.RequestFunc
-func newNetStoreAndDeliveryWithRequestFunc(ctx *adapters.ServiceContext, bucket *sync.Map, rf storage.RemoteGetFunc) (*network.BzzAddr, *storage.NetStore, *Delivery, func(), error) {
- addr := network.NewAddr(ctx.Config.Node())
-
- netStore, delivery, cleanup, err := netStoreAndDeliveryWithAddr(ctx, bucket, addr)
- if err != nil {
- return nil, nil, nil, nil, err
- }
-
- netStore.RemoteGet = rf
-
- return addr, netStore, delivery, cleanup, nil
-}
-
-func netStoreAndDeliveryWithAddr(ctx *adapters.ServiceContext, bucket *sync.Map, addr *network.BzzAddr) (*storage.NetStore, *Delivery, func(), error) {
- n := ctx.Config.Node()
-
- localStore, localStoreCleanup, err := newTestLocalStore(n.ID(), addr, nil)
- if err != nil {
- return nil, nil, nil, err
- }
-
- netStore := storage.NewNetStore(localStore, enode.ID{})
- lnetStore := storage.NewLNetStore(netStore)
- fileStore := storage.NewFileStore(lnetStore, storage.NewFileStoreParams(), chunk.NewTags())
-
- kad := network.NewKademlia(addr.Over(), network.NewKadParams())
- delivery := NewDelivery(kad, netStore)
-
- bucket.Store(bucketKeyStore, localStore)
- bucket.Store(bucketKeyDelivery, delivery)
- bucket.Store(bucketKeyFileStore, fileStore)
- // for the kademlia object, we use the global key from the simulation package,
- // as the simulation will try to access it in the WaitTillHealthy with that key
- bucket.Store(simulation.BucketKeyKademlia, kad)
-
- cleanup := func() {
- netStore.Close()
- localStoreCleanup()
- }
-
- return netStore, delivery, cleanup, nil
-}
-
-func newStreamerTester(registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *localstore.DB, func(), error) {
- // setup
- addr := network.RandomAddr() // tested peers peer address
- to := network.NewKademlia(addr.OAddr, network.NewKadParams())
-
- // temp datadir
- datadir, err := ioutil.TempDir("", "streamer")
- if err != nil {
- return nil, nil, nil, nil, err
- }
- removeDataDir := func() {
- os.RemoveAll(datadir)
- }
-
- localStore, err := localstore.New(datadir, addr.Over(), nil)
- if err != nil {
- removeDataDir()
- return nil, nil, nil, nil, err
- }
-
- netStore := storage.NewNetStore(localStore, enode.ID{})
-
- delivery := NewDelivery(to, netStore)
- netStore.RemoteGet = delivery.RequestFromPeers
-
- intervalsStore := state.NewInmemoryStore()
- streamer := NewRegistry(addr.ID(), delivery, netStore, intervalsStore, registryOptions, nil)
-
- prvkey, err := crypto.GenerateKey()
- if err != nil {
- removeDataDir()
- return nil, nil, nil, nil, err
- }
-
- protocolTester := p2ptest.NewProtocolTester(prvkey, 1, streamer.runProtocol)
- teardown := func() {
- protocolTester.Stop()
- streamer.Close()
- intervalsStore.Close()
- netStore.Close()
- removeDataDir()
- }
- err = waitForPeers(streamer, 10*time.Second, 1)
- if err != nil {
- teardown()
- return nil, nil, nil, nil, errors.New("timeout: peer is not created")
- }
-
- return protocolTester, streamer, localStore, teardown, nil
-}
-
-func waitForPeers(streamer *Registry, timeout time.Duration, expectedPeers int) error {
- ticker := time.NewTicker(10 * time.Millisecond)
- timeoutTimer := time.NewTimer(timeout)
- for {
- select {
- case <-ticker.C:
- if streamer.peersCount() >= expectedPeers {
- return nil
- }
- case <-timeoutTimer.C:
- return errors.New("timeout")
- }
- }
-}
-
-type roundRobinStore struct {
- index uint32
- stores []storage.ChunkStore
-}
-
-func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
- return &roundRobinStore{
- stores: stores,
- }
-}
-
-// not used in this context, only to fulfill ChunkStore interface
-func (rrs *roundRobinStore) Has(_ context.Context, _ storage.Address) (bool, error) {
- return false, errors.New("roundRobinStore doesn't support Has")
-}
-
-func (rrs *roundRobinStore) Get(_ context.Context, _ chunk.ModeGet, _ storage.Address) (storage.Chunk, error) {
- return nil, errors.New("roundRobinStore doesn't support Get")
-}
-
-func (rrs *roundRobinStore) GetMulti(_ context.Context, _ chunk.ModeGet, _ ...storage.Address) ([]storage.Chunk, error) {
- return nil, errors.New("roundRobinStore doesn't support GetMulti")
-}
-
-func (rrs *roundRobinStore) Put(ctx context.Context, mode chunk.ModePut, chs ...storage.Chunk) ([]bool, error) {
- i := atomic.AddUint32(&rrs.index, 1)
- idx := int(i) % len(rrs.stores)
- return rrs.stores[idx].Put(ctx, mode, chs...)
-}
-
-func (rrs *roundRobinStore) Set(ctx context.Context, mode chunk.ModeSet, addr chunk.Address) (err error) {
- return errors.New("roundRobinStore doesn't support Set")
-}
-
-func (rrs *roundRobinStore) LastPullSubscriptionBinID(bin uint8) (id uint64, err error) {
- return 0, errors.New("roundRobinStore doesn't support LastPullSubscriptionBinID")
-}
-
-func (rrs *roundRobinStore) SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan chunk.Descriptor, stop func()) {
- return nil, nil
-}
-
-func (rrs *roundRobinStore) Close() error {
- for _, store := range rrs.stores {
- store.Close()
- }
- return nil
-}
-
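
A note for readers of the removed helpers: roundRobinStore only implements Put meaningfully, dispatching each call to the next backend via an atomic counter. A minimal, self-contained sketch of that dispatch, with strings standing in for the storage.ChunkStore backends:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// roundRobin mirrors roundRobinStore.Put: each call atomically bumps a
// counter and dispatches to stores[counter % len(stores)].
type roundRobin struct {
	index  uint32
	stores []string // stand-ins for storage.ChunkStore backends
}

func (r *roundRobin) next() string {
	i := atomic.AddUint32(&r.index, 1)
	return r.stores[int(i)%len(r.stores)]
}

func main() {
	r := &roundRobin{stores: []string{"store-0", "store-1", "store-2"}}
	for i := 0; i < 6; i++ {
		fmt.Println(r.next()) // store-1, store-2, store-0, store-1, ...
	}
}
```

Because the counter is incremented before the modulo, the first call lands on stores[1], exactly as in the helper above.
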
-func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
- r, _ := fileStore.Retrieve(context.TODO(), hash)
- buf := make([]byte, 1024)
- var n int
- var total int64
- var err error
- for (total == 0 || n > 0) && err == nil {
- n, err = r.ReadAt(buf, total)
- total += int64(n)
- }
- if err != nil && err != io.EOF {
- return total, err
- }
- return total, nil
-}
-
-func uploadFilesToNodes(sim *simulation.Simulation) ([]storage.Address, []string, error) {
- nodes := sim.UpNodeIDs()
- nodeCnt := len(nodes)
- log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt))
- //array holding generated files
- rfiles := make([]string, nodeCnt)
- //array holding the root hashes of the files
- rootAddrs := make([]storage.Address, nodeCnt)
-
- var err error
- //for every node, generate a file and upload
- for i, id := range nodes {
- item, ok := sim.NodeItem(id, bucketKeyFileStore)
- if !ok {
- return nil, nil, fmt.Errorf("Error accessing localstore")
- }
- fileStore := item.(*storage.FileStore)
- //generate a file
- rfiles[i], err = generateRandomFile()
- if err != nil {
- return nil, nil, err
- }
- //store it (upload it) on the FileStore
- ctx := context.TODO()
- rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
- log.Debug("Uploaded random string file to node")
- if err != nil {
- return nil, nil, err
- }
- err = wait(ctx)
- if err != nil {
- return nil, nil, err
- }
- rootAddrs[i] = rk
- }
- return rootAddrs, rfiles, nil
-}
-
-//generate a random file (string)
-func generateRandomFile() (string, error) {
- //generate a random file size between minFileSize and maxFileSize
- fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize
- log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize))
- b := testutil.RandomBytes(1, fileSize*1024)
- return string(b), nil
-}
-
-func newTestLocalStore(id enode.ID, addr *network.BzzAddr, globalStore mock.GlobalStorer) (localStore *localstore.DB, cleanup func(), err error) {
- dir, err := ioutil.TempDir("", "swarm-stream-")
- if err != nil {
- return nil, nil, err
- }
- cleanup = func() {
- os.RemoveAll(dir)
- }
-
- var mockStore *mock.NodeStore
- if globalStore != nil {
- mockStore = globalStore.NewNodeStore(common.BytesToAddress(id.Bytes()))
- }
-
- localStore, err = localstore.New(dir, addr.Over(), &localstore.Options{
- MockStore: mockStore,
- })
- if err != nil {
- cleanup()
- return nil, nil, err
- }
- return localStore, cleanup, nil
-}
-
-// watchDisconnections receives simulation peer events in a new goroutine and sets atomic value
-// disconnected to true in case of a disconnect event.
-func watchDisconnections(ctx context.Context, sim *simulation.Simulation) (disconnected *boolean) {
- log.Debug("Watching for disconnections")
- disconnections := sim.PeerEvents(
- ctx,
- sim.NodeIDs(),
- simulation.NewPeerEventsFilter().Drop(),
- )
- disconnected = new(boolean)
- go func() {
- for {
- select {
- case <-ctx.Done():
- return
- case d := <-disconnections:
- if d.Error != nil {
- log.Error("peer drop event error", "node", d.NodeID, "peer", d.PeerID, "err", d.Error)
- } else {
- log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
- }
- disconnected.set(true)
- }
- }
- }()
- return disconnected
-}
-
-// boolean is used to concurrently set
-// and read a boolean value.
-type boolean struct {
- v bool
- mu sync.RWMutex
-}
-
-// set sets the value.
-func (b *boolean) set(v bool) {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- b.v = v
-}
-
-// bool reads the value.
-func (b *boolean) bool() bool {
- b.mu.RLock()
- defer b.mu.RUnlock()
-
- return b.v
-}
-
-func getAllRefs(testData []byte) (storage.AddressCollection, error) {
- datadir, err := ioutil.TempDir("", "chunk-debug")
- if err != nil {
- return nil, fmt.Errorf("unable to create temp dir: %v", err)
- }
- defer os.RemoveAll(datadir)
- fileStore, cleanup, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags())
- if err != nil {
- return nil, err
- }
- defer cleanup()
-
- reader := bytes.NewReader(testData)
- return fileStore.GetAllReferences(context.Background(), reader)
-}
diff --git a/network/stream/delivery.go b/network/stream/delivery.go
deleted file mode 100644
index dd80e3824a..0000000000
--- a/network/stream/delivery.go
+++ /dev/null
@@ -1,346 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "errors"
- "fmt"
- "time"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/timeouts"
- "github.com/ethersphere/swarm/spancontext"
- "github.com/ethersphere/swarm/storage"
- "github.com/opentracing/opentracing-go"
- olog "github.com/opentracing/opentracing-go/log"
-)
-
-var (
- processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
- handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
- retrieveChunkFail = metrics.NewRegisteredCounter("network.stream.retrieve_chunks_fail.count", nil)
-
- lastReceivedChunksMsg = metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
-)
-
-type Delivery struct {
- netStore *storage.NetStore
- kad *network.Kademlia
- getPeer func(enode.ID) *Peer
- quit chan struct{}
-}
-
-func NewDelivery(kad *network.Kademlia, netStore *storage.NetStore) *Delivery {
- return &Delivery{
- netStore: netStore,
- kad: kad,
- quit: make(chan struct{}),
- }
-}
-
-// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
-type RetrieveRequestMsg struct {
- Addr storage.Address
-}
-
-func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *RetrieveRequestMsg) error {
- log.Trace("handle retrieve request", "peer", sp.ID(), "hash", req.Addr)
- handleRetrieveRequestMsgCount.Inc(1)
-
- ctx, osp := spancontext.StartSpan(
- ctx,
- "handle.retrieve.request")
-
- osp.LogFields(olog.String("ref", req.Addr.String()))
-
- defer osp.Finish()
-
- ctx, cancel := context.WithTimeout(ctx, timeouts.FetcherGlobalTimeout)
- defer cancel()
-
- r := &storage.Request{
- Addr: req.Addr,
- Origin: sp.ID(),
- }
- chunk, err := d.netStore.Get(ctx, chunk.ModeGetRequest, r)
- if err != nil {
- retrieveChunkFail.Inc(1)
- log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "err", err)
- return nil
- }
-
- log.Trace("retrieve request, delivery", "ref", req.Addr, "peer", sp.ID())
- syncing := false
- err = sp.Deliver(ctx, chunk, 0, syncing)
- if err != nil {
- log.Error("sp.Deliver errored", "err", err)
- }
- osp.LogFields(olog.Bool("delivered", true))
-
- return nil
-}
-
-//Chunk delivery always uses the same message type....
-type ChunkDeliveryMsg struct {
- Addr storage.Address
- SData []byte // the stored chunk Data (incl size)
- peer *Peer // set in handleChunkDeliveryMsg
-}
-
-//...but swap accounting needs to disambiguate if it is a delivery for syncing or for retrieval
-//as it decides based on message type if it needs to account for this message or not
-
-//defines a chunk delivery for retrieval (with accounting)
-type ChunkDeliveryMsgRetrieval ChunkDeliveryMsg
-
-//defines a chunk delivery for syncing (without accounting)
-type ChunkDeliveryMsgSyncing ChunkDeliveryMsg
-
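
As the comments above explain, the two delivery flavors exist only so swap accounting can decide, by message type alone, whether to charge for a message. A hedged sketch of that type-based dispatch, using stand-in types rather than the production structs:

```go
package main

import "fmt"

// chunkDelivery carries the same payload for both flavors.
type chunkDelivery struct {
	addr, sdata []byte
}

// Same content, different types: retrieval deliveries are accounted
// for by swap, syncing deliveries are not.
type deliveryRetrieval chunkDelivery
type deliverySyncing chunkDelivery

// accounted decides based on the message type alone, the way swap does
// for ChunkDeliveryMsgRetrieval vs ChunkDeliveryMsgSyncing.
func accounted(msg interface{}) bool {
	_, isRetrieval := msg.(*deliveryRetrieval)
	return isRetrieval
}

func main() {
	fmt.Println(accounted(&deliveryRetrieval{})) // true
	fmt.Println(accounted(&deliverySyncing{}))   // false
}
```
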
-// chunk delivery msg is the response to a retrieve request msg
-func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req interface{}) error {
- var osp opentracing.Span
- ctx, osp = spancontext.StartSpan(
- ctx,
- "handle.chunk.delivery")
-
- processReceivedChunksCount.Inc(1)
-
- // record the last time we received a chunk delivery message
- lastReceivedChunksMsg.Update(time.Now().UnixNano())
-
- var msg *ChunkDeliveryMsg
- var mode chunk.ModePut
- switch r := req.(type) {
- case *ChunkDeliveryMsgRetrieval:
- // count how many chunks we receive for retrieve requests per peer
- peermetric := fmt.Sprintf("chunk.delivery.%x", sp.BzzAddr.Over()[:16])
- metrics.GetOrRegisterCounter(peermetric, nil).Inc(1)
-
- msg = (*ChunkDeliveryMsg)(r)
- peerPO := chunk.Proximity(sp.BzzAddr.Over(), msg.Addr)
- po := chunk.Proximity(d.kad.BaseAddr(), msg.Addr)
- depth := d.kad.NeighbourhoodDepth()
- // chunks within the area of responsibility should always sync
- // https://github.com/ethersphere/go-ethereum/pull/1282#discussion_r269406125
- if po >= depth || peerPO < po {
- mode = chunk.ModePutSync
- } else {
-		// do not sync if the peer that is sending us a chunk is closer to the chunk than we are
- mode = chunk.ModePutRequest
- }
- case *ChunkDeliveryMsgSyncing:
- msg = (*ChunkDeliveryMsg)(r)
- mode = chunk.ModePutSync
- case *ChunkDeliveryMsg:
- msg = r
- mode = chunk.ModePutSync
- }
-
- log.Trace("handle.chunk.delivery", "ref", msg.Addr, "from peer", sp.ID())
-
- go func() {
- defer osp.Finish()
-
- msg.peer = sp
- log.Trace("handle.chunk.delivery", "put", msg.Addr)
-
- _, err := d.netStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData))
- if err != nil {
- if err == storage.ErrChunkInvalid {
- // we removed this log because it spams the logs
- // TODO: Enable this log line
- // log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", msg.Addr, )
- msg.peer.Drop()
- }
- }
- log.Trace("handle.chunk.delivery", "done put", msg.Addr, "err", err)
- }()
- return nil
-}
-
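
The mode selection in handleChunkDeliveryMsg reduces to one predicate over three proximity orders. A small sketch, assuming plain ints in place of chunk.Proximity results, not the production code path:

```go
package main

import "fmt"

// putMode condenses the mode choice for retrieval deliveries: chunks in
// our area of responsibility (po >= depth), or chunks we are closer to
// than the sending peer (peerPO < po), are stored in sync mode;
// otherwise they are cached as request data only.
func putMode(peerPO, po, depth int) string {
	if po >= depth || peerPO < po {
		return "ModePutSync"
	}
	return "ModePutRequest"
}

func main() {
	fmt.Println(putMode(3, 8, 5)) // chunk within our neighbourhood -> ModePutSync
	fmt.Println(putMode(2, 4, 5)) // we are closer than the sender -> ModePutSync
	fmt.Println(putMode(7, 4, 5)) // sender closer to the chunk -> ModePutRequest
}
```
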
-func (d *Delivery) Close() {
- close(d.quit)
-}
-
-// getOriginPo returns the originPo if the incoming Request has an Origin.
-// If our node is the first node that requests this chunk, then we don't have
-// an Origin and return -1.
-// This is used only for tracing, and can probably be refactored so that we
-// don't have to iterate over Kademlia.
-func (d *Delivery) getOriginPo(req *storage.Request) int {
- originPo := -1
-
- d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
- id := p.ID()
-
- // get po between chunk and origin
- if req.Origin.String() == id.String() {
- originPo = po
- return false
- }
-
- return true
- })
-
- return originPo
-}
-
-// FindPeer returns the closest peer from Kademlia to which a chunk
-// request hasn't already been sent
-func (d *Delivery) FindPeer(ctx context.Context, req *storage.Request) (*Peer, error) {
- var sp *Peer
- var err error
-
- osp, _ := ctx.Value("remote.fetch").(opentracing.Span)
-
- // originPo - proximity of the node that made the request; -1 if the request originator is our node;
- // myPo - this node's proximity with the requested chunk
- // selectedPeerPo - kademlia suggested node's proximity with the requested chunk (computed further below)
- originPo := d.getOriginPo(req)
- myPo := chunk.Proximity(req.Addr, d.kad.BaseAddr())
- selectedPeerPo := -1
-
- depth := d.kad.NeighbourhoodDepth()
-
- if osp != nil {
- osp.LogFields(olog.Int("originPo", originPo))
- osp.LogFields(olog.Int("depth", depth))
- osp.LogFields(olog.Int("myPo", myPo))
- }
-
- // do not forward requests if origin proximity is bigger than our node's proximity
- // this means that origin is closer to the chunk
- if originPo > myPo {
- return nil, errors.New("not forwarding request, origin node is closer to chunk than this node")
- }
-
- d.kad.EachConn(req.Addr[:], 255, func(p *network.Peer, po int) bool {
- id := p.ID()
-
- // skip light nodes
- if p.LightNode {
- return true
- }
-
- // do not send request back to peer who asked us. maybe merge with SkipPeer at some point
- if req.Origin.String() == id.String() {
- return true
- }
-
- // skip peers that we have already tried
- if req.SkipPeer(id.String()) {
- log.Trace("findpeer skip peer", "peer", id, "ref", req.Addr.String())
- return true
- }
-
- if myPo < depth { // chunk is NOT within the neighbourhood
- if po <= myPo { // always choose a peer strictly closer to chunk than us
- log.Trace("findpeer1a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
- return false
- } else {
- log.Trace("findpeer1b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
- }
- } else { // chunk IS WITHIN neighbourhood
- if po < depth { // do not select peer outside the neighbourhood. But allows peers further from the chunk than us
- log.Trace("findpeer2a", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
- return false
- } else if po <= originPo { // avoid loop in neighbourhood, so not forward when a request comes from the neighbourhood
- log.Trace("findpeer2b", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
- return false
- } else {
- log.Trace("findpeer2c", "originpo", originPo, "mypo", myPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
- }
- }
-
- // if selected peer is not in the depth (2nd condition; if depth <= po, then peer is in nearest neighbourhood)
- // and they have a lower po than ours, return error
- if po < myPo && depth > po {
- log.Trace("findpeer4 skip peer because origin was closer", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-
- err = fmt.Errorf("not asking peers further away from origin; ref=%s originpo=%v po=%v depth=%v myPo=%v", req.Addr.String(), originPo, po, depth, myPo)
- return false
- }
-
- // if chunk falls in our nearest neighbourhood (1st condition), but suggested peer is not in
- // the nearest neighbourhood (2nd condition), don't forward the request to suggested peer
- if depth <= myPo && depth > po {
- log.Trace("findpeer5 skip peer because depth", "originpo", originPo, "po", po, "depth", depth, "peer", id, "ref", req.Addr.String())
-
- err = fmt.Errorf("not going outside of depth; ref=%s originpo=%v po=%v depth=%v myPo=%v", req.Addr.String(), originPo, po, depth, myPo)
- return false
- }
-
- sp = d.getPeer(id)
-
- // sp could be nil, if we encountered a peer that is not registered for delivery, i.e. doesn't support the `stream` protocol
- // if sp is not nil, then we have selected the next peer and we stop iterating
- // if sp is nil, we continue iterating
- if sp != nil {
- selectedPeerPo = po
-
- return false
- }
-
- // continue iterating
- return true
- })
-
- if osp != nil {
- osp.LogFields(olog.Int("selectedPeerPo", selectedPeerPo))
- }
-
- if err != nil {
- return nil, err
- }
-
- if sp == nil {
- return nil, errors.New("no peer found")
- }
-
- return sp, nil
-}
-
-// RequestFromPeers sends a chunk retrieve request to the next found peer
-func (d *Delivery) RequestFromPeers(ctx context.Context, req *storage.Request, localID enode.ID) (*enode.ID, error) {
- metrics.GetOrRegisterCounter("delivery.requestfrompeers", nil).Inc(1)
-
- sp, err := d.FindPeer(ctx, req)
- if err != nil {
- log.Trace(err.Error())
- return nil, err
- }
-
- // setting this value in the context creates a new span that can persist across the sendpriority queue and the network roundtrip
- // this span will finish only when delivery is handled (or times out)
- r := &RetrieveRequestMsg{
- Addr: req.Addr,
- }
- log.Trace("sending retrieve request", "ref", r.Addr, "peer", sp.ID().String(), "origin", localID)
- err = sp.Send(ctx, r)
- if err != nil {
- log.Error(err.Error())
- return nil, err
- }
-
- spID := sp.ID()
- return &spID, nil
-}
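
The EachConn callback in FindPeer encodes the forwarding policy as a series of early exits. For orientation, here are the same rules condensed into a single per-peer predicate; this is a sketch, not the production path, and note the original iterates peers sorted by proximity to the chunk and aborts on the first failing rule, since every later peer would fail too:

```go
package main

import "fmt"

// eligible flattens the forwarding rules of the deleted FindPeer into a
// pure predicate over proximity orders, evaluating a single candidate.
func eligible(po, myPo, originPo, depth int) bool {
	if myPo < depth {
		// chunk is NOT within our neighbourhood:
		// only forward to peers strictly closer to the chunk than us
		return po > myPo
	}
	// chunk IS within our neighbourhood: never select a peer outside
	// the neighbourhood, and never forward back toward an origin that
	// is at least as close to the chunk as the candidate
	return po >= depth && po > originPo
}

func main() {
	fmt.Println(eligible(3, 2, -1, 5)) // true: candidate closer to chunk than us
	fmt.Println(eligible(2, 2, -1, 5)) // false: candidate no closer than us
	fmt.Println(eligible(4, 6, -1, 5)) // false: candidate outside our neighbourhood
	fmt.Println(eligible(6, 6, 5, 5))  // true: within depth and beyond the origin
}
```
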
diff --git a/network/stream/delivery_test.go b/network/stream/delivery_test.go
deleted file mode 100644
index a1a658b3ad..0000000000
--- a/network/stream/delivery_test.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "bytes"
- "context"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/network"
- pq "github.com/ethersphere/swarm/network/priorityqueue"
- "github.com/ethersphere/swarm/p2p/protocols"
- p2ptest "github.com/ethersphere/swarm/p2p/testing"
- "github.com/ethersphere/swarm/storage"
-)
-
-//Test requesting a chunk from a peer, then issuing an "empty" OfferedHashesMsg (no hashes available yet)
-//Should time out as the peer does not have the chunk (no syncing happened previously)
-func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
- tester, _, _, teardown, err := newStreamerTester(&RegistryOptions{
- Syncing: SyncingDisabled, //do no syncing
- })
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
-
- //test the exchange
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Triggers: []p2ptest.Trigger{
- { //then the actual RETRIEVE_REQUEST....
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: chunk.Address()[:],
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- { //to which the peer responds with offered hashes
- Code: 1,
- Msg: &OfferedHashesMsg{
- Hashes: nil,
- From: 0,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- })
-
- //should fail with a timeout as the peer we are requesting
- //the chunk from does not have the chunk
- expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
- if err == nil || err.Error() != expectedError {
- t.Fatalf("Expected error %v, got %v", expectedError, err)
- }
-}
-
-// upstream request server receives a retrieve Request and responds with
-// offered hashes or delivery if skipHash is set to true
-func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
- tester, _, localStore, teardown, err := newStreamerTester(&RegistryOptions{
- Syncing: SyncingDisabled,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- hash := storage.Address(hash1[:])
- ch := storage.NewChunk(hash, hash1[:])
- _, err = localStore.Put(context.TODO(), chunk.ModePutUpload, ch)
- if err != nil {
- t.Fatalf("Expected no err got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RetrieveRequestMsg",
- Triggers: []p2ptest.Trigger{
- {
- Code: 5,
- Msg: &RetrieveRequestMsg{
- Addr: hash,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 6,
- Msg: &ChunkDeliveryMsg{
- Addr: ch.Address(),
- SData: ch.Data(),
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// if there is one peer in the Kademlia, RequestFromPeers should return it
-func TestRequestFromPeers(t *testing.T) {
- dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
-
- addr := network.RandomAddr()
- to := network.NewKademlia(addr.OAddr, network.NewKadParams())
- delivery := NewDelivery(to, nil)
- protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
- peer := network.NewPeer(&network.BzzPeer{
- BzzAddr: network.RandomAddr(),
- LightNode: false,
- Peer: protocolsPeer,
- }, to)
- to.On(peer)
- r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
-
- // an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
- sp := &Peer{
- BzzPeer: &network.BzzPeer{Peer: protocolsPeer, BzzAddr: addr},
- pq: pq.New(int(PriorityQueue), PriorityQueueCap),
- streamer: r,
- }
- r.setPeer(sp)
- req := storage.NewRequest(storage.Address(hash0[:]))
- id, err := delivery.FindPeer(context.TODO(), req)
- if err != nil {
- t.Fatal(err)
- }
- if id.ID() != dummyPeerID {
- t.Fatalf("Expected an id, got %v", id)
- }
-}
-
-// RequestFromPeers should not return light nodes
-func TestRequestFromPeersWithLightNode(t *testing.T) {
- dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
-
- addr := network.RandomAddr()
- to := network.NewKademlia(addr.OAddr, network.NewKadParams())
- delivery := NewDelivery(to, nil)
-
- protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
- // setting up a lightnode
- peer := network.NewPeer(&network.BzzPeer{
- BzzAddr: network.RandomAddr(),
- LightNode: true,
- Peer: protocolsPeer,
- }, to)
- to.On(peer)
- r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
- // an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
- sp := &Peer{
- BzzPeer: &network.BzzPeer{Peer: protocolsPeer, BzzAddr: addr},
- pq: pq.New(int(PriorityQueue), PriorityQueueCap),
- streamer: r,
- }
- r.setPeer(sp)
-
- req := storage.NewRequest(storage.Address(hash0[:]))
-
- // making a request which should return with "no peer found"
- _, err := delivery.FindPeer(context.TODO(), req)
-
- expectedError := "no peer found"
- if err.Error() != expectedError {
- t.Fatalf("expected '%v', got %v", expectedError, err)
- }
-}
-
-func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
- tester, streamer, localStore, teardown, err := newStreamerTester(&RegistryOptions{
- Syncing: SyncingDisabled,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
- return &testClient{
- t: t,
- }, nil
- })
-
- node := tester.Nodes[0]
-
- //subscribe to custom stream
- stream := NewStream("foo", "", true)
- err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- chunkKey := hash0[:]
- chunkData := hash1[:]
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Expects: []p2ptest.Expect{
- { //first expect subscription to the custom stream...
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- },
- p2ptest.Exchange{
- Label: "ChunkDelivery message",
- Triggers: []p2ptest.Trigger{
- { //...then trigger a chunk delivery for the given chunk from peer in order for
- //local node to get the chunk delivered
- Code: 6,
- Msg: &ChunkDeliveryMsg{
- Addr: chunkKey,
- SData: chunkData,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
- defer cancel()
-
- // wait for the chunk to get stored
- storedChunk, err := localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
- for err != nil {
- select {
- case <-ctx.Done():
- t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
- default:
- }
- storedChunk, err = localStore.Get(ctx, chunk.ModeGetRequest, chunkKey)
- time.Sleep(50 * time.Millisecond)
- }
-
- if !bytes.Equal(storedChunk.Data(), chunkData) {
- t.Fatal("Retrieved chunk has different data than original")
- }
-
-}
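
Both waitForPeers earlier in this diff and the store-polling loop at the end of this test follow the same poll-until-condition-or-timeout shape. A generic sketch of the pattern; the names here are illustrative, not the actual test helpers:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// waitFor polls cond every interval until it returns nil or ctx
// expires, wrapping the last failure so the caller sees why the
// condition never held.
func waitFor(ctx context.Context, interval time.Duration, cond func() error) error {
	for {
		err := cond()
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("condition not met before timeout: %w", err)
		case <-time.After(interval):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var stored atomic.Bool
	go func() { // simulate the chunk arriving a bit later
		time.Sleep(200 * time.Millisecond)
		stored.Store(true)
	}()

	err := waitFor(ctx, 50*time.Millisecond, func() error {
		if !stored.Load() {
			return errors.New("chunk not in localstore yet")
		}
		return nil
	})
	fmt.Println("waitFor:", err) // waitFor: <nil>
}
```
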
diff --git a/network/stream/intervals/intervals.go b/network/stream/intervals/intervals.go
index 562c3df9ae..b87ccbd080 100644
--- a/network/stream/intervals/intervals.go
+++ b/network/stream/intervals/intervals.go
@@ -36,7 +36,7 @@ type Intervals struct {
// New creates a new instance of Intervals.
// Start argument limits the lower bound of intervals.
-// No range bellow start bound will be added by Add method or
+// No range below start bound will be added by Add method or
// returned by Next method. This limit may be used for
// tracking "live" synchronization, where the sync session
// starts from a specific value, and if "live" sync intervals
@@ -115,26 +115,44 @@ func (i *Intervals) Merge(m *Intervals) {
// Next returns the first range interval that is not fulfilled. Returned
// start and end values are both inclusive, meaning that the whole range
-// including start and end need to be added in order to full the gap
+// including start and end need to be added in order to fill the gap
// in intervals.
// Returned value for end is 0 if the next interval is after the whole
// range that is stored in Intervals. Zero end value represents no limit
// on the next interval length.
-func (i *Intervals) Next() (start, end uint64) {
+// Argument ceiling is the upper bound for the returned range.
+// Returned empty boolean indicates whether both start and end values have
+// reached the ceiling value, meaning that the returned range is empty
+// and contains no elements.
+func (i *Intervals) Next(ceiling uint64) (start, end uint64, empty bool) {
i.mu.RLock()
- defer i.mu.RUnlock()
+ defer func() {
+ if ceiling > 0 {
+ var ceilingHitStart, ceilingHitEnd bool
+ if start > ceiling {
+ start = ceiling
+ ceilingHitStart = true
+ }
+ if end == 0 || end > ceiling {
+ end = ceiling
+ ceilingHitEnd = true
+ }
+ empty = ceilingHitStart && ceilingHitEnd
+ }
+ i.mu.RUnlock()
+ }()
l := len(i.ranges)
if l == 0 {
- return i.start, 0
+ return i.start, 0, false
}
if i.ranges[0][0] != i.start {
- return i.start, i.ranges[0][0] - 1
+ return i.start, i.ranges[0][0] - 1, false
}
if l == 1 {
- return i.ranges[0][1] + 1, 0
+ return i.ranges[0][1] + 1, 0, false
}
- return i.ranges[0][1] + 1, i.ranges[1][0] - 1
+ return i.ranges[0][1] + 1, i.ranges[1][0] - 1, false
}
// Last returns the value that is at the end of the last interval.
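
To make the new ceiling semantics concrete, here is the clamping rule from the deferred block in Next isolated as a pure function, checked against two of the test cases added below:

```go
package main

import "fmt"

// clampToCeiling reproduces the ceiling handling added to Intervals.Next:
// both bounds are capped at ceiling, end == 0 means "no upper limit",
// and empty reports when both bounds had to be clamped, i.e. the
// remaining range contains no elements.
func clampToCeiling(start, end, ceiling uint64) (uint64, uint64, bool) {
	if ceiling == 0 {
		return start, end, false
	}
	var hitStart, hitEnd bool
	if start > ceiling {
		start = ceiling
		hitStart = true
	}
	if end == 0 || end > ceiling {
		end = ceiling
		hitEnd = true
	}
	return start, end, hitStart && hitEnd
}

func main() {
	// [0, 0] synced, open-ended next range [1, 0], ceiling 10 -> [1, 10], not empty
	fmt.Println(clampToCeiling(1, 0, 10)) // 1 10 false
	// fully synced up to 9, next range would start at 10, ceiling 9 -> [9, 9], empty
	fmt.Println(clampToCeiling(10, 0, 9)) // 9 9 true
}
```
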
diff --git a/network/stream/intervals/intervals_test.go b/network/stream/intervals/intervals_test.go
index b5212f0d91..70ef4fd77a 100644
--- a/network/stream/intervals/intervals_test.go
+++ b/network/stream/intervals/intervals_test.go
@@ -22,14 +22,16 @@ import "testing"
// initial state.
func Test(t *testing.T) {
for i, tc := range []struct {
- startLimit uint64
- initial [][2]uint64
- start uint64
- end uint64
- expected string
- nextStart uint64
- nextEnd uint64
- last uint64
+ startLimit uint64
+ initial [][2]uint64
+ start uint64
+ end uint64
+ expected string
+ nextStart uint64
+ nextEnd uint64
+ nextEmptyRange bool
+ last uint64
+ ceiling uint64
}{
{
initial: nil,
@@ -316,6 +318,79 @@ func Test(t *testing.T) {
nextEnd: 119,
last: 130,
},
+ {
+ initial: nil,
+ start: 0,
+ end: 0,
+ expected: "[[0 0]]",
+ nextStart: 1,
+ nextEnd: 10,
+ last: 0,
+ ceiling: 10,
+ },
+ {
+ initial: nil,
+ start: 0,
+ end: 9,
+ expected: "[[0 9]]",
+ nextStart: 9,
+ nextEnd: 9,
+ nextEmptyRange: true,
+ last: 9,
+ ceiling: 9,
+ },
+ {
+ initial: nil,
+ start: 0,
+ end: 9,
+ expected: "[[0 9]]",
+ nextStart: 10,
+ nextEnd: 10,
+ nextEmptyRange: false,
+ last: 9,
+ ceiling: 10,
+ },
+ {
+ initial: nil,
+ start: 0,
+ end: 10,
+ expected: "[[0 10]]",
+ nextStart: 11,
+ nextEnd: 15,
+ last: 10,
+ ceiling: 15,
+ },
+ {
+ initial: [][2]uint64{{0, 0}},
+ start: 5,
+ end: 15,
+ expected: "[[0 0] [5 15]]",
+ nextStart: 1,
+ nextEnd: 3,
+ last: 15,
+ ceiling: 3,
+ },
+ {
+ initial: [][2]uint64{{0, 0}},
+ start: 5,
+ end: 15,
+ expected: "[[0 0] [5 15]]",
+ nextStart: 1,
+ nextEnd: 4,
+ last: 15,
+ ceiling: 20,
+ },
+ {
+ startLimit: 100,
+ initial: nil,
+ start: 120,
+ end: 130,
+ expected: "[[120 130]]",
+ nextStart: 100,
+ nextEnd: 110,
+ last: 130,
+ ceiling: 110,
+ },
} {
intervals := NewIntervals(tc.startLimit)
intervals.ranges = tc.initial
@@ -324,13 +399,16 @@ func Test(t *testing.T) {
if got != tc.expected {
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got)
}
- nextStart, nextEnd := intervals.Next()
+ nextStart, nextEnd, nextEmptyRange := intervals.Next(tc.ceiling)
if nextStart != tc.nextStart {
t.Errorf("interval #%d, expected next start %d, got %d", i, tc.nextStart, nextStart)
}
if nextEnd != tc.nextEnd {
t.Errorf("interval #%d, expected next end %d, got %d", i, tc.nextEnd, nextEnd)
}
+ if nextEmptyRange != tc.nextEmptyRange {
+ t.Errorf("interval #%d, expected empty range %v, got %v", i, tc.nextEmptyRange, nextEmptyRange)
+ }
last := intervals.Last()
if last != tc.last {
t.Errorf("interval #%d, expected last %d, got %d", i, tc.last, last)
diff --git a/network/stream/intervals_test.go b/network/stream/intervals_test.go
deleted file mode 100644
index 96a1efd4eb..0000000000
--- a/network/stream/intervals_test.go
+++ /dev/null
@@ -1,362 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "fmt"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethersphere/swarm/network/simulation"
- "github.com/ethersphere/swarm/network/timeouts"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/testutil"
-)
-
-func TestIntervalsLive(t *testing.T) {
- testIntervals(t, true, nil, false)
- testIntervals(t, true, nil, true)
-}
-
-func TestIntervalsHistory(t *testing.T) {
- testIntervals(t, false, NewRange(9, 26), false)
- testIntervals(t, false, NewRange(9, 26), true)
-}
-
-func TestIntervalsLiveAndHistory(t *testing.T) {
- testIntervals(t, true, NewRange(9, 26), false)
- testIntervals(t, true, NewRange(9, 26), true)
-}
-
-func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
-
- nodes := 2
- chunkCount := dataChunkCount
- externalStreamName := "externalStream"
- externalStreamSessionAt := uint64(50)
- externalStreamMaxKeys := uint64(100)
-
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
- if err != nil {
- return nil, nil, err
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Syncing: SyncingRegisterOnly,
- SkipCheck: skipCheck,
- }, nil)
- bucket.Store(bucketKeyRegistry, r)
-
- r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
- return newTestExternalClient(netStore), nil
- })
- r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
- return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys), nil
- })
-
- cleanup := func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- log.Info("Adding nodes to simulation")
- _, err := sim.AddNodesAndConnectChain(nodes)
- if err != nil {
- t.Fatal(err)
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
- defer cancel()
-
- if _, err := sim.WaitTillHealthy(ctx); err != nil {
- t.Fatal(err)
- }
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
- nodeIDs := sim.UpNodeIDs()
- storer := nodeIDs[0]
- checker := nodeIDs[1]
-
- item, ok := sim.NodeItem(storer, bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- fileStore := item.(*storage.FileStore)
-
- size := chunkCount * chunkSize
-
- _, wait, err := fileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
- if err != nil {
- return fmt.Errorf("store: %v", err)
- }
- err = wait(ctx)
- if err != nil {
- return fmt.Errorf("wait store: %v", err)
- }
-
- item, ok = sim.NodeItem(checker, bucketKeyRegistry)
- if !ok {
- return fmt.Errorf("No registry")
- }
- registry := item.(*Registry)
-
- liveErrC := make(chan error)
- historyErrC := make(chan error)
-
- err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
- if err != nil {
- return err
- }
-
- disconnected := watchDisconnections(ctx, sim)
- defer func() {
- if err != nil && disconnected.bool() {
- err = errors.New("disconnect events received")
- }
- }()
-
- go func() {
- if !live {
- close(liveErrC)
- return
- }
-
- var err error
- defer func() {
- liveErrC <- err
- }()
-
- // live stream
- var liveHashesChan chan []byte
- liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true))
- if err != nil {
- log.Error("get hashes", "err", err)
- return
- }
- i := externalStreamSessionAt
-
- // we have subscribed, enable notifications
- err = enableNotifications(registry, storer, NewStream(externalStreamName, "", true))
- if err != nil {
- return
- }
-
- for {
- select {
- case hash := <-liveHashesChan:
- h := binary.BigEndian.Uint64(hash)
- if h != i {
- err = fmt.Errorf("expected live hash %d, got %d", i, h)
- return
- }
- i++
- if i > externalStreamMaxKeys {
- return
- }
- case <-ctx.Done():
- return
- }
- }
- }()
-
- go func() {
- if live && history == nil {
- close(historyErrC)
- return
- }
-
- var err error
- defer func() {
- historyErrC <- err
- }()
-
- // history stream
- var historyHashesChan chan []byte
- historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false))
- if err != nil {
- log.Error("get hashes", "err", err)
- return
- }
-
- var i uint64
- historyTo := externalStreamMaxKeys
- if history != nil {
- i = history.From
- if history.To != 0 {
- historyTo = history.To
- }
- }
-
- // we have subscribed, enable notifications
- err = enableNotifications(registry, storer, NewStream(externalStreamName, "", false))
- if err != nil {
- return
- }
-
- for {
- select {
- case hash := <-historyHashesChan:
- h := binary.BigEndian.Uint64(hash)
- if h != i {
- err = fmt.Errorf("expected history hash %d, got %d", i, h)
- return
- }
- i++
- if i > historyTo {
- return
- }
- case <-ctx.Done():
- return
- }
- }
- }()
-
- if err := <-liveErrC; err != nil {
- return err
- }
- if err := <-historyErrC; err != nil {
- return err
- }
-
- return nil
- })
-
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-func getHashes(ctx context.Context, r *Registry, peerID enode.ID, s Stream) (chan []byte, error) {
- peer := r.getPeer(peerID)
-
- client, err := peer.getClient(ctx, s)
- if err != nil {
- return nil, err
- }
-
- c := client.Client.(*testExternalClient)
-
- return c.hashes, nil
-}
-
-func enableNotifications(r *Registry, peerID enode.ID, s Stream) error {
- peer := r.getPeer(peerID)
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- client, err := peer.getClient(ctx, s)
- if err != nil {
- return err
- }
-
- close(client.Client.(*testExternalClient).enableNotificationsC)
-
- return nil
-}
-
-type testExternalClient struct {
- hashes chan []byte
- netStore *storage.NetStore
- enableNotificationsC chan struct{}
-}
-
-func newTestExternalClient(netStore *storage.NetStore) *testExternalClient {
- return &testExternalClient{
- hashes: make(chan []byte),
- netStore: netStore,
- enableNotificationsC: make(chan struct{}),
- }
-}
-
-func (c *testExternalClient) NeedData(ctx context.Context, key []byte) (bool, func(context.Context) error) {
- fi, loaded, ok := c.netStore.GetOrCreateFetcher(ctx, key, "syncer")
- if !ok {
- return loaded, nil
- }
-
- select {
- case c.hashes <- key:
- case <-ctx.Done():
- log.Warn("testExternalClient NeedData context", "err", ctx.Err())
- return false, func(_ context.Context) error {
- return ctx.Err()
- }
- }
-
- return loaded, func(ctx context.Context) error {
- select {
- case <-fi.Delivered:
- case <-time.After(timeouts.SyncerClientWaitTimeout):
- return fmt.Errorf("chunk not delivered through syncing after %dsec. ref=%s", timeouts.SyncerClientWaitTimeout, fmt.Sprintf("%x", key))
- }
- return nil
- }
-}
-
-func (c *testExternalClient) Close() {}
-
-type testExternalServer struct {
- t string
- sessionAt uint64
- maxKeys uint64
-}
-
-func newTestExternalServer(t string, sessionAt, maxKeys uint64) *testExternalServer {
- return &testExternalServer{
- t: t,
- sessionAt: sessionAt,
- maxKeys: maxKeys,
- }
-}
-
-func (s *testExternalServer) SessionIndex() (uint64, error) {
- return s.sessionAt, nil
-}
-
-func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, error) {
- if to > s.maxKeys {
- to = s.maxKeys
- }
- b := make([]byte, HashSize*(to-from+1))
- for i := from; i <= to; i++ {
- binary.BigEndian.PutUint64(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
- }
- return b, from, to, nil
-}
-
-func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) {
- return make([]byte, 4096), nil
-}
-
-func (s *testExternalServer) Close() {}
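
The external test server above fabricates deterministic hashes so subscribers can verify ordering: each key index is big-endian encoded into the first 8 bytes of a HashSize-wide slot. A self-contained sketch of that batch construction (hashSize = 32 assumed, matching the stream package's HashSize):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const hashSize = 32 // assumed to match the stream package's HashSize

// makeBatch mirrors the deleted testExternalServer.SetNextBatch: one
// hashSize-byte fake hash per key in [from, to], capped at maxKeys,
// with the key index big-endian encoded in the first 8 bytes.
func makeBatch(from, to, maxKeys uint64) []byte {
	if to > maxKeys {
		to = maxKeys
	}
	b := make([]byte, hashSize*(to-from+1))
	for i := from; i <= to; i++ {
		binary.BigEndian.PutUint64(b[(i-from)*hashSize:(i-from+1)*hashSize], i)
	}
	return b
}

func main() {
	batch := makeBatch(50, 52, 100)
	for i := 0; i < len(batch); i += hashSize {
		fmt.Println(binary.BigEndian.Uint64(batch[i : i+hashSize])) // 50, 51, 52
	}
}
```
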
diff --git a/network/stream/lightnode_test.go b/network/stream/lightnode_test.go
deleted file mode 100644
index 0687e81864..0000000000
--- a/network/stream/lightnode_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package stream
-
-import (
- "testing"
-
- p2ptest "github.com/ethersphere/swarm/p2p/testing"
-)
-
-// This test checks the default behavior of the server, that is
-// when syncing is enabled.
-func TestLigthnodeRequestSubscriptionWithSync(t *testing.T) {
- registryOptions := &RegistryOptions{
- Syncing: SyncingRegisterOnly,
- }
- tester, _, _, teardown, err := newStreamerTester(registryOptions)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- syncStream := NewStream("SYNC", FormatSyncBinKey(1), false)
-
- err = tester.TestExchanges(
- p2ptest.Exchange{
- Label: "RequestSubscription",
- Triggers: []p2ptest.Trigger{
- {
- Code: 8,
- Msg: &RequestSubscriptionMsg{
- Stream: syncStream,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: syncStream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatalf("Got %v", err)
- }
-}
-
-// This test checks the Lightnode behavior of the server, that is
-// when syncing is disabled.
-func TestLigthnodeRequestSubscriptionWithoutSync(t *testing.T) {
- registryOptions := &RegistryOptions{
- Syncing: SyncingDisabled,
- }
- tester, _, _, teardown, err := newStreamerTester(registryOptions)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- node := tester.Nodes[0]
-
- syncStream := NewStream("SYNC", FormatSyncBinKey(1), false)
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "RequestSubscription",
- Triggers: []p2ptest.Trigger{
- {
- Code: 8,
- Msg: &RequestSubscriptionMsg{
- Stream: syncStream,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 7,
- Msg: &SubscribeErrorMsg{
- Error: "stream SYNC not registered",
- },
- Peer: node.ID(),
- },
- },
- }, p2ptest.Exchange{
- Label: "RequestSubscription",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: syncStream,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 7,
- Msg: &SubscribeErrorMsg{
- Error: "stream SYNC not registered",
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatalf("Got %v", err)
- }
-}
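
Both lightnode cases assert protocol behavior purely as trigger/expect message tables. Schematically, with plain structs standing in for the p2ptest API:

```go
package main

import "fmt"

// exchange is a schematic stand-in for p2ptest.Exchange: messages we
// inject (triggers) and messages the node under test must emit (expects).
type exchange struct {
	label    string
	triggers []string
	expects  []string
}

func main() {
	withSync := exchange{
		label:    "RequestSubscription",
		triggers: []string{"RequestSubscriptionMsg{Stream: SYNC}"},
		expects:  []string{"SubscribeMsg{Stream: SYNC}"},
	}
	withoutSync := exchange{
		label:    "RequestSubscription",
		triggers: []string{"RequestSubscriptionMsg{Stream: SYNC}"},
		expects:  []string{`SubscribeErrorMsg{Error: "stream SYNC not registered"}`},
	}
	fmt.Printf("%+v\n%+v\n", withSync, withoutSync)
}
```
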
diff --git a/network/stream/messages.go b/network/stream/messages.go
deleted file mode 100644
index 261782c2ff..0000000000
--- a/network/stream/messages.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethersphere/swarm/log"
- bv "github.com/ethersphere/swarm/network/bitvector"
- "github.com/ethersphere/swarm/storage"
-)
-
-var syncBatchTimeout = 30 * time.Second
-
-// Stream defines a unique stream identifier.
-type Stream struct {
- // Name is used for Client and Server functions identification.
- Name string
- // Key is the name of specific stream data.
- Key string
- // Live defines whether the stream delivers only new data
- // for the specific stream.
- Live bool
-}
-
-func NewStream(name string, key string, live bool) Stream {
- return Stream{
- Name: name,
- Key: key,
- Live: live,
- }
-}
-
-// String returns a stream id based on all Stream fields.
-func (s Stream) String() string {
- t := "h"
- if s.Live {
- t = "l"
- }
- return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
-}
-
-// SubscribeMsg is the protocol msg for requesting a stream (section)
-type SubscribeMsg struct {
- Stream Stream
- History *Range `rlp:"nil"`
- Priority uint8 // delivered on priority channel
-}
-
-// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
-// specific stream
-type RequestSubscriptionMsg struct {
- Stream Stream
- History *Range `rlp:"nil"`
- Priority uint8 // delivered on priority channel
-}
-
-func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) {
- log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr, p.ID(), req.Stream))
- if err = p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority); err != nil {
- // The error will be sent as a subscribe error message
- // and will not be returned as it will prevent any new message
- // exchange between peers over p2p. Instead, error will be returned
- // only if there is one from sending subscribe error message.
- err = p.Send(ctx, &SubscribeErrorMsg{
- Error: err.Error(),
- })
- }
- return err
-}
-
-func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) {
- metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)
-
- defer func() {
- if err != nil {
- // The error will be sent as a subscribe error message
- // and will not be returned as it will prevent any new message
- // exchange between peers over p2p. Instead, error will be returned
- // only if there is one from sending subscribe error message.
- err = p.Send(context.TODO(), &SubscribeErrorMsg{
- Error: err.Error(),
- })
- }
- }()
-
- log.Debug("received subscription", "from", p.streamer.addr, "peer", p.ID(), "stream", req.Stream, "history", req.History)
-
- f, err := p.streamer.GetServerFunc(req.Stream.Name)
- if err != nil {
- return err
- }
-
- s, err := f(p, req.Stream.Key, req.Stream.Live)
- if err != nil {
- return err
- }
- os, err := p.setServer(req.Stream, s, req.Priority)
- if err != nil {
- return err
- }
-
- var from uint64
- var to uint64
- if !req.Stream.Live && req.History != nil {
- from = req.History.From
- to = req.History.To
- }
-
- go func() {
- if err := p.SendOfferedHashes(os, from, to); err != nil {
- log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
- }
- }()
-
- if req.Stream.Live && req.History != nil {
- // subscribe to the history stream
- s, err := f(p, req.Stream.Key, false)
- if err != nil {
- return err
- }
-
- os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
- if err != nil {
- return err
- }
- go func() {
- if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
- log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
- }
- }()
- }
-
- return nil
-}
-
-type SubscribeErrorMsg struct {
- Error string
-}
-
-func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
- //TODO the error should be channeled to whoever calls the subscribe
- return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
-}
-
-type UnsubscribeMsg struct {
- Stream Stream
-}
-
-func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
- return p.removeServer(req.Stream)
-}
-
-type QuitMsg struct {
- Stream Stream
-}
-
-func (p *Peer) handleQuitMsg(req *QuitMsg) error {
- err := p.removeClient(req.Stream)
- if _, ok := err.(*notFoundError); ok {
- return nil
- }
- return err
-}
-
-// OfferedHashesMsg is the protocol msg for offering to hand over a
-// stream section
-type OfferedHashesMsg struct {
- Stream Stream // name of Stream
- From, To uint64 // peer and db-specific entry count
- Hashes []byte // stream of hashes (128)
-}
-
-// String pretty prints OfferedHashesMsg
-func (m OfferedHashesMsg) String() string {
- return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
-}
-
-// handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface
-// Filter method
-func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error {
- metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
-
- c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
- if err != nil {
- return err
- }
-
- hashes := req.Hashes
- lenHashes := len(hashes)
- if lenHashes%HashSize != 0 {
- return fmt.Errorf("error invalid hashes length (len: %v)", lenHashes)
- }
-
- want, err := bv.New(lenHashes / HashSize)
- if err != nil {
- return fmt.Errorf("error initiaising bitvector of length %v: %v", lenHashes/HashSize, err)
- }
-
- var wantDelaySet bool
- var wantDelay time.Time
-
- ctr := 0
- errC := make(chan error)
- ctx, cancel := context.WithTimeout(ctx, syncBatchTimeout)
-
- ctx = context.WithValue(ctx, "source", p.ID().String())
- for i := 0; i < lenHashes; i += HashSize {
- hash := hashes[i : i+HashSize]
-
- log.Trace("checking offered hash", "ref", fmt.Sprintf("%x", hash))
-
- if _, wait := c.NeedData(ctx, hash); wait != nil {
- ctr++
-
- // set the bit, so create a request
- want.Set(i / HashSize)
- log.Trace("need data", "ref", fmt.Sprintf("%x", hash), "request", true)
-
- // measure how long it takes before we mark chunks for retrieval, and actually send the request
- if !wantDelaySet {
- wantDelaySet = true
- wantDelay = time.Now()
- }
-
- // create request and wait until the chunk data arrives and is stored
- go func(w func(context.Context) error) {
- select {
- case errC <- w(ctx):
- case <-ctx.Done():
- }
- }(wait)
- }
- }
-
- go func() {
- defer cancel()
- for i := 0; i < ctr; i++ {
- select {
- case err := <-errC:
- if err != nil {
- log.Debug("client.handleOfferedHashesMsg() error waiting for chunk, dropping peer", "peer", p.ID(), "err", err)
- p.Drop()
- return
- }
- case <-ctx.Done():
- log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
- return
- case <-c.quit:
- log.Debug("client.handleOfferedHashesMsg() quit")
- return
- }
- }
- select {
- case c.next <- c.AddInterval(req.From, req.To):
- case <-c.quit:
- log.Debug("client.handleOfferedHashesMsg() quit")
- case <-ctx.Done():
- log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
- }
- }()
- // only send wantedKeysMsg if all missing chunks of the previous batch arrived
- // except
- if c.stream.Live {
- c.sessionAt = req.From
- }
- from, to := c.nextBatch(req.To + 1)
- log.Trace("set next batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "addr", p.streamer.addr)
- if from == to {
- return nil
- }
-
- msg := &WantedHashesMsg{
- Stream: req.Stream,
- Want: want.Bytes(),
- From: from,
- To: to,
- }
-
- log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
- select {
- case err := <-c.next:
- if err != nil {
- log.Warn("c.next error dropping peer", "err", err)
- p.Drop()
- return err
- }
- case <-c.quit:
- log.Debug("client.handleOfferedHashesMsg() quit")
- return nil
- case <-ctx.Done():
- log.Debug("client.handleOfferedHashesMsg() context done", "ctx.Err()", ctx.Err())
- return nil
- }
- log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
-
- // record want delay
- if wantDelaySet {
- metrics.GetOrRegisterResettingTimer("handleoffered.wantdelay", nil).UpdateSince(wantDelay)
- }
-
- err = p.SendPriority(ctx, msg, c.priority)
- if err != nil {
- log.Warn("SendPriority error", "err", err)
- }
-
- return nil
-}
-
-// WantedHashesMsg is the protocol msg data for signaling which hashes
-// offered in OfferedHashesMsg downstream peer actually wants sent over
-type WantedHashesMsg struct {
- Stream Stream
- Want []byte // bitvector indicating which keys of the batch needed
- From, To uint64 // next interval offset - empty if not to be continued
-}
-
-// String pretty prints WantedHashesMsg
-func (m WantedHashesMsg) String() string {
- return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
-}
-
-// handleWantedHashesMsg protocol msg handler
-// * sends the next batch of unsynced keys
-// * sends the actual data chunks as per WantedHashesMsg
-func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error {
- metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)
-
- log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
- s, err := p.getServer(req.Stream)
- if err != nil {
- return err
- }
- hashes := s.currentBatch
-	// launch in a goroutine since GetBatch blocks until new hashes arrive
- go func() {
- if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
- log.Warn("SendOfferedHashes error", "peer", p.ID().TerminalString(), "err", err)
- }
- }()
- // go p.SendOfferedHashes(s, req.From, req.To)
- l := len(hashes) / HashSize
-
- log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
- want, err := bv.NewFromBytes(req.Want, l)
- if err != nil {
- return fmt.Errorf("error initiaising bitvector of length %v: %v", l, err)
- }
- for i := 0; i < l; i++ {
- if want.Get(i) {
- metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)
-
- hash := hashes[i*HashSize : (i+1)*HashSize]
- data, err := s.GetData(ctx, hash)
- if err != nil {
- return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
- }
- chunk := storage.NewChunk(hash, data)
- syncing := true
- if err := p.Deliver(ctx, chunk, s.priority, syncing); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Handover represents a statement that the upstream peer hands over the stream section
-type Handover struct {
- Stream Stream // name of stream
- Start, End uint64 // index of hashes
- Root []byte // Root hash for indexed segment inclusion proofs
-}
-
-// Takeover represents a statement that downstream peer took over (stored all data)
-// handed over
-type Takeover Handover
-
-// TakeoverProof represents a signed statement that the downstream peer took over
-// the stream section
-type TakeoverProof struct {
- Sig []byte // Sign(Hash(Serialisation(Takeover)))
- *Takeover
-}
-
-// TakeoverProofMsg is the protocol msg sent by downstream peer
-type TakeoverProofMsg TakeoverProof
-
-// String pretty prints TakeoverProofMsg
-func (m TakeoverProofMsg) String() string {
- return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
-}
-
-func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error {
- _, err := p.getServer(req.Stream)
- // store the strongest takeoverproof for the stream in streamer
- return err
-}
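
At the heart of the deleted messages.go is the offered/wanted handshake: the client marks, in a bitvector indexed like the offered batch, exactly which hashes it still needs, and the server delivers only those. A sketch with a plain []bool standing in for the swarm bitvector package:

```go
package main

import "fmt"

// wantBits sketches the client side of the handshake: for each offered
// hash, decide whether we still need the data and set the matching bit.
func wantBits(offered [][]byte, need func([]byte) bool) []bool {
	want := make([]bool, len(offered))
	for i, h := range offered {
		if need(h) {
			want[i] = true
		}
	}
	return want
}

func main() {
	offered := [][]byte{{0x01}, {0x02}, {0x03}}
	have := map[byte]bool{0x02: true} // pretend chunk 0x02 is already stored
	want := wantBits(offered, func(h []byte) bool { return !have[h[0]] })
	for i, w := range want {
		fmt.Printf("hash %x wanted=%v\n", offered[i], w)
	}
}
```
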
diff --git a/network/stream/peer.go b/network/stream/peer.go
deleted file mode 100644
index 1f29888347..0000000000
--- a/network/stream/peer.go
+++ /dev/null
@@ -1,573 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network"
- pq "github.com/ethersphere/swarm/network/priorityqueue"
- "github.com/ethersphere/swarm/network/stream/intervals"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/tracing"
-)
-
-type notFoundError struct {
- t string
- s Stream
-}
-
-func newNotFoundError(t string, s Stream) *notFoundError {
-	return &notFoundError{t: t, s: s}
-}
-
-func (e *notFoundError) Error() string {
- return fmt.Sprintf("%s not found for stream %q", e.t, e.s)
-}
-
-// ErrMaxPeerServers will be returned if peer server limit is reached.
-// It will be sent in the SubscribeErrorMsg.
-var ErrMaxPeerServers = errors.New("max peer servers")
-
-// Peer is the Peer extension for the streaming protocol
-type Peer struct {
- *network.BzzPeer
- streamer *Registry
- pq *pq.PriorityQueue
- serverMu sync.RWMutex
- clientMu sync.RWMutex // protects both clients and clientParams
- servers map[Stream]*server
- clients map[Stream]*client
-	// clientParams map keeps required client arguments
-	// that are set on Registry.Subscribe and used
-	// when creating a new client in the offered hashes handler.
- clientParams map[Stream]*clientParams
- quit chan struct{}
-}
-
-type WrappedPriorityMsg struct {
- Context context.Context
- Msg interface{}
-}
-
-// NewPeer is the constructor for Peer
-func NewPeer(peer *network.BzzPeer, streamer *Registry) *Peer {
- p := &Peer{
- BzzPeer: peer,
- pq: pq.New(int(PriorityQueue), PriorityQueueCap),
- streamer: streamer,
- servers: make(map[Stream]*server),
- clients: make(map[Stream]*client),
- clientParams: make(map[Stream]*clientParams),
- quit: make(chan struct{}),
- }
- ctx, cancel := context.WithCancel(context.Background())
- go p.pq.Run(ctx, func(i interface{}) {
- wmsg := i.(WrappedPriorityMsg)
- err := p.Send(wmsg.Context, wmsg.Msg)
- if err != nil {
- log.Error("Message send error, dropping peer", "peer", p.ID(), "err", err)
- p.Drop()
- }
- })
-
- // basic monitoring for pq contention
- go func(pq *pq.PriorityQueue) {
- ticker := time.NewTicker(5 * time.Second)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- var lenMaxi int
- var capMaxi int
- for k := range pq.Queues {
- if lenMaxi < len(pq.Queues[k]) {
- lenMaxi = len(pq.Queues[k])
- }
-
- if capMaxi < cap(pq.Queues[k]) {
- capMaxi = cap(pq.Queues[k])
- }
- }
-
- metrics.GetOrRegisterGauge(fmt.Sprintf("pq_len_%s", p.ID().TerminalString()), nil).Update(int64(lenMaxi))
- metrics.GetOrRegisterGauge(fmt.Sprintf("pq_cap_%s", p.ID().TerminalString()), nil).Update(int64(capMaxi))
- case <-p.quit:
- return
- }
- }
- }(p.pq)
-
- go func() {
- <-p.quit
-
- cancel()
- }()
- return p
-}
-
-// Deliver sends a storeRequestMsg protocol message to the peer
-// Depending on the `syncing` parameter we send different message types
-func (p *Peer) Deliver(ctx context.Context, chunk storage.Chunk, priority uint8, syncing bool) error {
- var msg interface{}
-
- metrics.GetOrRegisterCounter("peer.deliver", nil).Inc(1)
-
-	//we send different types of messages depending on whether the delivery is for syncing or retrieval,
-	//even though the handling and content of the message are the same,
-	//because swap accounting decides which messages need accounting based on the message type
- if syncing {
- msg = &ChunkDeliveryMsgSyncing{
- Addr: chunk.Address(),
- SData: chunk.Data(),
- }
- } else {
- msg = &ChunkDeliveryMsgRetrieval{
- Addr: chunk.Address(),
- SData: chunk.Data(),
- }
- }
-
- return p.SendPriority(ctx, msg, priority)
-}
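A usage sketch of that distinction: the same chunk delivered in both accounting contexts. Only ChunkDeliveryMsgRetrieval appears in the price matrix built by createPriceOracle in stream.go, which is why the wire types must differ even though handling is identical:

func deliverBoth(ctx context.Context, p *Peer, ch storage.Chunk) error {
	// sent as ChunkDeliveryMsgSyncing: not in the price matrix, so not swap-accounted
	if err := p.Deliver(ctx, ch, Mid, true); err != nil {
		return err
	}
	// sent as ChunkDeliveryMsgRetrieval: priced per byte, paid by the receiver
	return p.Deliver(ctx, ch, Top, false)
}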
-
-// SendPriority sends message to the peer using the outgoing priority queue
-func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error {
- defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now())
- ctx = tracing.StartSaveSpan(ctx)
- metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1)
- wmsg := WrappedPriorityMsg{
- Context: ctx,
- Msg: msg,
- }
- err := p.pq.Push(wmsg, int(priority))
- if err != nil {
- log.Error("err on p.pq.Push", "err", err, "peer", p.ID())
- }
- return err
-}
-
-// SendOfferedHashes sends OfferedHashesMsg protocol msg
-func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error {
- defer metrics.GetOrRegisterResettingTimer("send.offered.hashes", nil).UpdateSince(time.Now())
-
- hashes, from, to, err := s.setNextBatch(f, t)
- if err != nil {
- return err
- }
-	// an empty batch is returned only when the server is quitting
- if len(hashes) == 0 {
- return nil
- }
- s.currentBatch = hashes
- msg := &OfferedHashesMsg{
- Hashes: hashes,
- From: from,
- To: to,
- Stream: s.stream,
- }
- log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to)
- return p.SendPriority(context.TODO(), msg, s.priority)
-}
-
-func (p *Peer) getServer(s Stream) (*server, error) {
- p.serverMu.RLock()
- defer p.serverMu.RUnlock()
-
- server := p.servers[s]
- if server == nil {
- return nil, newNotFoundError("server", s)
- }
- return server, nil
-}
-
-func (p *Peer) setServer(s Stream, o Server, priority uint8) (*server, error) {
- p.serverMu.Lock()
- defer p.serverMu.Unlock()
-
- if p.servers[s] != nil {
- return nil, fmt.Errorf("server %s already registered", s)
- }
-
- if p.streamer.maxPeerServers > 0 && len(p.servers) >= p.streamer.maxPeerServers {
- return nil, ErrMaxPeerServers
- }
-
- sessionIndex, err := o.SessionIndex()
- if err != nil {
- return nil, err
- }
- os := &server{
- Server: o,
- stream: s,
- priority: priority,
- sessionIndex: sessionIndex,
- }
- p.servers[s] = os
- return os, nil
-}
-
-func (p *Peer) removeServer(s Stream) error {
- p.serverMu.Lock()
- defer p.serverMu.Unlock()
-
- server, ok := p.servers[s]
- if !ok {
- return newNotFoundError("server", s)
- }
- server.Close()
- delete(p.servers, s)
- return nil
-}
-
-func (p *Peer) getClient(ctx context.Context, s Stream) (c *client, err error) {
- var params *clientParams
- func() {
- p.clientMu.RLock()
- defer p.clientMu.RUnlock()
-
- c = p.clients[s]
- if c != nil {
- return
- }
- params = p.clientParams[s]
- }()
- if c != nil {
- return c, nil
- }
-
- if params != nil {
- if err := params.waitClient(ctx); err != nil {
- return nil, err
- }
- }
-
- p.clientMu.RLock()
- defer p.clientMu.RUnlock()
-
- c = p.clients[s]
- if c != nil {
- return c, nil
- }
- return nil, newNotFoundError("client", s)
-}
-
-func (p *Peer) getOrSetClient(s Stream, from, to uint64) (c *client, created bool, err error) {
- p.clientMu.Lock()
- defer p.clientMu.Unlock()
-
- c = p.clients[s]
- if c != nil {
- return c, false, nil
- }
-
- f, err := p.streamer.GetClientFunc(s.Name)
- if err != nil {
- return nil, false, err
- }
-
- is, err := f(p, s.Key, s.Live)
- if err != nil {
- return nil, false, err
- }
-
- cp, err := p.getClientParams(s)
- if err != nil {
- return nil, false, err
- }
- defer func() {
- if err == nil {
- if err := p.removeClientParams(s); err != nil {
- log.Error("stream set client: remove client params", "stream", s, "peer", p, "err", err)
- }
- }
- }()
-
- intervalsKey := peerStreamIntervalsKey(p, s)
- if s.Live {
- // try to find previous history and live intervals and merge live into history
- historyKey := peerStreamIntervalsKey(p, NewStream(s.Name, s.Key, false))
- historyIntervals := &intervals.Intervals{}
- err := p.streamer.intervalsStore.Get(historyKey, historyIntervals)
- switch err {
- case nil:
- liveIntervals := &intervals.Intervals{}
- err := p.streamer.intervalsStore.Get(intervalsKey, liveIntervals)
- switch err {
- case nil:
- historyIntervals.Merge(liveIntervals)
- if err := p.streamer.intervalsStore.Put(historyKey, historyIntervals); err != nil {
- log.Error("stream set client: put history intervals", "stream", s, "peer", p, "err", err)
- }
- case state.ErrNotFound:
- default:
- log.Error("stream set client: get live intervals", "stream", s, "peer", p, "err", err)
- }
- case state.ErrNotFound:
- default:
- log.Error("stream set client: get history intervals", "stream", s, "peer", p, "err", err)
- }
- }
-
- if err := p.streamer.intervalsStore.Put(intervalsKey, intervals.NewIntervals(from)); err != nil {
- return nil, false, err
- }
-
- next := make(chan error, 1)
- c = &client{
- Client: is,
- stream: s,
- priority: cp.priority,
- to: cp.to,
- next: next,
- quit: make(chan struct{}),
- intervalsStore: p.streamer.intervalsStore,
- intervalsKey: intervalsKey,
- }
- p.clients[s] = c
- cp.clientCreated() // unblock all possible getClient calls that are waiting
- next <- nil // this is to allow wantedKeysMsg before first batch arrives
- return c, true, nil
-}
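A self-contained sketch of the intervals bookkeeping performed above, assuming only calls that appear in this package (intervals.NewIntervals, Merge, and the state store's Get/Put; the key names are hypothetical):

func intervalsExample() error {
	store := state.NewInmemoryStore() // in-memory state.Store, as used by the tests
	defer store.Close()

	// a fresh live interval record starting at index 42, as written on client creation
	if err := store.Put("live-key", intervals.NewIntervals(42)); err != nil {
		return err
	}

	// merging live progress into history, as done above when a live client is recreated
	history := intervals.NewIntervals(0)
	live := &intervals.Intervals{}
	if err := store.Get("live-key", live); err != nil {
		return err
	}
	history.Merge(live)
	return store.Put("history-key", history)
}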
-
-func (p *Peer) removeClient(s Stream) error {
- p.clientMu.Lock()
- defer p.clientMu.Unlock()
-
- client, ok := p.clients[s]
- if !ok {
- return newNotFoundError("client", s)
- }
- client.close()
- delete(p.clients, s)
- return nil
-}
-
-func (p *Peer) setClientParams(s Stream, params *clientParams) error {
- p.clientMu.Lock()
- defer p.clientMu.Unlock()
-
- if p.clients[s] != nil {
- return fmt.Errorf("client %s already exists", s)
- }
- if p.clientParams[s] != nil {
- return fmt.Errorf("client params %s already set", s)
- }
- p.clientParams[s] = params
- return nil
-}
-
-func (p *Peer) getClientParams(s Stream) (*clientParams, error) {
- params := p.clientParams[s]
- if params == nil {
- return nil, fmt.Errorf("client params '%v' not provided to peer %v", s, p.ID())
- }
- return params, nil
-}
-
-func (p *Peer) removeClientParams(s Stream) error {
- _, ok := p.clientParams[s]
- if !ok {
- return newNotFoundError("client params", s)
- }
- delete(p.clientParams, s)
- return nil
-}
-
-func (p *Peer) close() {
- p.serverMu.Lock()
- defer p.serverMu.Unlock()
-
- for _, s := range p.servers {
- s.Close()
- }
-
- p.servers = nil
-}
-
-// runUpdateSyncing is a long running function that creates the initial
-// syncing subscriptions to the peer and then waits for neighbourhood depth
-// changes, creating new subscriptions or quitting existing ones based on the
-// new depth and on whether the peer enters or leaves the nearest neighbourhood,
-// using the syncSubscriptionsDiff and updateSyncSubscriptions functions.
-func (p *Peer) runUpdateSyncing() {
- timer := time.NewTimer(p.streamer.syncUpdateDelay)
- defer timer.Stop()
-
- select {
- case <-timer.C:
- case <-p.streamer.quit:
- return
- }
-
- kad := p.streamer.delivery.kad
- po := chunk.Proximity(p.BzzAddr.Over(), kad.BaseAddr())
-
- depth := kad.NeighbourhoodDepth()
-
- log.Debug("update syncing subscriptions: initial", "peer", p.ID(), "po", po, "depth", depth)
-
- // initial subscriptions
- p.updateSyncSubscriptions(syncSubscriptionsDiff(po, -1, depth, kad.MaxProxDisplay))
-
- depthChangeSignal, unsubscribeDepthChangeSignal := kad.SubscribeToNeighbourhoodDepthChange()
- defer unsubscribeDepthChangeSignal()
-
- prevDepth := depth
- for {
- select {
- case _, ok := <-depthChangeSignal:
- if !ok {
- return
- }
- // update subscriptions for this peer when depth changes
- depth := kad.NeighbourhoodDepth()
- log.Debug("update syncing subscriptions", "peer", p.ID(), "po", po, "depth", depth)
- p.updateSyncSubscriptions(syncSubscriptionsDiff(po, prevDepth, depth, kad.MaxProxDisplay))
- prevDepth = depth
- case <-p.streamer.quit:
- return
- case <-p.quit:
- return
- }
- }
-}
-
-// updateSyncSubscriptions accepts two slices of integers, the first one
-// representing proximity order bins for required syncing subscriptions
-// and the second one representing bins for syncing subscriptions that
-// need to be removed. This function sends subscription request messages
-// and quit messages for the provided bins.
-func (p *Peer) updateSyncSubscriptions(subBins, quitBins []int) {
- if p.streamer.getPeer(p.ID()) == nil {
- log.Debug("update syncing subscriptions", "peer not found", p.ID())
- return
- }
- log.Debug("update syncing subscriptions", "peer", p.ID(), "subscribe", subBins, "quit", quitBins)
- for _, po := range subBins {
- p.subscribeSync(po)
- }
- for _, po := range quitBins {
- p.quitSync(po)
- }
-}
-
-// subscribeSync sends the request for syncing subscriptions to the peer
-// using subscriptionFunc. This function is used to request syncing subscriptions
-// when a new peer is added to the registry and on neighbourhood depth change.
-func (p *Peer) subscribeSync(po int) {
- err := subscriptionFunc(p.streamer, p.ID(), uint8(po))
- if err != nil {
- log.Error("subscription", "err", err)
- }
-}
-
-// quitSync sends the quit message for live and history syncing streams to the peer.
-// This function is used in runUpdateSyncing indirectly over updateSyncSubscriptions
-// to remove unneeded syncing subscriptions on neighbourhood depth change.
-func (p *Peer) quitSync(po int) {
- live := NewStream("SYNC", FormatSyncBinKey(uint8(po)), true)
- history := getHistoryStream(live)
- err := p.streamer.Quit(p.ID(), live)
- if err != nil && err != p2p.ErrShuttingDown {
- log.Error("quit", "err", err, "peer", p.ID(), "stream", live)
- }
- err = p.streamer.Quit(p.ID(), history)
- if err != nil && err != p2p.ErrShuttingDown {
- log.Error("quit", "err", err, "peer", p.ID(), "stream", history)
- }
-
- err = p.removeServer(live)
- if err != nil {
- log.Error("remove server", "err", err, "peer", p.ID(), "stream", live)
- }
- err = p.removeServer(history)
- if err != nil {
-		log.Error("remove server", "err", err, "peer", p.ID(), "stream", history)
- }
-}
-
-// syncSubscriptionsDiff calculates to which proximity order bins a peer
-// (with po peerPO) needs to be subscribed after kademlia neighbourhood depth
-// change from prevDepth to newDepth. Max argument limits the number of
-// proximity order bins. Returned values are slices of integers which represent
-// proximity order bins: the first one holds bins for which additional subscriptions
-// need to be requested and the second one holds bins whose subscriptions need to
-// be quit. Argument prevDepth with a value less than 0 represents no previous
-// depth, used for initial syncing subscriptions.
-func syncSubscriptionsDiff(peerPO, prevDepth, newDepth, max int) (subBins, quitBins []int) {
- newStart, newEnd := syncBins(peerPO, newDepth, max)
- if prevDepth < 0 {
- // no previous depth, return the complete range
- // for subscriptions requests and nothing for quitting
- return intRange(newStart, newEnd), nil
- }
-
- prevStart, prevEnd := syncBins(peerPO, prevDepth, max)
-
- if newStart < prevStart {
- subBins = append(subBins, intRange(newStart, prevStart)...)
- }
-
- if prevStart < newStart {
- quitBins = append(quitBins, intRange(prevStart, newStart)...)
- }
-
- if newEnd < prevEnd {
- quitBins = append(quitBins, intRange(newEnd, prevEnd)...)
- }
-
- if prevEnd < newEnd {
- subBins = append(subBins, intRange(prevEnd, newEnd)...)
- }
-
- return subBins, quitBins
-}
-
-// syncBins returns the range to which proximity order bins syncing
-// subscriptions need to be requested, based on peer proximity and
-// kademlia neighbourhood depth. Returned range is [start,end), inclusive for
-// start and exclusive for end.
-func syncBins(peerPO, depth, max int) (start, end int) {
- if peerPO < depth {
- // subscribe only to peerPO bin if it is not
- // in the nearest neighbourhood
- return peerPO, peerPO + 1
- }
- // subscribe from depth to max bin if the peer
- // is in the nearest neighbourhood
- return depth, max + 1
-}
-
-// intRange returns the slice of integers [start,end). The start
-// is inclusive and the end is not.
-func intRange(start, end int) (r []int) {
- for i := start; i < end; i++ {
- r = append(r, i)
- }
- return r
-}
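A worked example, consistent with the test table in peer_test.go below: for a peer at proximity order 4, a depth change from 0 to 4 only quits bins, since the peer's sync range shrinks from [0,17) to [4,17). The max value 16 mirrors MaxProxDisplay as used in the tests:

func syncBinsExample() {
	max := 16

	start, end := syncBins(4, 0, max) // peerPO 4 >= depth 0: subscribe [0,17)
	fmt.Println(start, end)           // 0 17

	start, end = syncBins(4, 4, max) // peerPO 4 >= depth 4: subscribe [4,17)
	fmt.Println(start, end)          // 4 17

	subBins, quitBins := syncSubscriptionsDiff(4, 0, 4, max)
	fmt.Println(subBins, quitBins) // [] [0 1 2 3]
}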
diff --git a/network/stream/peer_test.go b/network/stream/peer_test.go
deleted file mode 100644
index deaec6afb6..0000000000
--- a/network/stream/peer_test.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "fmt"
- "reflect"
- "sort"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/simulation"
- "github.com/ethersphere/swarm/state"
-)
-
-// TestSyncSubscriptionsDiff validates the output of syncSubscriptionsDiff
-// function for various arguments.
-func TestSyncSubscriptionsDiff(t *testing.T) {
- max := network.NewKadParams().MaxProxDisplay
- for _, tc := range []struct {
- po, prevDepth, newDepth int
- subBins, quitBins []int
- }{
- {
- po: 0, prevDepth: -1, newDepth: 0,
- subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 1, prevDepth: -1, newDepth: 0,
- subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 2, prevDepth: -1, newDepth: 0,
- subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 0, prevDepth: -1, newDepth: 1,
- subBins: []int{0},
- },
- {
- po: 1, prevDepth: -1, newDepth: 1,
- subBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 2, prevDepth: -1, newDepth: 2,
- subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 3, prevDepth: -1, newDepth: 2,
- subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 1, prevDepth: -1, newDepth: 2,
- subBins: []int{1},
- },
- {
- po: 0, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
- },
- {
- po: 1, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
- },
- {
- po: 0, prevDepth: 0, newDepth: 1, // 0-16 -> 0
- quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 0, prevDepth: 0, newDepth: 2, // 0-16 -> 0
- quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 1, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
- quitBins: []int{0},
- },
- {
- po: 1, prevDepth: 1, newDepth: 0, // 1-16 -> 0-16
- subBins: []int{0},
- },
- {
- po: 4, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
- quitBins: []int{0},
- },
- {
- po: 4, prevDepth: 0, newDepth: 4, // 0-16 -> 4-16
- quitBins: []int{0, 1, 2, 3},
- },
- {
- po: 4, prevDepth: 0, newDepth: 5, // 0-16 -> 4
- quitBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 4, prevDepth: 5, newDepth: 0, // 4 -> 0-16
- subBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
- },
- {
- po: 4, prevDepth: 5, newDepth: 6, // 4 -> 4
- },
- } {
- subBins, quitBins := syncSubscriptionsDiff(tc.po, tc.prevDepth, tc.newDepth, max)
- if fmt.Sprint(subBins) != fmt.Sprint(tc.subBins) {
- t.Errorf("po: %v, prevDepth: %v, newDepth: %v: got subBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, subBins, tc.subBins)
- }
- if fmt.Sprint(quitBins) != fmt.Sprint(tc.quitBins) {
- t.Errorf("po: %v, prevDepth: %v, newDepth: %v: got quitBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, quitBins, tc.quitBins)
- }
- }
-}
-
-// TestUpdateSyncingSubscriptions validates that syncing subscriptions are correctly
-// made on initial node connections and that subscriptions are correctly changed
-// when kademlia neighbourhood depth is changed by connecting more nodes.
-func TestUpdateSyncingSubscriptions(t *testing.T) {
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
- if err != nil {
- return nil, nil, err
- }
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- SyncUpdateDelay: 100 * time.Millisecond,
- Syncing: SyncingAutoSubscribe,
- }, nil)
- cleanup = func() {
- r.Close()
- clean()
- }
- bucket.Store("bzz-address", addr)
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
- defer cancel()
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
-		// initial nodes, with the first one as the pivot at the center of the star
- ids, err := sim.AddNodesAndConnectStar(10)
- if err != nil {
- return err
- }
-
- // pivot values
- pivotRegistryID := ids[0]
- pivotRegistry := sim.Service("streamer", pivotRegistryID).(*Registry)
- pivotKademlia := pivotRegistry.delivery.kad
- // nodes proximities from the pivot node
- nodeProximities := make(map[string]int)
- for _, id := range ids[1:] {
- bzzAddr, ok := sim.NodeItem(id, "bzz-address")
- if !ok {
- t.Fatal("no bzz address for node")
- }
- nodeProximities[id.String()] = chunk.Proximity(pivotKademlia.BaseAddr(), bzzAddr.(*network.BzzAddr).Over())
- }
- // wait until sync subscriptions are done for all nodes
- waitForSubscriptions(t, pivotRegistry, ids[1:]...)
-
- // check initial sync streams
- err = checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
- if err != nil {
- return err
- }
-
- // add more nodes until the depth is changed
- prevDepth := pivotKademlia.NeighbourhoodDepth()
-		var noDepthChangeChecked bool // true if there was a check while the depth was unchanged
- for {
- ids, err := sim.AddNodes(5)
- if err != nil {
- return err
- }
- // add new nodes to sync subscriptions check
- for _, id := range ids {
- bzzAddr, ok := sim.NodeItem(id, "bzz-address")
- if !ok {
- t.Fatal("no bzz address for node")
- }
- nodeProximities[id.String()] = chunk.Proximity(pivotKademlia.BaseAddr(), bzzAddr.(*network.BzzAddr).Over())
- }
- err = sim.Net.ConnectNodesStar(ids, pivotRegistryID)
- if err != nil {
- return err
- }
- waitForSubscriptions(t, pivotRegistry, ids...)
-
- newDepth := pivotKademlia.NeighbourhoodDepth()
- // depth is not changed, check if streams are still correct
- if newDepth == prevDepth {
- err = checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
- if err != nil {
- return err
- }
- noDepthChangeChecked = true
- }
- // do the final check when depth is changed and
- // there has been at least one check
- // for the case when depth is not changed
- if newDepth != prevDepth && noDepthChangeChecked {
- // check sync streams for changed depth
- return checkSyncStreamsWithRetry(pivotRegistry, nodeProximities)
- }
- prevDepth = newDepth
- }
- })
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-// waitForSubscriptions is a test helper function that blocks until
-// stream server subscriptions are established on the provided registry
-// to the nodes with provided IDs.
-func waitForSubscriptions(t *testing.T, r *Registry, ids ...enode.ID) {
- t.Helper()
-
- for retries := 0; retries < 100; retries++ {
- subs := r.api.GetPeerServerSubscriptions()
- if allSubscribed(subs, ids) {
- return
- }
- time.Sleep(50 * time.Millisecond)
- }
- t.Fatalf("missing subscriptions")
-}
-
-// allSubscribed returns true if nodes with ids have subscriptions
-// in provided subs map.
-func allSubscribed(subs map[string][]string, ids []enode.ID) bool {
- for _, id := range ids {
- if s, ok := subs[id.String()]; !ok || len(s) == 0 {
- return false
- }
- }
- return true
-}
-
-// checkSyncStreamsWithRetry calls checkSyncStreams with retries.
-func checkSyncStreamsWithRetry(r *Registry, nodeProximities map[string]int) (err error) {
- for retries := 0; retries < 5; retries++ {
- err = checkSyncStreams(r, nodeProximities)
- if err == nil {
- return nil
- }
- time.Sleep(500 * time.Millisecond)
- }
- return err
-}
-
-// checkSyncStreams validates that the registry contains the expected sync
-// subscriptions to nodes whose proximities are given in the nodeProximities map.
-func checkSyncStreams(r *Registry, nodeProximities map[string]int) error {
- depth := r.delivery.kad.NeighbourhoodDepth()
- maxPO := r.delivery.kad.MaxProxDisplay
- for id, po := range nodeProximities {
- wantStreams := syncStreams(po, depth, maxPO)
- gotStreams := nodeStreams(r, id)
-
- if r.getPeer(enode.HexID(id)) == nil {
- // ignore removed peer
- continue
- }
-
- if !reflect.DeepEqual(gotStreams, wantStreams) {
- return fmt.Errorf("node %s got streams %v, want %v", id, gotStreams, wantStreams)
- }
- }
- return nil
-}
-
-// syncStreams returns expected sync streams that need to be
-// established between a node with kademlia neighbourhood depth
-// and a node with proximity order po.
-func syncStreams(po, depth, maxPO int) (streams []string) {
- start, end := syncBins(po, depth, maxPO)
- for bin := start; bin < end; bin++ {
- streams = append(streams, NewStream("SYNC", FormatSyncBinKey(uint8(bin)), false).String())
- streams = append(streams, NewStream("SYNC", FormatSyncBinKey(uint8(bin)), true).String())
- }
- return streams
-}
-
-// nodeStreams returns stream server subscriptions on a registry
-// to the peer with provided id.
-func nodeStreams(r *Registry, id string) []string {
- streams := r.api.GetPeerServerSubscriptions()[id]
- sort.Strings(streams)
- return streams
-}
diff --git a/network/stream/snapshot_retrieval_test.go b/network/stream/snapshot_retrieval_test.go
deleted file mode 100644
index ff3dd7e68d..0000000000
--- a/network/stream/snapshot_retrieval_test.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package stream
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network/simulation"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/testutil"
-)
-
-// constants for random file generation
-const (
- minFileSize = 2
- maxFileSize = 40
-)
-
-// TestFileRetrieval is a retrieval test for nodes.
-// A configurable number of nodes can be provided to the test.
-// Files are uploaded to nodes, and other nodes try to retrieve them.
-// The number of nodes can also be provided via the command line.
-func TestFileRetrieval(t *testing.T) {
- var nodeCount []int
-
- if *nodes != 0 {
- nodeCount = []int{*nodes}
- } else {
- nodeCount = []int{16}
-
- if *testutil.Longrunning {
- nodeCount = append(nodeCount, 32, 64)
- } else if testutil.RaceEnabled {
- nodeCount = []int{4}
- }
-
- }
-
- for _, nc := range nodeCount {
- runFileRetrievalTest(t, nc)
- }
-}
-
-// TestPureRetrieval tests pure retrieval without syncing.
-// A configurable number of nodes and chunks
-// can be provided to the test.
-// A number of random chunks is generated, then stored directly in
-// each node's localstore according to their address.
-// Each chunk is supposed to end up at certain nodes.
-// With retrieval we then make sure that every node can actually retrieve
-// the chunks.
-func TestPureRetrieval(t *testing.T) {
- var nodeCount []int
- var chunkCount []int
-
- if *nodes != 0 && *chunks != 0 {
- nodeCount = []int{*nodes}
- chunkCount = []int{*chunks}
- } else {
- nodeCount = []int{16}
- chunkCount = []int{150}
-
- if *testutil.Longrunning {
- nodeCount = append(nodeCount, 32, 64)
- chunkCount = append(chunkCount, 32, 256)
- } else if testutil.RaceEnabled {
- nodeCount = []int{4}
- chunkCount = []int{4}
- }
-
- }
-
- for _, nc := range nodeCount {
- for _, c := range chunkCount {
- runPureRetrievalTest(t, nc, c)
- }
- }
-}
-
-// TestRetrieval tests retrieval of chunks by random nodes.
-// One node is randomly selected to be the pivot node.
-// A configurable number of chunks and nodes can be
-// provided to the test; the chunks are uploaded to
-// the pivot node and other nodes try to retrieve them.
-// The number of chunks and nodes can also be provided via the command line.
-func TestRetrieval(t *testing.T) {
- // if nodes/chunks have been provided via commandline,
- // run the tests with these values
- if *nodes != 0 && *chunks != 0 {
- runRetrievalTest(t, *chunks, *nodes)
- } else {
- nodeCnt := []int{16}
- chnkCnt := []int{32}
-
- if *testutil.Longrunning {
- nodeCnt = []int{16, 32, 64}
- chnkCnt = []int{4, 32, 256}
- } else if testutil.RaceEnabled {
- nodeCnt = []int{4}
- chnkCnt = []int{4}
- }
-
- for _, n := range nodeCnt {
- for _, c := range chnkCnt {
- t.Run(fmt.Sprintf("TestRetrieval_%d_%d", n, c), func(t *testing.T) {
- runRetrievalTest(t, c, n)
- })
- }
- }
- }
-}
-
-var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
- if err != nil {
- return nil, nil, err
- }
-
- syncUpdateDelay := 1 * time.Second
- if *testutil.Longrunning {
- syncUpdateDelay = 3 * time.Second
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Syncing: SyncingAutoSubscribe,
- SyncUpdateDelay: syncUpdateDelay,
- }, nil)
-
- cleanup = func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
-}
-
-// runPureRetrievalTest uploads a snapshot, starts a simulation,
-// distributes chunks to nodes and starts retrieval.
-// The snapshot should have 'streamer' in its service list.
-func runPureRetrievalTest(t *testing.T, nodeCount int, chunkCount int) {
-
- t.Helper()
- // the pure retrieval test needs a different service map, as we want
- // syncing disabled and we don't need to set the syncUpdateDelay
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
- if err != nil {
- return nil, nil, err
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Syncing: SyncingDisabled,
- }, nil)
-
- cleanup = func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
- },
- )
- defer sim.Close()
-
- log.Info("Initializing test config", "node count", nodeCount)
-
- conf := &synctestConfig{}
- //map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[enode.ID][]int)
- //map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]enode.ID)
- //array where the generated chunk hashes will be stored
- conf.hashes = make([]storage.Address, 0)
-
- ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
- defer cancelSimRun()
-
- filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
- err := sim.UploadSnapshot(ctx, filename)
- if err != nil {
- t.Fatal(err)
- }
-
- log.Info("Starting simulation")
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- // first iteration: create addresses
- for _, n := range nodeIDs {
- //get the kademlia overlay address from this ID
- a := n.Bytes()
- //append it to the array of all overlay addresses
- conf.addrs = append(conf.addrs, a)
- //the proximity calculation is on overlay addr,
- //the p2p/simulations check func triggers on enode.ID,
- //so we need to know which overlay addr maps to which nodeID
- conf.addrToIDMap[string(a)] = n
- }
-
- // now create random chunks
- chunks := storage.GenerateRandomChunks(int64(chunkSize), chunkCount)
- for _, chunk := range chunks {
- conf.hashes = append(conf.hashes, chunk.Address())
- }
-
- log.Debug("random chunks generated, mapping keys to nodes")
-
- // map addresses to nodes
- mapKeysToNodes(conf)
-
- // second iteration: storing chunks at the peer whose
- // overlay address is closest to a particular chunk's hash
- log.Debug("storing every chunk at correspondent node store")
- for _, id := range nodeIDs {
- // for every chunk for this node (which are only indexes)...
- for _, ch := range conf.idToChunksMap[id] {
- item, ok := sim.NodeItem(id, bucketKeyStore)
- if !ok {
- return fmt.Errorf("Error accessing localstore")
- }
- lstore := item.(chunk.Store)
- // ...get the actual chunk
- for _, chnk := range chunks {
- if bytes.Equal(chnk.Address(), conf.hashes[ch]) {
- // ...and store it in the localstore
- if _, err = lstore.Put(ctx, chunk.ModePutUpload, chnk); err != nil {
- return err
- }
- }
- }
- }
- }
-
- // now try to retrieve every chunk from every node
- log.Debug("starting retrieval")
- cnt := 0
-
- for _, id := range nodeIDs {
- item, ok := sim.NodeItem(id, bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- fileStore := item.(*storage.FileStore)
- for _, chunk := range chunks {
- reader, _ := fileStore.Retrieve(context.TODO(), chunk.Address())
- content := make([]byte, chunkSize)
- size, err := reader.Read(content)
- //check chunk size and content
- ok := true
- if err != io.EOF {
- log.Debug("Retrieve error", "err", err, "hash", chunk.Address(), "nodeId", id)
- ok = false
- }
- if size != chunkSize {
- log.Debug("size not equal chunkSize", "size", size, "hash", chunk.Address(), "nodeId", id)
- ok = false
- }
-				// compare content, skipping the 8-byte length prefix ("metadata") in chunk.Data()
- if !bytes.Equal(content, chunk.Data()[8:]) {
- log.Debug("content not equal chunk data", "hash", chunk.Address(), "nodeId", id)
- ok = false
- }
- if !ok {
- return fmt.Errorf("Expected test to succeed at first run, but failed with chunk not found")
- }
- log.Debug(fmt.Sprintf("chunk with root hash %x successfully retrieved", chunk.Address()))
- cnt++
- }
- }
-		log.Info("retrieval terminated, chunks retrieved", "count", cnt)
- return nil
-
- })
-
- log.Info("Simulation terminated")
-
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-// runFileRetrievalTest loads a snapshot file to construct the swarm network.
-// The snapshot should have 'streamer' in its service list.
-func runFileRetrievalTest(t *testing.T, nodeCount int) {
-
- t.Helper()
-
- sim := simulation.NewInProc(retrievalSimServiceMap)
- defer sim.Close()
-
- log.Info("Initializing test config", "node count", nodeCount)
-
- conf := &synctestConfig{}
- //map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[enode.ID][]int)
- //map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]enode.ID)
- //array where the generated chunk hashes will be stored
- conf.hashes = make([]storage.Address, 0)
-
- ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
- defer cancelSimRun()
-
- filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
- err := sim.UploadSnapshot(ctx, filename)
- if err != nil {
- t.Fatal(err)
- }
-
- log.Info("Starting simulation")
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- for _, n := range nodeIDs {
- //get the kademlia overlay address from this ID
- a := n.Bytes()
- //append it to the array of all overlay addresses
- conf.addrs = append(conf.addrs, a)
- //the proximity calculation is on overlay addr,
- //the p2p/simulations check func triggers on enode.ID,
- //so we need to know which overlay addr maps to which nodeID
- conf.addrToIDMap[string(a)] = n
- }
-
- //an array for the random files
- var randomFiles []string
-
- conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
- if err != nil {
- return err
- }
-
- log.Info("network healthy, start file checks")
-
- // File retrieval check is repeated until all uploaded files are retrieved from all nodes
- // or until the timeout is reached.
- REPEAT:
- for {
- for _, id := range nodeIDs {
- //for each expected file, check if it is in the local store
- item, ok := sim.NodeItem(id, bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- fileStore := item.(*storage.FileStore)
- //check all chunks
- for i, hash := range conf.hashes {
- reader, _ := fileStore.Retrieve(context.TODO(), hash)
- //check that we can read the file size and that it corresponds to the generated file size
- if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
- log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
- time.Sleep(500 * time.Millisecond)
- continue REPEAT
- }
- log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
- }
- }
- return nil
- }
- })
-
- log.Info("Simulation terminated")
-
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-// runRetrievalTest generates the given number of chunks.
-// The test loads a snapshot file to construct the swarm network.
-// The snapshot should have 'streamer' in its service list.
-func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) {
-
- t.Helper()
-
- sim := simulation.NewInProc(retrievalSimServiceMap)
- defer sim.Close()
-
- conf := &synctestConfig{}
- //map of discover ID to indexes of chunks expected at that ID
- conf.idToChunksMap = make(map[enode.ID][]int)
- //map of overlay address to discover ID
- conf.addrToIDMap = make(map[string]enode.ID)
- //array where the generated chunk hashes will be stored
- conf.hashes = make([]storage.Address, 0)
-
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- defer cancel()
-
- filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
- err := sim.UploadSnapshot(ctx, filename)
- if err != nil {
- t.Fatal(err)
- }
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- nodeIDs := sim.UpNodeIDs()
- for _, n := range nodeIDs {
- //get the kademlia overlay address from this ID
- a := n.Bytes()
- //append it to the array of all overlay addresses
- conf.addrs = append(conf.addrs, a)
- //the proximity calculation is on overlay addr,
- //the p2p/simulations check func triggers on enode.ID,
- //so we need to know which overlay addr maps to which nodeID
- conf.addrToIDMap[string(a)] = n
- }
-
- //this is the node selected for upload
- node := sim.Net.GetRandomUpNode()
- item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
- if !ok {
- return fmt.Errorf("No localstore")
- }
- lstore := item.(chunk.Store)
- conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
- if err != nil {
- return err
- }
-
-		// Chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes
-		// or until the timeout is reached.
- REPEAT:
- for {
- for _, id := range nodeIDs {
- //for each expected chunk, check if it is in the local store
- //check on the node's FileStore (netstore)
- item, ok := sim.NodeItem(id, bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- fileStore := item.(*storage.FileStore)
- //check all chunks
- for _, hash := range conf.hashes {
- reader, _ := fileStore.Retrieve(context.TODO(), hash)
- //check that we can read the chunk size and that it corresponds to the generated chunk size
- if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
- log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
- time.Sleep(500 * time.Millisecond)
- continue REPEAT
- }
- log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
- }
- }
- // all nodes and files found, exit loop and return without error
- return nil
- }
- })
-
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
diff --git a/network/stream/stream.go b/network/stream/stream.go
deleted file mode 100644
index 1442402a31..0000000000
--- a/network/stream/stream.go
+++ /dev/null
@@ -1,793 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "fmt"
- "math"
- "reflect"
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/p2p"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/rpc"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/stream/intervals"
- "github.com/ethersphere/swarm/p2p/protocols"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/swap"
-)
-
-const (
- Low uint8 = iota
- Mid
- High
- Top
- PriorityQueue = 4 // number of priority queues - Low, Mid, High, Top
- PriorityQueueCap = 4096 // queue capacity
- HashSize = 32
-)
-
-// Enumerate options for syncing and retrieval
-type SyncingOption int
-
-// Syncing options
-const (
- // Syncing disabled
- SyncingDisabled SyncingOption = iota
- // Register the client and the server but not subscribe
- SyncingRegisterOnly
- // Both client and server funcs are registered, subscribe sent automatically
- SyncingAutoSubscribe
-)
-
-// subscriptionFunc determines what to do in order to perform subscriptions;
-// usually we would actually subscribe to nodes, but tests may need other behaviour
-// (see TestRequestPeerSubscriptions in streamer_test.go)
-var subscriptionFunc = doRequestSubscription
-
-// Registry is the registry for outgoing and incoming streamer constructors
-type Registry struct {
- addr enode.ID
- api *API
- skipCheck bool
- clientMu sync.RWMutex
- serverMu sync.RWMutex
- peersMu sync.RWMutex
- serverFuncs map[string]func(*Peer, string, bool) (Server, error)
- clientFuncs map[string]func(*Peer, string, bool) (Client, error)
- peers map[enode.ID]*Peer
- delivery *Delivery
- intervalsStore state.Store
- maxPeerServers int
- spec *protocols.Spec //this protocol's spec
- balance protocols.Balance //implements protocols.Balance, for accounting
- prices protocols.Prices //implements protocols.Prices, provides prices to accounting
- quit chan struct{} // terminates registry goroutines
- syncMode SyncingOption
- syncUpdateDelay time.Duration
-}
-
-// RegistryOptions holds optional values for NewRegistry constructor.
-type RegistryOptions struct {
- SkipCheck bool
- Syncing SyncingOption // Defines syncing behavior
- SyncUpdateDelay time.Duration
- MaxPeerServers int // The limit of servers for each peer in registry
-}
-
-// NewRegistry is the Streamer constructor
-func NewRegistry(localID enode.ID, delivery *Delivery, netStore *storage.NetStore, intervalsStore state.Store, options *RegistryOptions, balance protocols.Balance) *Registry {
- if options == nil {
- options = &RegistryOptions{}
- }
- if options.SyncUpdateDelay <= 0 {
- options.SyncUpdateDelay = 15 * time.Second
- }
-
- quit := make(chan struct{})
-
- streamer := &Registry{
- addr: localID,
- skipCheck: options.SkipCheck,
- serverFuncs: make(map[string]func(*Peer, string, bool) (Server, error)),
- clientFuncs: make(map[string]func(*Peer, string, bool) (Client, error)),
- peers: make(map[enode.ID]*Peer),
- delivery: delivery,
- intervalsStore: intervalsStore,
- maxPeerServers: options.MaxPeerServers,
- balance: balance,
- quit: quit,
- syncUpdateDelay: options.SyncUpdateDelay,
- syncMode: options.Syncing,
- }
-
- streamer.setupSpec()
-
- streamer.api = NewAPI(streamer)
- delivery.getPeer = streamer.getPeer
-
- // If syncing is not disabled, the syncing functions are registered (both client and server)
- if options.Syncing != SyncingDisabled {
- RegisterSwarmSyncerServer(streamer, netStore)
- RegisterSwarmSyncerClient(streamer, netStore)
- }
-
- return streamer
-}
-
-// This is an accounted protocol, therefore we need to provide a pricing Hook to the spec.
-// For simulations to be able to run multiple nodes and not override the hook's balance,
-// we need to construct a spec instance per node instance.
-func (r *Registry) setupSpec() {
- // first create the "bare" spec
- r.createSpec()
- // now create the pricing object
- r.createPriceOracle()
- // if balance is nil, this node has been started without swap support (swapEnabled flag is false)
- if r.balance != nil && !reflect.ValueOf(r.balance).IsNil() {
- // swap is enabled, so setup the hook
- r.spec.Hook = protocols.NewAccounting(r.balance, r.prices)
- }
-}
-
-// RegisterClientFunc registers an incoming streamer constructor
-func (r *Registry) RegisterClientFunc(stream string, f func(*Peer, string, bool) (Client, error)) {
- r.clientMu.Lock()
- defer r.clientMu.Unlock()
-
- r.clientFuncs[stream] = f
-}
-
-// RegisterServerFunc registers an outgoing streamer constructor
-func (r *Registry) RegisterServerFunc(stream string, f func(*Peer, string, bool) (Server, error)) {
- r.serverMu.Lock()
- defer r.serverMu.Unlock()
-
- r.serverFuncs[stream] = f
-}
-
-// GetClientFunc is the accessor for incoming streamer constructors
-func (r *Registry) GetClientFunc(stream string) (func(*Peer, string, bool) (Client, error), error) {
- r.clientMu.RLock()
- defer r.clientMu.RUnlock()
-
- f := r.clientFuncs[stream]
- if f == nil {
- return nil, fmt.Errorf("stream %v not registered", stream)
- }
- return f, nil
-}
-
-// GetServerFunc is the accessor for outgoing streamer constructors
-func (r *Registry) GetServerFunc(stream string) (func(*Peer, string, bool) (Server, error), error) {
- r.serverMu.RLock()
- defer r.serverMu.RUnlock()
-
- f := r.serverFuncs[stream]
- if f == nil {
- return nil, fmt.Errorf("stream %v not registered", stream)
- }
- return f, nil
-}
-
-func (r *Registry) RequestSubscription(peerId enode.ID, s Stream, h *Range, prio uint8) error {
- // check if the stream is registered
- if _, err := r.GetServerFunc(s.Name); err != nil {
- return err
- }
-
- peer := r.getPeer(peerId)
- if peer == nil {
- return fmt.Errorf("peer not found %v", peerId)
- }
-
- if _, err := peer.getServer(s); err != nil {
- if e, ok := err.(*notFoundError); ok && e.t == "server" {
- // request subscription only if the server for this stream is not created
- log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h)
- return peer.Send(context.TODO(), &RequestSubscriptionMsg{
- Stream: s,
- History: h,
- Priority: prio,
- })
- }
- return err
- }
- log.Trace("RequestSubscription: already subscribed", "peer", peerId, "stream", s, "history", h)
- return nil
-}
-
-// Subscribe initiates a client subscription to a stream on the given peer
-func (r *Registry) Subscribe(peerId enode.ID, s Stream, h *Range, priority uint8) error {
- // check if the stream is registered
- if _, err := r.GetClientFunc(s.Name); err != nil {
- return err
- }
-
- peer := r.getPeer(peerId)
- if peer == nil {
- return fmt.Errorf("peer not found %v", peerId)
- }
-
- var to uint64
- if !s.Live && h != nil {
- to = h.To
- }
-
- err := peer.setClientParams(s, newClientParams(priority, to))
- if err != nil {
- return err
- }
- if s.Live && h != nil {
- if err := peer.setClientParams(
- getHistoryStream(s),
- newClientParams(getHistoryPriority(priority), h.To),
- ); err != nil {
- return err
- }
- }
-
- msg := &SubscribeMsg{
- Stream: s,
- History: h,
- Priority: priority,
- }
- log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h)
-
- return peer.Send(context.TODO(), msg)
-}
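A usage sketch for Subscribe, mirroring how doRequestSubscription below sets up a live sync stream; the bin value is arbitrary. With a live stream and a non-nil history range, Subscribe registers client params for both the live stream and its derived history stream:

func subscribeExample(r *Registry, peerID enode.ID) error {
	live := NewStream("SYNC", FormatSyncBinKey(1), true)
	// NewRange(0, 0) requests history from the beginning with an open end
	return r.Subscribe(peerID, live, NewRange(0, 0), High)
}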
-
-func (r *Registry) Unsubscribe(peerId enode.ID, s Stream) error {
- peer := r.getPeer(peerId)
- if peer == nil {
- return fmt.Errorf("peer not found %v", peerId)
- }
-
- msg := &UnsubscribeMsg{
- Stream: s,
- }
- log.Debug("Unsubscribe ", "peer", peerId, "stream", s)
-
- if err := peer.Send(context.TODO(), msg); err != nil {
- return err
- }
- return peer.removeClient(s)
-}
-
-// Quit sends the QuitMsg to the peer to remove the
-// stream peer client and terminate the streaming.
-func (r *Registry) Quit(peerId enode.ID, s Stream) error {
- peer := r.getPeer(peerId)
- if peer == nil {
- log.Debug("stream quit: peer not found", "peer", peerId, "stream", s)
- // if the peer is not found, abort the request
- return nil
- }
-
- msg := &QuitMsg{
- Stream: s,
- }
- log.Debug("Quit ", "peer", peerId, "stream", s)
-
- return peer.Send(context.TODO(), msg)
-}
-
-func (r *Registry) Close() error {
-	// Stop sending neighbourhood depth change and address count
-	// change signals from Kademlia that were initiated in the NewRegistry constructor.
- r.delivery.Close()
- close(r.quit)
- return r.intervalsStore.Close()
-}
-
-func (r *Registry) getPeer(peerId enode.ID) *Peer {
- r.peersMu.RLock()
- defer r.peersMu.RUnlock()
-
- return r.peers[peerId]
-}
-
-func (r *Registry) setPeer(peer *Peer) {
- r.peersMu.Lock()
- r.peers[peer.ID()] = peer
- metrics.GetOrRegisterCounter("registry.setpeer", nil).Inc(1)
- metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
- r.peersMu.Unlock()
-}
-
-func (r *Registry) deletePeer(peer *Peer) {
- r.peersMu.Lock()
- delete(r.peers, peer.ID())
- metrics.GetOrRegisterCounter("registry.deletepeer", nil).Inc(1)
- metrics.GetOrRegisterGauge("registry.peers", nil).Update(int64(len(r.peers)))
- r.peersMu.Unlock()
-}
-
-func (r *Registry) peersCount() (c int) {
- r.peersMu.Lock()
- c = len(r.peers)
- r.peersMu.Unlock()
- return
-}
-
-// Run is the protocol run function
-func (r *Registry) Run(p *network.BzzPeer) error {
- sp := NewPeer(p, r)
- r.setPeer(sp)
-
- if r.syncMode == SyncingAutoSubscribe {
- go sp.runUpdateSyncing()
- }
-
- defer r.deletePeer(sp)
- defer close(sp.quit)
- defer sp.close()
-
- return sp.Run(sp.HandleMsg)
-}
-
-// doRequestSubscription sends the actual RequestSubscription to the peer
-func doRequestSubscription(r *Registry, id enode.ID, bin uint8) error {
- log.Debug("Requesting subscription by registry:", "registry", r.addr, "peer", id, "bin", bin)
-	// bin is always less than 256 and it is safe to convert it to type uint8
- stream := NewStream("SYNC", FormatSyncBinKey(bin), true)
- err := r.RequestSubscription(id, stream, NewRange(0, 0), High)
- if err != nil {
- log.Debug("Request subscription", "err", err, "peer", id, "stream", stream)
- return err
- }
- return nil
-}
-
-func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
- peer := protocols.NewPeer(p, rw, r.spec)
- bp := network.NewBzzPeer(peer)
- np := network.NewPeer(bp, r.delivery.kad)
- r.delivery.kad.On(np)
- defer r.delivery.kad.Off(np)
- return r.Run(bp)
-}
-
-// HandleMsg is the message handler that delegates incoming messages
-func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
- select {
- case <-p.streamer.quit:
- log.Trace("message received after the streamer is closed", "peer", p.ID())
- // return without an error since streamer is closed and
- // no messages should be handled as other subcomponents like
- // storage leveldb may be closed
- return nil
- default:
- }
-
- switch msg := msg.(type) {
-
- case *SubscribeMsg:
- return p.handleSubscribeMsg(ctx, msg)
-
- case *SubscribeErrorMsg:
- return p.handleSubscribeErrorMsg(msg)
-
- case *UnsubscribeMsg:
- return p.handleUnsubscribeMsg(msg)
-
- case *OfferedHashesMsg:
- go func() {
- err := p.handleOfferedHashesMsg(ctx, msg)
- if err != nil {
- log.Error(err.Error())
- p.Drop()
- }
- }()
- return nil
-
- case *TakeoverProofMsg:
- go func() {
- err := p.handleTakeoverProofMsg(ctx, msg)
- if err != nil {
- log.Error(err.Error())
- p.Drop()
- }
- }()
- return nil
-
- case *WantedHashesMsg:
- go func() {
- err := p.handleWantedHashesMsg(ctx, msg)
- if err != nil {
- log.Error(err.Error())
- p.Drop()
- }
- }()
- return nil
-
- case *ChunkDeliveryMsgRetrieval:
- // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
- go func() {
- err := p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg)
- if err != nil {
- log.Error(err.Error())
- p.Drop()
- }
- }()
- return nil
-
- case *ChunkDeliveryMsgSyncing:
- // handling chunk delivery is the same for retrieval and syncing, so let's cast the msg
- go func() {
- err := p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg)
- if err != nil {
- log.Error(err.Error())
- p.Drop()
- }
- }()
- return nil
-
- case *RetrieveRequestMsg:
- go func() {
- err := p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg)
- if err != nil {
- log.Error(err.Error())
- p.Drop()
- }
- }()
- return nil
-
- case *RequestSubscriptionMsg:
- return p.handleRequestSubscription(ctx, msg)
-
- case *QuitMsg:
- return p.handleQuitMsg(msg)
-
- default:
- return fmt.Errorf("unknown message type: %T", msg)
- }
-}
-
-type server struct {
- Server
- stream Stream
- priority uint8
- currentBatch []byte
- sessionIndex uint64
-}
-
-// setNextBatch adjusts the passed interval based on the session index and whether
-// the stream is live or history. It calls the Server's SetNextBatch with the adjusted
-// interval and returns batch hashes and their interval.
-func (s *server) setNextBatch(from, to uint64) ([]byte, uint64, uint64, error) {
- if s.stream.Live {
- if from == 0 {
- from = s.sessionIndex
- }
- if to <= from || from >= s.sessionIndex {
- to = math.MaxUint64
- }
- } else {
- if (to < from && to != 0) || from > s.sessionIndex {
- return nil, 0, 0, nil
- }
- if to == 0 || to > s.sessionIndex {
- to = s.sessionIndex
- }
- }
- return s.SetNextBatch(from, to)
-}
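To make the adjustment concrete, a trace of the interesting inputs for a server whose sessionIndex is 100; the values follow directly from the branches above:

// live stream, sessionIndex = 100:
//   setNextBatch(0, 0)    -> SetNextBatch(100, math.MaxUint64) // session starts at the cursor
//   setNextBatch(100, 50) -> SetNextBatch(100, math.MaxUint64) // to <= from: open-ended
// history stream, sessionIndex = 100:
//   setNextBatch(0, 0)    -> SetNextBatch(0, 100)              // capped at the session cursor
//   setNextBatch(120, 0)  -> (nil, 0, 0, nil)                  // from beyond cursor: nothing to send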
-
-// Server interface for outgoing peer Streamer
-type Server interface {
- // SessionIndex is called when a server is initialized
- // to get the current cursor state of the stream data.
- // Based on this index, live and history stream intervals
- // will be adjusted before calling SetNextBatch.
- SessionIndex() (uint64, error)
- SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, err error)
- GetData(context.Context, []byte) ([]byte, error)
- Close()
-}
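A minimal, hypothetical Server implementation to make the contract concrete. SessionIndex reports the current cursor and SetNextBatch returns concatenated 32-byte hashes; a real live server would instead block in SetNextBatch until new hashes arrive (see the comment in handleWantedHashesMsg):

type sliceServer struct {
	hashes []byte            // concatenated 32-byte hashes, indexed from 0
	data   map[string][]byte // hash -> chunk data
}

func (s *sliceServer) SessionIndex() (uint64, error) {
	return uint64(len(s.hashes) / HashSize), nil
}

func (s *sliceServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, error) {
	n := uint64(len(s.hashes) / HashSize)
	if to > n {
		to = n
	}
	if from >= to {
		return nil, 0, 0, nil // nothing to offer
	}
	return s.hashes[from*HashSize : to*HashSize], from, to, nil
}

func (s *sliceServer) GetData(_ context.Context, hash []byte) ([]byte, error) {
	d, ok := s.data[string(hash)]
	if !ok {
		return nil, fmt.Errorf("no data for %x", hash)
	}
	return d, nil
}

func (s *sliceServer) Close() {}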
-
-type client struct {
- Client
- stream Stream
- priority uint8
- sessionAt uint64
- to uint64
- next chan error
- quit chan struct{}
-
- intervalsKey string
- intervalsStore state.Store
-}
-
-func peerStreamIntervalsKey(p *Peer, s Stream) string {
- return p.ID().String() + s.String()
-}
-
-func (c *client) AddInterval(start, end uint64) (err error) {
- i := &intervals.Intervals{}
- if err = c.intervalsStore.Get(c.intervalsKey, i); err != nil {
- return err
- }
- i.Add(start, end)
- return c.intervalsStore.Put(c.intervalsKey, i)
-}
-
-func (c *client) NextInterval() (start, end uint64, err error) {
- i := &intervals.Intervals{}
- err = c.intervalsStore.Get(c.intervalsKey, i)
- if err != nil {
- return 0, 0, err
- }
- start, end = i.Next()
- return start, end, nil
-}
-
-// Client interface for incoming peer Streamer
-type Client interface {
- NeedData(context.Context, []byte) (bool, func(context.Context) error)
- Close()
-}
-
-func (c *client) nextBatch(from uint64) (nextFrom uint64, nextTo uint64) {
- if c.to > 0 && from >= c.to {
- return 0, 0
- }
- if c.stream.Live {
- return from, 0
- } else if from >= c.sessionAt {
- if c.to > 0 {
- return from, c.to
- }
- return from, math.MaxUint64
- }
- nextFrom, nextTo, err := c.NextInterval()
- if err != nil {
-		log.Error("next intervals", "stream", c.stream, "err", err)
- return
- }
- if nextTo > c.to {
- nextTo = c.to
- }
- if nextTo == 0 {
- nextTo = c.sessionAt
- }
- return
-}
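The branches above, traced for a history client with sessionAt = 50 and to = 80 (a sketch; the values follow from the code):

// history client, sessionAt = 50, to = 80:
//   nextBatch(90) -> (0, 0)         // at or past the requested end: nothing left
//   nextBatch(60) -> (60, 80)       // past session start: read up to the requested end
//   nextBatch(10) -> NextInterval() // before session start: consult stored intervals,
//                                   // capping the result at to (or sessionAt if open)
// live client:
//   nextBatch(n)  -> (n, 0)         // open-ended, always from n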
-
-func (c *client) close() {
- select {
- case <-c.quit:
- default:
- close(c.quit)
- }
- c.Close()
-}
-
-// clientParams store parameters for the new client
-// between a subscription and initial offered hashes request handling.
-type clientParams struct {
- priority uint8
- to uint64
- // signal when the client is created
- clientCreatedC chan struct{}
-}
-
-func newClientParams(priority uint8, to uint64) *clientParams {
- return &clientParams{
- priority: priority,
- to: to,
- clientCreatedC: make(chan struct{}),
- }
-}
-
-func (c *clientParams) waitClient(ctx context.Context) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-c.clientCreatedC:
- return nil
- }
-}
-
-func (c *clientParams) clientCreated() {
- close(c.clientCreatedC)
-}
-
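-// waitClient and clientCreated form a small handshake: the handler of the
-// initial OfferedHashes message blocks in waitClient until the subscription
-// path has constructed the client and called clientCreated. Because
-// clientCreated closes the channel, it must be called at most once.
-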
-// GetSpec returns the streamer spec to callers.
-// This used to be a global variable, but for simulations with
-// multiple nodes its fields (notably the Hook) would be overwritten
-func (r *Registry) GetSpec() *protocols.Spec {
- return r.spec
-}
-
-func (r *Registry) createSpec() {
- // Spec is the spec of the streamer protocol
- var spec = &protocols.Spec{
- Name: "stream",
- Version: 10,
- MaxMsgSize: 10 * 1024 * 1024,
- Messages: []interface{}{
- UnsubscribeMsg{},
- OfferedHashesMsg{},
- WantedHashesMsg{},
- TakeoverProofMsg{},
- SubscribeMsg{},
- RetrieveRequestMsg{},
- ChunkDeliveryMsgRetrieval{},
- SubscribeErrorMsg{},
- RequestSubscriptionMsg{},
- QuitMsg{},
- ChunkDeliveryMsgSyncing{},
- },
- }
- r.spec = spec
-}
-
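-// A message's position in the Messages slice is its wire code: 0 for
-// UnsubscribeMsg, 1 for OfferedHashesMsg, 2 for WantedHashesMsg, 4 for
-// SubscribeMsg, 7 for SubscribeErrorMsg, 8 for RequestSubscriptionMsg and
-// 9 for QuitMsg, matching the codes asserted in the tests below.
-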
-// An accountable message needs some meta information attached to it
-// in order to evaluate the correct price
-type StreamerPrices struct {
- priceMatrix map[reflect.Type]*protocols.Price
- registry *Registry
-}
-
-// Price implements the accounting interface and returns the price for a specific message
-func (sp *StreamerPrices) Price(msg interface{}) *protocols.Price {
- t := reflect.TypeOf(msg).Elem()
- return sp.priceMatrix[t]
-}
-
-// Instead of hardcoding the price, get it
-// through a function - it could be quite complex in the future
-func (sp *StreamerPrices) getRetrieveRequestMsgPrice() uint64 {
- return swap.RetrieveRequestPrice
-}
-
-// Instead of hardcoding the price, get it
-// through a function - it could be quite complex in the future
-func (sp *StreamerPrices) getChunkDeliveryMsgPrice() uint64 {
- return swap.ChunkDeliveryPrice
-}
-
-// createPriceOracle sets up a matrix which can be queried to get
-// the price for a message via the Price method
-func (r *Registry) createPriceOracle() {
- sp := &StreamerPrices{
- registry: r,
- }
- sp.priceMatrix = map[reflect.Type]*protocols.Price{
- reflect.TypeOf(ChunkDeliveryMsgRetrieval{}): {
- Value: sp.getChunkDeliveryMsgPrice(), // arbitrary price for now
- PerByte: true,
- Payer: protocols.Receiver,
- },
- reflect.TypeOf(RetrieveRequestMsg{}): {
- Value: sp.getRetrieveRequestMsgPrice(), // arbitrary price for now
- PerByte: false,
- Payer: protocols.Sender,
- },
- }
- r.prices = sp
-}
-
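-// Price lookups strip the pointer first (reflect.TypeOf(msg).Elem()), so
-// Price(&RetrieveRequestMsg{}) resolves the matrix entry keyed by the value
-// type RetrieveRequestMsg; message types without an entry yield nil.
-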
-func (r *Registry) Protocols() []p2p.Protocol {
- return []p2p.Protocol{
- {
- Name: r.spec.Name,
- Version: r.spec.Version,
- Length: r.spec.Length(),
- Run: r.runProtocol,
- },
- }
-}
-
-func (r *Registry) APIs() []rpc.API {
- return []rpc.API{
- {
- Namespace: "stream",
- Version: "3.0",
- Service: r.api,
- Public: false,
- },
- }
-}
-
-func (r *Registry) Start(server *p2p.Server) error {
- log.Info("Streamer started")
- return nil
-}
-
-func (r *Registry) Stop() error {
- return nil
-}
-
-type Range struct {
- From, To uint64
-}
-
-func NewRange(from, to uint64) *Range {
- return &Range{
- From: from,
- To: to,
- }
-}
-
-func (r *Range) String() string {
- return fmt.Sprintf("%v-%v", r.From, r.To)
-}
-
-func getHistoryPriority(priority uint8) uint8 {
- if priority == 0 {
- return 0
- }
- return priority - 1
-}
-
-func getHistoryStream(s Stream) Stream {
- return NewStream(s.Name, s.Key, false)
-}
-
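-// A live subscription at priority p therefore has a history counterpart with
-// the same name and key, Live set to false, scheduled one priority level
-// lower by getHistoryPriority, so live traffic takes precedence.
-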
-type API struct {
- streamer *Registry
-}
-
-func NewAPI(r *Registry) *API {
- return &API{
- streamer: r,
- }
-}
-
-func (api *API) SubscribeStream(peerId enode.ID, s Stream, history *Range, priority uint8) error {
- return api.streamer.Subscribe(peerId, s, history, priority)
-}
-
-func (api *API) UnsubscribeStream(peerId enode.ID, s Stream) error {
- return api.streamer.Unsubscribe(peerId, s)
-}
-
-/*
-GetPeerServerSubscriptions is a API function which allows to query a peer for stream subscriptions it has.
-It can be called via RPC.
-It returns a map of node IDs with an array of string representations of Stream objects.
-*/
-func (api *API) GetPeerServerSubscriptions() map[string][]string {
- pstreams := make(map[string][]string)
-
- api.streamer.peersMu.RLock()
- defer api.streamer.peersMu.RUnlock()
-
- for id, p := range api.streamer.peers {
- var streams []string
- //every peer has a map of stream servers
- //every stream server represents a subscription
- p.serverMu.RLock()
- for s := range p.servers {
- //append the string representation of the stream
- //to the list for this peer
- streams = append(streams, s.String())
- }
- p.serverMu.RUnlock()
- //set the array of stream servers to the map
- pstreams[id.String()] = streams
- }
- return pstreams
-}
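-
-// Callers can reach this over RPC, as the tests below do:
-//
-//  pstreams := make(map[string][]string)
-//  err := client.Call(&pstreams, "stream_getPeerServerSubscriptions")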
diff --git a/network/stream/streamer_test.go b/network/stream/streamer_test.go
deleted file mode 100644
index 50e7e74d7a..0000000000
--- a/network/stream/streamer_test.go
+++ /dev/null
@@ -1,1157 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "os"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/simulation"
- p2ptest "github.com/ethersphere/swarm/p2p/testing"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/testutil"
- "golang.org/x/crypto/sha3"
-)
-
-func TestStreamerSubscribe(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", true)
- err = streamer.Subscribe(tester.Nodes[0].ID(), stream, NewRange(0, 0), Top)
- if err == nil || err.Error() != "stream foo not registered" {
- t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
- }
-}
-
-func TestStreamerRequestSubscription(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", false)
- err = streamer.RequestSubscription(tester.Nodes[0].ID(), stream, &Range{}, Top)
- if err == nil || err.Error() != "stream foo not registered" {
- t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
- }
-}
-
-var (
- hash0 = sha3.Sum256([]byte{0})
- hash1 = sha3.Sum256([]byte{1})
- hash2 = sha3.Sum256([]byte{2})
- hashesTmp = append(hash0[:], hash1[:]...)
- hashes = append(hashesTmp, hash2[:]...)
- corruptHashes = append(hashes[:40])
-)
-
-type testClient struct {
- t string
- wait0 chan bool
- wait2 chan bool
- receivedHashes map[string][]byte
-}
-
-func newTestClient(t string) *testClient {
- return &testClient{
- t: t,
- wait0: make(chan bool),
- wait2: make(chan bool),
- receivedHashes: make(map[string][]byte),
- }
-}
-
-func (self *testClient) NeedData(ctx context.Context, hash []byte) (bool, func(context.Context) error) {
- self.receivedHashes[string(hash)] = hash
- if bytes.Equal(hash, hash0[:]) {
- return false, func(context.Context) error {
- <-self.wait0
- return nil
- }
- } else if bytes.Equal(hash, hash2[:]) {
- return false, func(context.Context) error {
- <-self.wait2
- return nil
- }
- }
- return false, nil
-}
-
-func (self *testClient) Close() {}
-
-type testServer struct {
- t string
- sessionIndex uint64
-}
-
-func newTestServer(t string, sessionIndex uint64) *testServer {
- return &testServer{
- t: t,
- sessionIndex: sessionIndex,
- }
-}
-
-func (s *testServer) SessionIndex() (uint64, error) {
- return s.sessionIndex, nil
-}
-
-func (self *testServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, error) {
- return make([]byte, HashSize), from + 1, to + 1, nil
-}
-
-func (self *testServer) GetData(context.Context, []byte) ([]byte, error) {
- return nil, nil
-}
-
-func (self *testServer) Close() {
-}
-
-func TestStreamerDownstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
- return newTestClient(t), nil
- })
-
- node := tester.Nodes[0]
-
- stream := NewStream("foo", "", true)
- err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(
- p2ptest.Exchange{
- Label: "Subscribe message",
- Expects: []p2ptest.Expect{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- },
- // trigger OfferedHashesMsg to actually create the client
- p2ptest.Exchange{
- Label: "OfferedHashes message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Hashes: hashes,
- From: 5,
- To: 8,
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 2,
- Msg: &WantedHashesMsg{
- Stream: stream,
- Want: []byte{5},
- From: 9,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- },
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = streamer.Unsubscribe(node.ID(), stream)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Unsubscribe message",
- Expects: []p2ptest.Expect{
- {
- Code: 0,
- Msg: &UnsubscribeMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStreamerUpstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", false)
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 10), nil
- })
-
- node := tester.Nodes[0]
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: stream,
- Hashes: make([]byte, HashSize),
- From: 6,
- To: 9,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "unsubscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 0,
- Msg: &UnsubscribeMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", true)
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 0), nil
- })
-
- node := tester.Nodes[0]
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: stream,
- Hashes: make([]byte, HashSize),
- From: 1,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "unsubscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 0,
- Msg: &UnsubscribeMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStreamerUpstreamSubscribeErrorMsgExchange(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 0), nil
- })
-
- stream := NewStream("bar", "", true)
-
- node := tester.Nodes[0]
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 7,
- Msg: &SubscribeErrorMsg{
- Error: "stream bar not registered",
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", true)
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 10), nil
- })
-
- node := tester.Nodes[0]
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: NewStream("foo", "", false),
- Hashes: make([]byte, HashSize),
- From: 6,
- To: 9,
- },
- Peer: node.ID(),
- },
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: stream,
- From: 11,
- To: 0,
- Hashes: make([]byte, HashSize),
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", true)
-
- var tc *testClient
-
- streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
- tc = newTestClient(t)
- return tc, nil
- })
-
- node := tester.Nodes[0]
-
- err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Expects: []p2ptest.Expect{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- },
- p2ptest.Exchange{
- Label: "Corrupt offered hash message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Hashes: corruptHashes,
- From: 5,
- To: 8,
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
-
- expectedError := errors.New("subprotocol error")
- if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID(), Error: expectedError}); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- stream := NewStream("foo", "", true)
-
- var tc *testClient
-
- streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
- tc = newTestClient(t)
- return tc, nil
- })
-
- node := tester.Nodes[0]
-
- err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Expects: []p2ptest.Expect{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- },
- p2ptest.Exchange{
- Label: "WantedHashes message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Hashes: hashes,
- From: 5,
- To: 8,
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 2,
- Msg: &WantedHashesMsg{
- Stream: stream,
- Want: []byte{5},
- From: 9,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- })
- if err != nil {
- t.Fatal(err)
- }
-
- if len(tc.receivedHashes) != 3 {
- t.Fatalf("Expected number of received hashes %v, got %v", 3, len(tc.receivedHashes))
- }
-
- close(tc.wait0)
-
- time.Sleep(100 * time.Millisecond)
-
- close(tc.wait2)
-}
-
-func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) {
- tester, streamer, _, teardown, err := newStreamerTester(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 10), nil
- })
-
- node := tester.Nodes[0]
-
- stream := NewStream("foo", "", true)
- err = streamer.RequestSubscription(node.ID(), stream, NewRange(5, 8), Top)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(
- p2ptest.Exchange{
- Label: "RequestSubscription message",
- Expects: []p2ptest.Expect{
- {
- Code: 8,
- Msg: &RequestSubscriptionMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- },
- p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- History: NewRange(5, 8),
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: NewStream("foo", "", false),
- Hashes: make([]byte, HashSize),
- From: 6,
- To: 9,
- },
- Peer: node.ID(),
- },
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: stream,
- From: 11,
- To: 0,
- Hashes: make([]byte, HashSize),
- },
- Peer: node.ID(),
- },
- },
- },
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = streamer.Quit(node.ID(), stream)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Quit message",
- Expects: []p2ptest.Expect{
- {
- Code: 9,
- Msg: &QuitMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- historyStream := getHistoryStream(stream)
-
- err = streamer.Quit(node.ID(), historyStream)
- if err != nil {
- t.Fatalf("Expected no error, got %v", err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Quit message",
- Expects: []p2ptest.Expect{
- {
- Code: 9,
- Msg: &QuitMsg{
- Stream: historyStream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// TestMaxPeerServersWithUnsubscribe creates a registry with a limited
-// number of stream servers, and performs a test with subscriptions and
-// unsubscriptions, checking that unsubscriptions remove streams,
-// making room for new ones.
-func TestMaxPeerServersWithUnsubscribe(t *testing.T) {
- var maxPeerServers = 6
- tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
- Syncing: SyncingDisabled,
- MaxPeerServers: maxPeerServers,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 0), nil
- })
-
- node := tester.Nodes[0]
-
- for i := 0; i < maxPeerServers+10; i++ {
- stream := NewStream("foo", strconv.Itoa(i), true)
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: stream,
- Hashes: make([]byte, HashSize),
- From: 1,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "unsubscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 0,
- Msg: &UnsubscribeMsg{
- Stream: stream,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-// TestMaxPeerServersWithoutUnsubscribe creates a registry with a limited
-// number of stream servers, and performs subscriptions past the limit to
-// verify the subscription error message exchange.
-func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
- var maxPeerServers = 6
- tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
- MaxPeerServers: maxPeerServers,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
- return newTestServer(t, 0), nil
- })
-
- node := tester.Nodes[0]
-
- for i := 0; i < maxPeerServers+10; i++ {
- stream := NewStream("foo", strconv.Itoa(i), true)
-
- if i >= maxPeerServers {
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 7,
- Msg: &SubscribeErrorMsg{
- Error: ErrMaxPeerServers.Error(),
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
- continue
- }
-
- err = tester.TestExchanges(p2ptest.Exchange{
- Label: "Subscribe message",
- Triggers: []p2ptest.Trigger{
- {
- Code: 4,
- Msg: &SubscribeMsg{
- Stream: stream,
- Priority: Top,
- },
- Peer: node.ID(),
- },
- },
- Expects: []p2ptest.Expect{
- {
- Code: 1,
- Msg: &OfferedHashesMsg{
- Stream: stream,
- Hashes: make([]byte, HashSize),
- From: 1,
- To: 0,
- },
- Peer: node.ID(),
- },
- },
- })
-
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-//TestHasPriceImplementation checks that the Registry has a
-//`Price` interface implementation
-func TestHasPriceImplementation(t *testing.T) {
- _, r, _, teardown, err := newStreamerTester(&RegistryOptions{
- Syncing: SyncingDisabled,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer teardown()
-
- if r.prices == nil {
- t.Fatal("No prices implementation available for the stream protocol")
- }
-
- pricesInstance, ok := r.prices.(*StreamerPrices)
- if !ok {
- t.Fatal("`Registry` does not have the expected Prices instance")
- }
- price := pricesInstance.Price(&ChunkDeliveryMsgRetrieval{})
- if price == nil || price.Value == 0 || price.Value != pricesInstance.getChunkDeliveryMsgPrice() {
- t.Fatal("No prices set for chunk delivery msg")
- }
-
- price = pricesInstance.Price(&RetrieveRequestMsg{})
- if price == nil || price.Value == 0 || price.Value != pricesInstance.getRetrieveRequestMsgPrice() {
- t.Fatal("No prices set for chunk delivery msg")
- }
-}
-
-// TestGetServerSubscriptions is a unit test for the api.GetPeerServerSubscriptions() function
-func TestGetServerSubscriptions(t *testing.T) {
- // create an amount of dummy peers
- testPeerCount := 8
- // every peer will have this amount of dummy servers
- testServerCount := 4
- // the peerMap which will store this data for the registry
- peerMap := make(map[enode.ID]*Peer)
- // create the registry
- r := &Registry{}
- api := NewAPI(r)
- // call once, at this point should be empty
- regs := api.GetPeerServerSubscriptions()
- if len(regs) != 0 {
- t.Fatal("Expected subscription count to be 0, but it is not")
- }
-
- // now create a number of dummy servers for each node
- for i := 0; i < testPeerCount; i++ {
- addr := network.RandomAddr()
- id := addr.ID()
- p := &Peer{}
- p.servers = make(map[Stream]*server)
- for k := 0; k < testServerCount; k++ {
- s := Stream{
- Name: strconv.Itoa(k),
- Key: "",
- Live: false,
- }
- p.servers[s] = &server{}
- }
- peerMap[id] = p
- }
- r.peers = peerMap
-
- // call the subscriptions again
- regs = api.GetPeerServerSubscriptions()
- // count how many (fake) subscriptions there are
- cnt := 0
- for _, reg := range regs {
- for range reg {
- cnt++
- }
- }
- // check expected value
- expectedCount := testPeerCount * testServerCount
- if cnt != expectedCount {
- t.Fatalf("Expected %d subscriptions, but got %d", expectedCount, cnt)
- }
-}
-
-/*
-TestGetServerSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
-starts the simulation, waits for SyncUpdateDelay in order to kick off
-stream registration, then tests that there are subscriptions.
-*/
-func TestGetServerSubscriptionsRPC(t *testing.T) {
-
- if testutil.RaceEnabled && os.Getenv("TRAVIS") == "true" {
- t.Skip("flaky with -race on Travis")
- // Note: related ticket https://github.com/ethersphere/go-ethereum/issues/1234
- }
-
- // arbitrarily set to 4
- nodeCount := 4
- // set the syncUpdateDelay for sync registrations to start
- syncUpdateDelay := 200 * time.Millisecond
- // run with more nodes if `longrunning` flag is set
- if *testutil.Longrunning {
- nodeCount = 64
- syncUpdateDelay = 10 * time.Second
- }
- // holds the msg code for SubscribeMsg
- var subscribeMsgCode uint64
- var ok bool
- var expectedMsgCount counter
-
- // this channel signals that all expected subscriptions have been made
- allSubscriptionsDone := make(chan struct{})
- // after the test, we need to reset the subscriptionFunc to the default
- defer func() { subscriptionFunc = doRequestSubscription }()
-
- // we use this subscriptionFunc for this test: it just increments the count and calls the actual subscription
- subscriptionFunc = func(r *Registry, id enode.ID, bin uint8) error {
- // syncing starts after syncUpdateDelay and loops after that duration; we only want to count the first iteration
- // in the first iteration, subs will be empty (no existing subscriptions), thus we can use this check
- // this avoids flakiness
- expectedMsgCount.inc()
- doRequestSubscription(r, id, bin)
- return nil
- }
- // create a standard sim
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
- if err != nil {
- return nil, nil, err
- }
-
- // configure so that sync registrations actually happen
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Syncing: SyncingAutoSubscribe, //enable sync registrations
- SyncUpdateDelay: syncUpdateDelay,
- }, nil)
-
- // get the SubscribeMsg code
- subscribeMsgCode, ok = r.GetSpec().GetCode(SubscribeMsg{})
- if !ok {
- t.Fatal("Message code for SubscribeMsg not found")
- }
-
- cleanup = func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
- defer cancelSimRun()
-
- // setup the filter for SubscribeMsg
- msgs := sim.PeerEvents(
- context.Background(),
- sim.UpNodeIDs(),
- simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(subscribeMsgCode),
- )
-
- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
- defer cancel()
- filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
- if err := sim.UploadSnapshot(ctx, filename); err != nil {
- t.Fatal(err)
- }
-
- // strategy: listen to all SubscribeMsg events; after every event we wait.
- // if after `waitDuration` no more messages are received, we assume the
- // subscription phase has terminated
-
- // the loop in this go routine will either wait for new message events
- // or times out after 1 second, which signals that we are not receiving
- // any new subscriptions any more
- go func() {
- //for long-running sims, waiting 1 sec will not be enough
- waitDuration := 1 * time.Second
- if *testutil.Longrunning {
- waitDuration = 3 * time.Second
- }
- for {
- select {
- case <-ctx.Done():
- return
- case m := <-msgs: // just reset the loop
- if m.Error != nil {
- log.Error("stream message", "err", m.Error)
- continue
- }
- log.Trace("stream message", "node", m.NodeID, "peer", m.PeerID)
- case <-time.After(waitDuration):
- // one second passed, don't assume more subscriptions
- allSubscriptionsDone <- struct{}{}
- log.Info("All subscriptions received")
- return
-
- }
- }
- }()
-
- //run the simulation
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- log.Info("Simulation running")
- nodes := sim.Net.Nodes
-
- //wait until all subscriptions are done
- select {
- case <-allSubscriptionsDone:
- case <-ctx.Done():
- return errors.New("Context timed out")
- }
-
- log.Debug("Expected message count: ", "expectedMsgCount", expectedMsgCount.count())
- //now iterate again, this time we call each node via RPC to get its subscriptions
- realCount := 0
- for _, node := range nodes {
- //create rpc client
- client, err := node.Client()
- if err != nil {
- return fmt.Errorf("create node 1 rpc client fail: %v", err)
- }
-
- //ask it for subscriptions
- pstreams := make(map[string][]string)
- err = client.Call(&pstreams, "stream_getPeerServerSubscriptions")
- if err != nil {
- return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
- }
- //the number of subscriptions cannot be smaller than the number of peers
- log.Debug("node subscriptions", "node", node.String())
- for p, ps := range pstreams {
- log.Debug("... with", "peer", p)
- for _, s := range ps {
- log.Debug(".......", "stream", s)
- // each node also has subscriptions to RETRIEVE_REQUEST streams,
- // we need to ignore those, we are only counting SYNC streams
- if !strings.HasPrefix(s, "RETRIEVE_REQUEST") {
- realCount++
- }
- }
- }
- log.Debug("All node streams counted", "realCount", realCount)
- }
- emc := expectedMsgCount.count()
- // after a subscription request, internally a live AND a history stream will be subscribed,
- // thus the real count should be half of the actual request subscriptions sent
- if realCount/2 != emc {
- return fmt.Errorf("Real subscriptions and expected amount don't match; real: %d, expected: %d", realCount/2, emc)
- }
- return nil
- })
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-// counter is used to concurrently increment
-// and read an integer value.
-type counter struct {
- v int
- mu sync.RWMutex
-}
-
-// Increment the counter.
-func (c *counter) inc() {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- c.v++
-}
-
-// Read the counter value.
-func (c *counter) count() int {
- c.mu.RLock()
- defer c.mu.RUnlock()
-
- return c.v
-}
diff --git a/network/stream/syncer.go b/network/stream/syncer.go
deleted file mode 100644
index 6fc1202ad3..0000000000
--- a/network/stream/syncer.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "context"
- "fmt"
- "strconv"
- "time"
-
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network/timeouts"
- "github.com/ethersphere/swarm/storage"
-)
-
-const (
- BatchSize = 128
-)
-
-// SwarmSyncerServer implements a Server for history syncing on bins
-// offered streams:
-// * live request delivery with or without checkback
-// * (live/non-live historical) chunk syncing per proximity bin
-type SwarmSyncerServer struct {
- correlateId string //used for logging
- po uint8
- netStore *storage.NetStore
- quit chan struct{}
-}
-
-// NewSwarmSyncerServer is the constructor for SwarmSyncerServer
-func NewSwarmSyncerServer(po uint8, netStore *storage.NetStore, correlateId string) (*SwarmSyncerServer, error) {
- return &SwarmSyncerServer{
- correlateId: correlateId,
- po: po,
- netStore: netStore,
- quit: make(chan struct{}),
- }, nil
-}
-
-func RegisterSwarmSyncerServer(streamer *Registry, netStore *storage.NetStore) {
- streamer.RegisterServerFunc("SYNC", func(p *Peer, t string, _ bool) (Server, error) {
- po, err := ParseSyncBinKey(t)
- if err != nil {
- return nil, err
- }
- return NewSwarmSyncerServer(po, netStore, fmt.Sprintf("%s|%d", p.ID(), po))
- })
- // streamer.RegisterServerFunc(stream, func(p *Peer) (Server, error) {
- // return NewOutgoingProvableSwarmSyncer(po, db)
- // })
-}
-
-// Close needs to be called on a stream server
-func (s *SwarmSyncerServer) Close() {
- close(s.quit)
-}
-
-// GetData retrieves the actual chunk from netstore
-func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) {
- ch, err := s.netStore.Store.Get(ctx, chunk.ModeGetSync, storage.Address(key))
- if err != nil {
- return nil, err
- }
- return ch.Data(), nil
-}
-
-// SessionIndex returns current storage bin (po) index.
-func (s *SwarmSyncerServer) SessionIndex() (uint64, error) {
- return s.netStore.LastPullSubscriptionBinID(s.po)
-}
-
-// SetNextBatch retrieves the next batch of hashes from the localstore.
-// It expects a range of bin IDs, both ends inclusive in syncing, and returns
-// concatenated byte slice of chunk addresses and bin IDs of the first and
-// the last one in that slice. The batch may have up to BatchSize number of
-// chunk addresses. If at least one chunk is added to the batch and no new chunks
-// are added in batchTimeout period, the batch will be returned. This function
-// will block until new chunks are received from localstore pull subscription.
-func (s *SwarmSyncerServer) SetNextBatch(from, to uint64) ([]byte, uint64, uint64, error) {
- batchStart := time.Now()
- descriptors, stop := s.netStore.SubscribePull(context.Background(), s.po, from, to)
- defer stop()
-
- const batchTimeout = 2 * time.Second
-
- var (
- batch []byte
- batchSize int
- batchStartID *uint64
- batchEndID uint64
- timer *time.Timer
- timerC <-chan time.Time
- )
-
- defer func(start time.Time) {
- metrics.GetOrRegisterResettingTimer("syncer.set-next-batch.total-time", nil).UpdateSince(start)
- metrics.GetOrRegisterCounter("syncer.set-next-batch.batch-size", nil).Inc(int64(batchSize))
- if timer != nil {
- timer.Stop()
- }
- }(batchStart)
-
- for iterate := true; iterate; {
- select {
- case d, ok := <-descriptors:
- if !ok {
- iterate = false
- break
- }
- batch = append(batch, d.Address[:]...)
- // This is the most naive approach to label the chunk as synced
- // allowing it to be garbage collected. A proper way requires
- // validating that the chunk is successfully stored by the peer.
- err := s.netStore.Set(context.Background(), chunk.ModeSetSync, d.Address)
- if err != nil {
- metrics.GetOrRegisterCounter("syncer.set-next-batch.set-sync-err", nil).Inc(1)
- log.Debug("syncer pull subscription - err setting chunk as synced", "correlateId", s.correlateId, "err", err)
- return nil, 0, 0, err
- }
- batchSize++
- if batchStartID == nil {
- // set batch start id only if
- // this is the first iteration
- batchStartID = &d.BinID
- }
- batchEndID = d.BinID
- if batchSize >= BatchSize {
- iterate = false
- metrics.GetOrRegisterCounter("syncer.set-next-batch.full-batch", nil).Inc(1)
- log.Trace("syncer pull subscription - batch size reached", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
- }
- if timer == nil {
- timer = time.NewTimer(batchTimeout)
- } else {
- log.Trace("syncer pull subscription - stopping timer", "correlateId", s.correlateId)
- if !timer.Stop() {
- <-timer.C
- }
- log.Trace("syncer pull subscription - channel drained, resetting timer", "correlateId", s.correlateId)
- timer.Reset(batchTimeout)
- }
- timerC = timer.C
- case <-timerC:
- // return batch if new chunks are not
- // received after some time
- iterate = false
- metrics.GetOrRegisterCounter("syncer.set-next-batch.timer-expire", nil).Inc(1)
- log.Trace("syncer pull subscription timer expired", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
- case <-s.quit:
- iterate = false
- log.Trace("syncer pull subscription - quit received", "correlateId", s.correlateId, "batchSize", batchSize, "batchStartID", batchStartID, "batchEndID", batchEndID)
- }
- }
- if batchStartID == nil {
- // if batch start id is not set, return 0
- batchStartID = new(uint64)
- }
- return batch, *batchStartID, batchEndID, nil
-}
-
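-// Assuming the usual 32-byte chunk addresses, a full batch is at most
-// BatchSize*32 = 4096 bytes of concatenated addresses, returned together
-// with the bin IDs of its first and last descriptor.
-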
-// SwarmSyncerClient
-type SwarmSyncerClient struct {
- netStore *storage.NetStore
- peer *Peer
- stream Stream
-}
-
-// NewSwarmSyncerClient is a constructor for the provable data exchange syncer
-func NewSwarmSyncerClient(p *Peer, netStore *storage.NetStore, stream Stream) (*SwarmSyncerClient, error) {
- return &SwarmSyncerClient{
- netStore: netStore,
- peer: p,
- stream: stream,
- }, nil
-}
-
-// RegisterSwarmSyncerClient registers the client constructor function
-// used to handle incoming sync streams
-func RegisterSwarmSyncerClient(streamer *Registry, netStore *storage.NetStore) {
- streamer.RegisterClientFunc("SYNC", func(p *Peer, t string, live bool) (Client, error) {
- return NewSwarmSyncerClient(p, netStore, NewStream("SYNC", t, live))
- })
-}
-
-func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (loaded bool, wait func(context.Context) error) {
- start := time.Now()
-
- fi, loaded, ok := s.netStore.GetOrCreateFetcher(ctx, key, "syncer")
- if !ok {
- return loaded, nil
- }
-
- return loaded, func(ctx context.Context) error {
- select {
- case <-fi.Delivered:
- metrics.GetOrRegisterResettingTimer(fmt.Sprintf("fetcher.%s.syncer", fi.CreatedBy), nil).UpdateSince(start)
- case <-time.After(timeouts.SyncerClientWaitTimeout):
- metrics.GetOrRegisterCounter("fetcher.syncer.timeout", nil).Inc(1)
- return fmt.Errorf("chunk not delivered through syncing after %dsec. ref=%s", timeouts.SyncerClientWaitTimeout, fmt.Sprintf("%x", key))
- }
- return nil
- }
-}
-
-func (s *SwarmSyncerClient) Close() {}
-
-// base for parsing and formatting the sync bin key
-// it must be 2 <= base <= 36
-const syncBinKeyBase = 36
-
-// FormatSyncBinKey returns a string representation of
-// Kademlia bin number to be used as key for SYNC stream.
-func FormatSyncBinKey(bin uint8) string {
- return strconv.FormatUint(uint64(bin), syncBinKeyBase)
-}
-
-// ParseSyncBinKey parses the string representation
-// and returns the Kademlia bin number.
-func ParseSyncBinKey(s string) (uint8, error) {
- bin, err := strconv.ParseUint(s, syncBinKeyBase, 8)
- if err != nil {
- return 0, err
- }
- return uint8(bin), nil
-}
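-
-// For example, FormatSyncBinKey(10) yields "a" and FormatSyncBinKey(16)
-// yields "g"; ParseSyncBinKey inverts this and rejects values that do not
-// fit in a uint8.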
diff --git a/network/stream/syncer_test.go b/network/stream/syncer_test.go
deleted file mode 100644
index b46b623510..0000000000
--- a/network/stream/syncer_test.go
+++ /dev/null
@@ -1,589 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package stream
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
- "github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
- "github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/simulation"
- "github.com/ethersphere/swarm/state"
- "github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/testutil"
-)
-
-const dataChunkCount = 1000
-
-// TestTwoNodesFullSync connects two nodes, uploads content to one node and expects the
-// uploader node's chunks to be synced to the second node. This is expected behaviour: although
-// both nodes might share address bits, kademlia depth stays 0 while under ProxBinSize, which
-// eventually creates subscriptions on all bins between the two nodes, causing a full sync between them.
-// The test checks that:
-// 1. All subscriptions are created
-// 2. All chunks are transferred from one node to another (asserted by summing and comparing bin indexes on both nodes)
-func TestTwoNodesFullSync(t *testing.T) {
- var (
- chunkCount = 1000 //~4mb
- syncTime = 5 * time.Second
- )
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr := network.NewAddr(ctx.Config.Node())
-
- netStore, delivery, clean, err := newNetStoreAndDeliveryWithBzzAddr(ctx, bucket, addr)
- if err != nil {
- return nil, nil, err
- }
-
- var dir string
- var store *state.DBStore
- if testutil.RaceEnabled {
- // Use on-disk DBStore to reduce memory consumption in race tests.
- dir, err = ioutil.TempDir("", "swarm-stream-")
- if err != nil {
- return nil, nil, err
- }
- store, err = state.NewDBStore(dir)
- if err != nil {
- return nil, nil, err
- }
- } else {
- store = state.NewInmemoryStore()
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
- Syncing: SyncingAutoSubscribe,
- SyncUpdateDelay: 500 * time.Millisecond, //this is needed to trigger the update subscriptions loop
- SkipCheck: true,
- }, nil)
-
- cleanup = func() {
- r.Close()
- clean()
- if dir != "" {
- os.RemoveAll(dir)
- }
- }
-
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- // create context for simulation run
- timeout := 30 * time.Second
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- // defer cancel should come before defer simulation teardown
- defer cancel()
-
- _, err := sim.AddNodesAndConnectChain(2)
- if err != nil {
- t.Fatal(err)
- }
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
- nodeIDs := sim.UpNodeIDs()
- if len(nodeIDs) != 2 {
- return errors.New("not enough nodes up")
- }
-
- nodeIndex := make(map[enode.ID]int)
- for i, id := range nodeIDs {
- nodeIndex[id] = i
- }
-
- disconnected := watchDisconnections(ctx, sim)
- defer func() {
- if err != nil && disconnected.bool() {
- err = errors.New("disconnect events received")
- }
- }()
-
- item, ok := sim.NodeItem(nodeIDs[0], bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- fileStore := item.(*storage.FileStore)
- size := chunkCount * chunkSize
-
- _, wait1, err := fileStore.Store(ctx, testutil.RandomReader(0, size), int64(size), false)
- if err != nil {
- return fmt.Errorf("fileStore.Store: %v", err)
- }
-
- _, wait2, err := fileStore.Store(ctx, testutil.RandomReader(10, size), int64(size), false)
- if err != nil {
- return fmt.Errorf("fileStore.Store: %v", err)
- }
-
- wait1(ctx)
- wait2(ctx)
- time.Sleep(1 * time.Second)
-
- //explicitly check that all subscriptions are there on all bins
- for idx, id := range nodeIDs {
- node := sim.Net.GetNode(id)
- client, err := node.Client()
- if err != nil {
- return fmt.Errorf("create node %d rpc client fail: %v", idx, err)
- }
-
- //ask it for subscriptions
- pstreams := make(map[string][]string)
- err = client.Call(&pstreams, "stream_getPeerServerSubscriptions")
- if err != nil {
- return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
- }
- for _, streams := range pstreams {
- b := make([]bool, 17)
- for _, sub := range streams {
- subPO, err := ParseSyncBinKey(strings.Split(sub, "|")[1])
- if err != nil {
- return err
- }
- b[int(subPO)] = true
- }
- for bin, v := range b {
- if !v {
- return fmt.Errorf("did not find any subscriptions for node %d on bin %d", idx, bin)
- }
- }
- }
- }
- log.Debug("subscriptions on all bins exist between the two nodes, proceeding to check bin indexes")
- log.Debug("uploader node", "enode", nodeIDs[0])
- item, ok = sim.NodeItem(nodeIDs[0], bucketKeyStore)
- if !ok {
- return fmt.Errorf("No DB")
- }
- store := item.(chunk.Store)
- uploaderNodeBinIDs := make([]uint64, 17)
-
- log.Debug("checking pull subscription bin ids")
- for po := 0; po <= 16; po++ {
- until, err := store.LastPullSubscriptionBinID(uint8(po))
- if err != nil {
- t.Fatal(err)
- }
-
- uploaderNodeBinIDs[po] = until
- }
- // wait for syncing
- time.Sleep(syncTime)
-
- // check that the sum of bin indexes is equal
- for idx := range nodeIDs {
- if nodeIDs[idx] == nodeIDs[0] {
- continue
- }
-
- log.Debug("compare to", "enode", nodeIDs[idx])
- item, ok = sim.NodeItem(nodeIDs[idx], bucketKeyStore)
- if !ok {
- return fmt.Errorf("No DB")
- }
- db := item.(chunk.Store)
-
- uploaderSum, otherNodeSum := 0, 0
- for po, uploaderUntil := range uploaderNodeBinIDs {
- shouldUntil, err := db.LastPullSubscriptionBinID(uint8(po))
- if err != nil {
- t.Fatal(err)
- }
- otherNodeSum += int(shouldUntil)
- uploaderSum += int(uploaderUntil)
- }
- if uploaderSum != otherNodeSum {
- t.Fatalf("bin indice sum mismatch. got %d want %d", otherNodeSum, uploaderSum)
- }
- }
- return nil
- })
-
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-// TestStarNetworkSync tests that syncing works on a more elaborate network topology
-// the test creates a network of 10 nodes and connects them in a star topology, this causes
-// the pivot node to have neighbourhood depth > 0, which in turn means that each individual node
-// will only get SOME of the chunks that exist on the uploader node (the pivot node).
-// The test checks that EVERY chunk that exists on the pivot node:
-// a. exists on the most proximate node
-// b. exists on the nodes subscribed on the corresponding chunk PO
-// c. does not exist on the peers that do not have that PO subscription
-func TestStarNetworkSync(t *testing.T) {
- t.Skip("flaky test https://github.com/ethersphere/swarm/issues/1457")
- if testutil.RaceEnabled {
- return
- }
- var (
- chunkCount = 500
- nodeCount = 6
- simTimeout = 60 * time.Second
- syncTime = 30 * time.Second
- filesize = chunkCount * chunkSize
- )
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr := network.NewAddr(ctx.Config.Node())
-
- netStore, delivery, clean, err := newNetStoreAndDeliveryWithBzzAddr(ctx, bucket, addr)
- if err != nil {
- return nil, nil, err
- }
-
- var dir string
- var store *state.DBStore
- if testutil.RaceEnabled {
- // Use on-disk DBStore to reduce memory consumption in race tests.
- dir, err = ioutil.TempDir("", "swarm-stream-")
- if err != nil {
- return nil, nil, err
- }
- store, err = state.NewDBStore(dir)
- if err != nil {
- return nil, nil, err
- }
- } else {
- store = state.NewInmemoryStore()
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
- Syncing: SyncingAutoSubscribe,
- SyncUpdateDelay: 200 * time.Millisecond,
- SkipCheck: true,
- }, nil)
-
- cleanup = func() {
- r.Close()
- clean()
- if dir != "" {
- os.RemoveAll(dir)
- }
- }
-
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- // create context for simulation run
- ctx, cancel := context.WithTimeout(context.Background(), simTimeout)
- // defer cancel should come before defer simulation teardown
- defer cancel()
- _, err := sim.AddNodesAndConnectStar(nodeCount)
- if err != nil {
- t.Fatal(err)
- }
-
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
- nodeIDs := sim.UpNodeIDs()
-
- nodeIndex := make(map[enode.ID]int)
- for i, id := range nodeIDs {
- nodeIndex[id] = i
- }
- disconnected := watchDisconnections(ctx, sim)
- defer func() {
- if err != nil && disconnected.bool() {
- err = errors.New("disconnect events received")
- }
- }()
- seed := int(time.Now().Unix())
- randomBytes := testutil.RandomBytes(seed, filesize)
-
- chunkAddrs, err := getAllRefs(randomBytes[:])
- if err != nil {
- return err
- }
- chunksProx := make([]chunkProxData, 0)
- for _, chunkAddr := range chunkAddrs {
- chunkInfo := chunkProxData{
- addr: chunkAddr,
- uploaderNodePO: chunk.Proximity(nodeIDs[0].Bytes(), chunkAddr),
- nodeProximities: make(map[enode.ID]int),
- }
- closestNodePO := 0
- for nodeAddr := range nodeIndex {
- po := chunk.Proximity(nodeAddr.Bytes(), chunkAddr)
-
- chunkInfo.nodeProximities[nodeAddr] = po
- if po > closestNodePO {
- chunkInfo.closestNodePO = po
- chunkInfo.closestNode = nodeAddr
- }
- log.Trace("processed chunk", "uploaderPO", chunkInfo.uploaderNodePO, "ci", chunkInfo.closestNode, "cpo", chunkInfo.closestNodePO, "cadrr", chunkInfo.addr)
- }
- chunksProx = append(chunksProx, chunkInfo)
- }
-
- // get the pivot node and pump some data
- item, ok := sim.NodeItem(nodeIDs[0], bucketKeyFileStore)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- fileStore := item.(*storage.FileStore)
- reader := bytes.NewReader(randomBytes[:])
- _, wait1, err := fileStore.Store(ctx, reader, int64(len(randomBytes)), false)
- if err != nil {
- return fmt.Errorf("fileStore.Store: %v", err)
- }
-
- wait1(ctx)
-
- // check that chunks with a marked proximate host are where they should be
- count := 0
-
- // wait to sync
- time.Sleep(syncTime)
-
- log.Info("checking if chunks are on prox hosts")
- for _, c := range chunksProx {
- // if the most proximate host is set - check that the chunk is there
- if c.closestNodePO > 0 {
- count++
- log.Trace("found chunk with proximate host set, trying to find in localstore", "po", c.closestNodePO, "closestNode", c.closestNode)
- item, ok = sim.NodeItem(c.closestNode, bucketKeyStore)
- if !ok {
- return fmt.Errorf("No DB")
- }
- store := item.(chunk.Store)
-
- _, err := store.Get(context.TODO(), chunk.ModeGetRequest, c.addr)
- if err != nil {
- return err
- }
- }
- }
- log.Debug("done checking stores", "checked chunks", count, "total chunks", len(chunksProx))
- if count != len(chunksProx) {
- return fmt.Errorf("checked chunks dont match numer of chunks. got %d want %d", count, len(chunksProx))
- }
-
- // check that chunks from each po are _not_ on nodes that don't have subscriptions for these POs
- node := sim.Net.GetNode(nodeIDs[0])
- client, err := node.Client()
- if err != nil {
- return fmt.Errorf("create node 1 rpc client fail: %v", err)
- }
-
- //ask it for subscriptions
- pstreams := make(map[string][]string)
- err = client.Call(&pstreams, "stream_getPeerServerSubscriptions")
- if err != nil {
- return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
- }
-
- //create a map of no-subs for a node
- noSubMap := make(map[enode.ID]map[int]bool)
-
- for subscribedNode, streams := range pstreams {
- id := enode.HexID(subscribedNode)
- b := make([]bool, 17)
- for _, sub := range streams {
- subPO, err := ParseSyncBinKey(strings.Split(sub, "|")[1])
- if err != nil {
- return err
- }
- b[int(subPO)] = true
- }
- noMapMap := make(map[int]bool)
- for i, v := range b {
- if !v {
- noMapMap[i] = true
- }
- }
- noSubMap[id] = noMapMap
- }
-
- // iterate over noSubMap, for each node check if it has any of the chunks it shouldn't have
- for nodeId, nodeNoSubs := range noSubMap {
- for _, c := range chunksProx {
- // if the chunk PO equals a sub the node shouldn't have, check whether the node has the chunk
- if _, ok := nodeNoSubs[c.uploaderNodePO]; ok {
- count++
- item, ok = sim.NodeItem(nodeId, bucketKeyStore)
- if !ok {
- return fmt.Errorf("No DB")
- }
- store := item.(chunk.Store)
-
- _, err := store.Get(context.TODO(), chunk.ModeGetRequest, c.addr)
- if err == nil {
- return fmt.Errorf("got a chunk where it shouldn't be! addr %s, nodeId %s", c.addr, nodeId)
- }
- }
- }
- }
- return nil
- })
-
- if result.Error != nil {
- t.Fatal(result.Error)
- }
-}
-
-type chunkProxData struct {
- addr chunk.Address
- uploaderNodePO int
- nodeProximities map[enode.ID]int
- closestNode enode.ID
- closestNodePO int
-}
-
-//TestSameVersionID just checks that if the version is not changed,
-//then streamer peers see each other
-func TestSameVersionID(t *testing.T) {
- //test version ID
- v := uint(1)
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
- if err != nil {
- return nil, nil, err
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Syncing: SyncingAutoSubscribe,
- }, nil)
- bucket.Store(bucketKeyRegistry, r)
-
- //assign to each node the same version ID
- r.spec.Version = v
-
- cleanup = func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- //connect just two nodes
- log.Info("Adding nodes to simulation")
- _, err := sim.AddNodesAndConnectChain(2)
- if err != nil {
- t.Fatal(err)
- }
-
- log.Info("Starting simulation")
- ctx := context.Background()
- //make sure they have time to connect
- time.Sleep(200 * time.Millisecond)
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- //get the pivot node's registry
- nodes := sim.UpNodeIDs()
-
- item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- registry := item.(*Registry)
-
- //the peers should connect, thus getting the peer should not return nil
- if registry.getPeer(nodes[1]) == nil {
- return errors.New("Expected the peer to not be nil, but it is")
- }
- return nil
- })
- if result.Error != nil {
- t.Fatal(result.Error)
- }
- log.Info("Simulation ended")
-}
-
-//TestDifferentVersionID proves that if the streamer protocol version doesn't match,
-//then the peers are not connected at the streamer level
-func TestDifferentVersionID(t *testing.T) {
- //create a variable to hold the version ID
- v := uint(0)
- sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
- if err != nil {
- return nil, nil, err
- }
-
- r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
- Syncing: SyncingAutoSubscribe,
- }, nil)
- bucket.Store(bucketKeyRegistry, r)
-
- //increase the version ID for each node
- v++
- r.spec.Version = v
-
- cleanup = func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
- })
- defer sim.Close()
-
- //connect the nodes
- log.Info("Adding nodes to simulation")
- _, err := sim.AddNodesAndConnectChain(2)
- if err != nil {
- t.Fatal(err)
- }
-
- log.Info("Starting simulation")
- ctx := context.Background()
- //make sure they have time to connect
- time.Sleep(200 * time.Millisecond)
- result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
- //get the pivot node's registry
- nodes := sim.UpNodeIDs()
-
- item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
- if !ok {
- return fmt.Errorf("No filestore")
- }
- registry := item.(*Registry)
-
- //getting the other peer should fail due to the different version numbers
- if registry.getPeer(nodes[1]) != nil {
- return errors.New("Expected the peer to be nil, but it is not")
- }
- return nil
- })
- if result.Error != nil {
- t.Fatal(result.Error)
- }
- log.Info("Simulation ended")
-}
diff --git a/network/stream/testing/snapshot_128.json b/network/stream/testdata/snapshot_128.json
similarity index 100%
rename from network/stream/testing/snapshot_128.json
rename to network/stream/testdata/snapshot_128.json
diff --git a/network/stream/testing/snapshot_16.json b/network/stream/testdata/snapshot_16.json
similarity index 100%
rename from network/stream/testing/snapshot_16.json
rename to network/stream/testdata/snapshot_16.json
diff --git a/network/stream/testing/snapshot_256.json b/network/stream/testdata/snapshot_256.json
similarity index 100%
rename from network/stream/testing/snapshot_256.json
rename to network/stream/testdata/snapshot_256.json
diff --git a/network/stream/testing/snapshot_32.json b/network/stream/testdata/snapshot_32.json
similarity index 100%
rename from network/stream/testing/snapshot_32.json
rename to network/stream/testdata/snapshot_32.json
diff --git a/network/stream/testing/snapshot_4.json b/network/stream/testdata/snapshot_4.json
similarity index 100%
rename from network/stream/testing/snapshot_4.json
rename to network/stream/testdata/snapshot_4.json
diff --git a/network/stream/testing/snapshot_64.json b/network/stream/testdata/snapshot_64.json
similarity index 100%
rename from network/stream/testing/snapshot_64.json
rename to network/stream/testdata/snapshot_64.json
diff --git a/network/stream/v2/common_test.go b/network/stream/v2/common_test.go
new file mode 100644
index 0000000000..6d883edd4b
--- /dev/null
+++ b/network/stream/v2/common_test.go
@@ -0,0 +1,311 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/network"
+ "github.com/ethersphere/swarm/network/retrieval"
+ "github.com/ethersphere/swarm/network/simulation"
+ "github.com/ethersphere/swarm/state"
+ "github.com/ethersphere/swarm/storage"
+ "github.com/ethersphere/swarm/storage/localstore"
+ "github.com/ethersphere/swarm/storage/mock"
+ "github.com/ethersphere/swarm/testutil"
+)
+
+var (
+ update = flag.Bool("update", false, "Update golden files in testdata directory")
+)
+
+func init() {
+ testutil.Init()
+}
+
+var (
+ serviceNameStream = "bzz-stream"
+ bucketKeyFileStore = "filestore"
+ bucketKeyLocalStore = "localstore"
+ bucketKeyInitialBinIndexes = "bin-indexes"
+
+ simContextTimeout = 90 * time.Second
+)
+
+func nodeRegistry(sim *simulation.Simulation, id enode.ID) (s *Registry) {
+ return sim.Service(serviceNameStream, id).(*Registry)
+}
+
+func nodeFileStore(sim *simulation.Simulation, id enode.ID) (s *storage.FileStore) {
+ return sim.MustNodeItem(id, bucketKeyFileStore).(*storage.FileStore)
+}
+
+func nodeInitialBinIndexes(sim *simulation.Simulation, id enode.ID) (s []uint64) {
+ return sim.MustNodeItem(id, bucketKeyInitialBinIndexes).([]uint64)
+}
+
+func nodeKademlia(sim *simulation.Simulation, id enode.ID) (k *network.Kademlia) {
+ return sim.MustNodeItem(id, simulation.BucketKeyKademlia).(*network.Kademlia)
+}
+
+func nodeBinIndexes(t *testing.T, store interface {
+ LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
+}) []uint64 {
+ t.Helper()
+
+ binIndexes := make([]uint64, 17)
+ for i := 0; i <= 16; i++ {
+ binIndex, err := store.LastPullSubscriptionBinID(uint8(i))
+ if err != nil {
+ t.Fatal(err)
+ }
+ binIndexes[i] = binIndex
+ }
+ return binIndexes
+}
+
+type SyncSimServiceOptions struct {
+ InitialChunkCount uint64
+ SyncOnlyWithinDepth bool
+ StreamConstructorFunc func(state.Store, []byte, ...StreamProvider) node.Service
+}
+
+func newSyncSimServiceFunc(o *SyncSimServiceOptions) func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ if o == nil {
+ o = new(SyncSimServiceOptions)
+ }
+ if o.StreamConstructorFunc == nil {
+ o.StreamConstructorFunc = func(s state.Store, b []byte, p ...StreamProvider) node.Service {
+ return New(s, b, p...)
+ }
+ }
+ return func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
+ n := ctx.Config.Node()
+ addr := network.NewAddr(n)
+
+ localStore, localStoreCleanup, err := newTestLocalStore(n.ID(), addr, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var kad *network.Kademlia
+
+ // load an existing kademlia from the bucket if there is one - we don't want two independent copies of it
+ if kv, ok := bucket.Load(simulation.BucketKeyKademlia); ok {
+ kad = kv.(*network.Kademlia)
+ } else {
+ kad = network.NewKademlia(addr.Over(), network.NewKadParams())
+ bucket.Store(simulation.BucketKeyKademlia, kad)
+ }
+
+ netStore := storage.NewNetStore(localStore, kad.BaseAddr(), n.ID())
+ lnetStore := storage.NewLNetStore(netStore)
+ fileStore := storage.NewFileStore(lnetStore, lnetStore, storage.NewFileStoreParams(), chunk.NewTags())
+ bucket.Store(bucketKeyFileStore, fileStore)
+ bucket.Store(bucketKeyLocalStore, localStore)
+
+ ret := retrieval.New(kad, netStore, kad.BaseAddr())
+ netStore.RemoteGet = ret.RequestFromPeers
+
+ if o.InitialChunkCount > 0 {
+ _, err := uploadChunks(context.Background(), localStore, o.InitialChunkCount)
+ if err != nil {
+ return nil, nil, err
+ }
+ binIndexes := make([]uint64, 17)
+ for i := uint8(0); i <= 16; i++ {
+ binIndex, err := localStore.LastPullSubscriptionBinID(i)
+ if err != nil {
+ return nil, nil, err
+ }
+ binIndexes[i] = binIndex
+ }
+ bucket.Store(bucketKeyInitialBinIndexes, binIndexes)
+ }
+
+ var store *state.DBStore
+ // Use on-disk DBStore to reduce memory consumption in race tests.
+ dir, err := ioutil.TempDir(tmpDir, "statestore-")
+ if err != nil {
+ return nil, nil, err
+ }
+ store, err = state.NewDBStore(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sp := NewSyncProvider(netStore, kad, true, o.SyncOnlyWithinDepth)
+ ss := o.StreamConstructorFunc(store, addr.Over(), sp)
+
+ cleanup = func() {
+ //ss.Stop() // wait for handlers to finish before closing localstore
+ localStore.Close()
+ localStoreCleanup()
+ store.Close()
+ os.RemoveAll(dir)
+ }
+
+ return ss, cleanup, nil
+ }
+}
+
+func newTestLocalStore(id enode.ID, addr *network.BzzAddr, globalStore mock.GlobalStorer) (localStore *localstore.DB, cleanup func(), err error) {
+ dir, err := ioutil.TempDir(tmpDir, "localstore-")
+ if err != nil {
+ return nil, nil, err
+ }
+ cleanup = func() {
+ os.RemoveAll(dir)
+ }
+
+ var mockStore *mock.NodeStore
+ if globalStore != nil {
+ mockStore = globalStore.NewNodeStore(common.BytesToAddress(id.Bytes()))
+ }
+
+ localStore, err = localstore.New(dir, addr.Over(), &localstore.Options{
+ MockStore: mockStore,
+ })
+ if err != nil {
+ cleanup()
+ return nil, nil, err
+ }
+ return localStore, cleanup, nil
+}
+
+func parseID(str string) ID {
+ v := strings.Split(str, "|")
+ if len(v) != 2 {
+ panic("too short")
+ }
+ return NewID(v[0], v[1])
+}
+
+func uploadChunks(ctx context.Context, store chunk.Store, count uint64) (chunks []chunk.Address, err error) {
+ for i := uint64(0); i < count; i++ {
+ c := storage.GenerateRandomChunk(4096)
+ exists, err := store.Put(ctx, chunk.ModePutUpload, c)
+ if err != nil {
+ return nil, err
+ }
+ if exists[0] {
+ return nil, errors.New("generated already existing chunk")
+ }
+ chunks = append(chunks, c.Address())
+ }
+ return chunks, nil
+}
+
+func mustUploadChunks(ctx context.Context, t testing.TB, store chunk.Store, count uint64) (chunks []chunk.Address) {
+ t.Helper()
+
+ chunks, err := uploadChunks(ctx, store, count)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return chunks
+}
+
+// tmpDir is the test run's global temporary directory. Use it as the first
+// argument to ioutil.TempDir calls in this package's tests.
+var tmpDir string
+
+func TestMain(m *testing.M) {
+ // Remove the sync init delay in tests.
+ defer func(b time.Duration) { SyncInitBackoff = b }(SyncInitBackoff)
+ SyncInitBackoff = 0
+
+ // Tests in this package generate a lot of temporary directories
+ // that may not be removed if tests are interrupted with SIGINT.
+ // This function constructs a single top-level directory to be used
+ // to store all data from a test execution. It removes the
+ // tmpDir with defer, or by catching keyboard interrupt signal,
+ // so that all data will be removed even on forced termination.
+
+ var err error
+ tmpDir, err = ioutil.TempDir("", "swarm-stream-")
+ if err != nil {
+ panic(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ defer signal.Stop(c)
+
+ go func() {
+ first := true
+ for range c {
+ fmt.Fprintln(os.Stderr, "signal: interrupt")
+ if first {
+ fmt.Fprintln(os.Stderr, "removing swarm stream tmp directory", tmpDir)
+ os.RemoveAll(tmpDir)
+ os.Exit(1)
+ }
+ }
+ }()
+ os.Exit(m.Run())
+}
+
+// syncPauser implements the pauser interface and is used only in tests.
+type syncPauser struct {
+ c *sync.Cond
+ cMu sync.Mutex
+ mu sync.RWMutex
+}
+
+func (p *syncPauser) pause() {
+ p.mu.Lock()
+ if p.c == nil {
+ p.c = sync.NewCond(&p.cMu)
+ }
+ p.mu.Unlock()
+}
+
+func (p *syncPauser) resume() {
+ p.c.L.Lock()
+ p.c.Broadcast()
+ p.c.L.Unlock()
+ p.mu.Lock()
+ p.c = nil
+ p.mu.Unlock()
+}
+
+func (p *syncPauser) wait() {
+ p.mu.RLock()
+ if p.c != nil {
+ p.c.L.Lock()
+ p.c.Wait()
+ p.c.L.Unlock()
+ }
+ p.mu.RUnlock()
+}
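+
+// A minimal usage sketch (illustrative only): tests set handleMsgPauser before
+// constructing the Registry and reset it after closing, e.g.:
+//
+// p := &syncPauser{}
+// handleMsgPauser = p
+// p.pause()
+// // ... deliver messages that should block in Registry.HandleMsg ...
+// p.resume()
+// handleMsgPauser = nil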
diff --git a/network/stream/v2/cursors_test.go b/network/stream/v2/cursors_test.go
new file mode 100644
index 0000000000..17d8646e41
--- /dev/null
+++ b/network/stream/v2/cursors_test.go
@@ -0,0 +1,655 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
+ "github.com/ethereum/go-ethereum/rpc"
+
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/log"
+ "github.com/ethersphere/swarm/network"
+ "github.com/ethersphere/swarm/network/simulation"
+ "github.com/ethersphere/swarm/p2p/protocols"
+ "github.com/ethersphere/swarm/pot"
+ "github.com/ethersphere/swarm/state"
+)
+
+func init() {
+ rand.Seed(time.Now().Unix())
+}
+
+// TestNodesExchangeCorrectBinIndexes tests that two nodes exchange the correct
+// cursors for all streams, i.e. that all streams are exchanged.
+func TestNodesExchangeCorrectBinIndexes(t *testing.T) {
+ const (
+ nodeCount = 2
+ chunkCount = 1000
+ )
+
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ serviceNameStream: newSyncSimServiceFunc(&SyncSimServiceOptions{
+ InitialChunkCount: chunkCount,
+ }),
+ })
+ defer sim.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), simContextTimeout)
+ defer cancel()
+ _, err := sim.AddNodesAndConnectStar(nodeCount)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ nodeIDs := sim.UpNodeIDs()
+ if len(nodeIDs) != nodeCount {
+ return errors.New("not enough nodes up")
+ }
+
+ // wait for the nodes to exchange StreamInfo messages
+ time.Sleep(100 * time.Millisecond)
+
+ idOne := nodeIDs[0]
+ idOther := nodeIDs[1]
+ onesCursors := nodeRegistry(sim, idOne).getPeer(idOther).getCursorsCopy()
+ othersCursors := nodeRegistry(sim, idOther).getPeer(idOne).getCursorsCopy()
+
+ onesBins := nodeInitialBinIndexes(sim, idOne)
+ othersBins := nodeInitialBinIndexes(sim, idOther)
+
+ if err := compareNodeBinsToStreams(t, onesCursors, othersBins); err != nil {
+ return err
+ }
+ if err := compareNodeBinsToStreams(t, othersCursors, onesBins); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+}
+
+// TestNodesCorrectBinsDynamic adds nodes to a star topology, connecting new nodes to the pivot node.
+// After each connection is made, the cursors on the pivot are checked to reflect the bins that we are
+// currently still interested in. This makes sure that the correct bins are of interest
+// when nodes enter the kademlia of the pivot node.
+func TestNodesCorrectBinsDynamic(t *testing.T) {
+ const (
+ nodeCount = 6
+ chunkCount = 500
+ )
+
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ serviceNameStream: newSyncSimServiceFunc(&SyncSimServiceOptions{
+ InitialChunkCount: chunkCount,
+ }),
+ })
+ defer sim.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), simContextTimeout)
+ defer cancel()
+ _, err := sim.AddNodesAndConnectStar(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ nodeIDs := sim.UpNodeIDs()
+ if len(nodeIDs) != 2 {
+ return errors.New("not enough nodes up")
+ }
+
+ // wait for the nodes to exchange StreamInfo messages
+ time.Sleep(100 * time.Millisecond)
+ idPivot := nodeIDs[0]
+ pivotSyncer := nodeRegistry(sim, idPivot)
+ pivotKademlia := nodeKademlia(sim, idPivot)
+ pivotDepth := uint(pivotKademlia.NeighbourhoodDepth())
+
+ for j := 2; j <= nodeCount; j++ {
+ // append a node to the simulation
+ id, err := sim.AddNodes(1)
+ if err != nil {
+ return err
+ }
+ err = sim.Net.ConnectNodesStar(id, nodeIDs[0])
+ if err != nil {
+ return err
+ }
+ nodeIDs := sim.UpNodeIDs()
+ if len(nodeIDs) != j+1 {
+ return fmt.Errorf("not enough nodes up. got %d, want %d", len(nodeIDs), j)
+ }
+ time.Sleep(50 * time.Millisecond)
+ idPivot = nodeIDs[0]
+ for i := 1; i < j; i++ {
+ idOther := nodeIDs[i]
+ otherKademlia := sim.MustNodeItem(idOther, simulation.BucketKeyKademlia).(*network.Kademlia)
+ po := chunk.Proximity(otherKademlia.BaseAddr(), pivotKademlia.BaseAddr())
+ depth := pivotKademlia.NeighbourhoodDepth()
+ pivotCursors := pivotSyncer.getPeer(idOther).getCursorsCopy()
+
+ // check that the pivot node is interested just in bins >= depth
+ if po >= depth {
+ othersBins := nodeInitialBinIndexes(sim, idOther)
+ if err := compareNodeBinsToStreamsWithDepth(t, pivotCursors, othersBins, pivotDepth); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+}
+
+var reestablishCursorsSnapshotFilename = "testdata/reestablish-cursors-snapshot.json"
+
+// TestNodeRemovesAndReestablishCursors creates a pivot network of 2 nodes where the pivot's depth = 0.
+// test sequence:
+// - select another node with po >= depth (of the pivot's kademlia)
+// - add other nodes to the pivot until the depth goes above that peer's po (depth > peerPo)
+// - asserts that the pivot does not maintain any cursors of the node that moved out of depth
+// - start removing nodes from the simulation until that peer is again within depth
+// - check that the cursors are being re-established
+func TestNodeRemovesAndReestablishCursors(t *testing.T) {
+ t.Skip("disable to find more optimal way to run it")
+
+ if *update {
+ generateReestablishCursorsSnapshot(t, 2)
+ }
+
+ const chunkCount = 1000
+
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ serviceNameStream: newSyncSimServiceFunc(nil),
+ })
+ defer sim.Close()
+
+ // load the snapshot
+ if err := sim.UploadSnapshot(context.Background(), reestablishCursorsSnapshotFilename); err != nil {
+ t.Fatal(err)
+ }
+ // load additional test specific data from the snapshot
+ d, err := ioutil.ReadFile(reestablishCursorsSnapshotFilename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var s reestablishCursorsState
+ if err := json.Unmarshal(d, &s); err != nil {
+ t.Fatal(err)
+ }
+
+ pivotEnode := s.PivotEnode
+ pivotKademlia := sim.MustNodeItem(pivotEnode, simulation.BucketKeyKademlia).(*network.Kademlia)
+ lookupEnode := s.LookupEnode
+ lookupPO := s.PO
+ nodeCount := len(sim.UpNodeIDs())
+
+ mustUploadChunks(context.Background(), t, nodeFileStore(sim, pivotEnode), chunkCount)
+
+ log.Debug("tracking enode", "enode", lookupEnode, "po", lookupPO)
+
+ // expecting some cursors
+ waitForCursors(t, sim, pivotEnode, lookupEnode, true)
+
+ // append nodes to the simulation until the node's po moves out of depth, then assert no subscriptions from the pivot to that node
+ for i := float64(1); pivotKademlia.NeighbourhoodDepth() <= lookupPO; i++ {
+ // calculate the number of nodes to add:
+ // - logarithmically increase by the number of iterations
+ // - keep the logarithm defined by starting the iteration from 1, not 0
+ // - ensure the logarithmic factor is greater than 0 by adding 1
+ // - multiply by the difference between target and current depth times the empirical constant 4
+ // - ensure that term is greater than 0 by adding 1
+ newNodeCount := int(math.Logb(i)+1) * ((lookupPO-pivotKademlia.NeighbourhoodDepth())*4 + 1)
+ id, err := sim.AddNodes(newNodeCount)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = sim.Net.ConnectNodesStar(id, pivotEnode)
+ if err != nil {
+ t.Fatal(err)
+ }
+ nodeCount += newNodeCount
+ nodeIDs := sim.UpNodeIDs()
+ if len(nodeIDs) != nodeCount {
+ t.Fatalf("got %v up nodes, want %v", len(nodeIDs), nodeCount)
+ }
+ log.Debug("added new nodes to reach depth", "new nodes", newNodeCount, "current depth", pivotKademlia.NeighbourhoodDepth(), "target depth", lookupPO)
+ }
+
+ log.Debug("added nodes to sim, node moved out of depth", "depth", pivotKademlia.NeighbourhoodDepth(), "peerPo", lookupPO, "lookupEnode", lookupEnode)
+
+ // no cursors should exist at this point
+ waitForCursors(t, sim, pivotEnode, lookupEnode, false)
+
+ var removed int
+ // remove nodes from the simulation until the peer moves again into depth
+ log.Error("pulling the plug on some nodes to make the depth go up again", "pivotDepth", pivotKademlia.NeighbourhoodDepth(), "peerPo", lookupPO, "lookupEnode", lookupEnode)
+ for pivotKademlia.NeighbourhoodDepth() > lookupPO {
+ _, err := sim.StopRandomNode(pivotEnode, lookupEnode)
+ if err != nil {
+ t.Fatal(err)
+ }
+ removed++
+ log.Debug("removed 1 node", "pivotDepth", pivotKademlia.NeighbourhoodDepth(), "peerPo", lookupPO)
+ }
+ log.Debug("done removing nodes", "pivotDepth", pivotKademlia.NeighbourhoodDepth(), "peerPo", lookupPO, "removed", removed)
+
+ // expecting some new cursors
+ waitForCursors(t, sim, pivotEnode, lookupEnode, true)
+}
+
+// reestablishCursorsState is test-specific data appended to the reestablish cursors snapshot
+type reestablishCursorsState struct {
+ PivotEnode enode.ID `json:"pivotEnode"`
+ LookupEnode enode.ID `json:"lookupEnode"`
+ PO int `json:"po"`
+}
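+
+// The snapshot file written by generateReestablishCursorsSnapshot is a
+// simulations.Snapshot with these three fields merged at its top level,
+// roughly (illustrative): {"nodes": [...], "conns": [...],
+// "pivotEnode": "...", "lookupEnode": "...", "po": 2}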
+
+// generateReestablishCursorsSnapshot generates a simulation and saves its
+// snapshot for the TestNodeRemovesAndReestablishCursors test.
+func generateReestablishCursorsSnapshot(t *testing.T, targetPO int) {
+ sim, pivotEnode, lookupEnode := setupReestablishCursorsSimulation(t, targetPO)
+ defer sim.Close()
+
+ waitForCursors(t, sim, pivotEnode, lookupEnode, true)
+
+ s, err := sim.Net.Snapshot()
+ if err != nil {
+ t.Fatal(err)
+ }
+ d, err := json.Marshal(struct {
+ *simulations.Snapshot
+ reestablishCursorsState
+ }{
+ Snapshot: s,
+ reestablishCursorsState: reestablishCursorsState{
+ PivotEnode: pivotEnode,
+ LookupEnode: lookupEnode,
+ PO: targetPO,
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Log("save snapshot file")
+
+ err = ioutil.WriteFile(reestablishCursorsSnapshotFilename, d, 0666)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// setupReestablishCursorsSimulation generates a simulation for the
+// TestNodeRemovesAndReestablishCursors test. The provided target po is the
+// depth to reach in the test by adding new nodes.
+func setupReestablishCursorsSimulation(t *testing.T, targetPO int) (sim *simulation.Simulation, pivotEnode, lookupEnode enode.ID) {
+ // initial node count
+ nodeCount := 5
+
+ sim = simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ serviceNameStream: newSyncSimServiceFunc(nil),
+ })
+
+ nodeIDs, err := sim.AddNodesAndConnectStar(nodeCount)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pivotEnode = nodeIDs[0]
+ log.Debug("simulation pivot node", "id", pivotEnode)
+ pivotKademlia := sim.MustNodeItem(pivotEnode, simulation.BucketKeyKademlia).(*network.Kademlia)
+
+ // make sure that we get a node with po > depth
+ for i := 1; i < nodeCount; i++ {
+ log.Debug("looking for a peer", "i", i, "nodecount", nodeCount)
+ otherKademlia := sim.MustNodeItem(nodeIDs[i], simulation.BucketKeyKademlia).(*network.Kademlia)
+ po := chunk.Proximity(otherKademlia.BaseAddr(), pivotKademlia.BaseAddr())
+ depth := pivotKademlia.NeighbourhoodDepth()
+ if po > depth {
+ if po != targetPO {
+ log.Debug("wrong depth to reach, generating new simulation", "depth", po)
+ return setupReestablishCursorsSimulation(t, targetPO)
+ }
+ lookupEnode = nodeIDs[i]
+ return
+ }
+ // append a node to the simulation
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ log.Debug("added node to simulation, connecting to pivot", "id", id, "pivot", pivotEnode)
+ if err = sim.Net.Connect(id, pivotEnode); err != nil {
+ t.Fatal(err)
+ }
+ nodeCount++
+ // wait for node to be set
+ time.Sleep(100 * time.Millisecond)
+ }
+ t.Fatal("node with po<=depth not found")
+ return
+}
+
+// waitForCursors periodically polls the pivot node until it either has some
+// cursors for the lookup node (wantSome true) or none (wantSome false), and
+// fails the test if the desired state is not reached in time.
+func waitForCursors(t *testing.T, sim *simulation.Simulation, pivotEnode, lookupEnode enode.ID, wantSome bool) {
+ t.Helper()
+
+ var got int
+ for i := 0; i < 1000; i++ { // 10s total wait
+ time.Sleep(10 * time.Millisecond)
+ s, ok := sim.Service(serviceNameStream, pivotEnode).(*Registry)
+ if !ok {
+ continue
+ }
+ p := s.getPeer(lookupEnode)
+ if p == nil {
+ continue
+ }
+ got = len(p.getCursorsCopy())
+ if (got != 0) == wantSome {
+ return
+ }
+ }
+ if wantSome {
+ t.Fatalf("got %v cursors, but want some", got)
+ } else {
+ t.Fatalf("got %v cursors, but want none", got)
+ }
+}
+
+// compareNodeBinsToStreams checks that the values in `onesCursors` correlate to the values in `othersBins`.
+// onesCursors represents the stream cursors that node A knows about node B (i.e. in this case they should directly
+// reflect the values which node B retrieved from its local store).
+// othersBins is the array of bin indexes on node B's local store as they were inserted into the store.
+func compareNodeBinsToStreams(t *testing.T, onesCursors map[string]uint64, othersBins []uint64) (err error) {
+ if len(onesCursors) == 0 {
+ return errors.New("no cursors")
+ }
+ if len(othersBins) == 0 {
+ return errors.New("no bins")
+ }
+
+ for nameKey, cur := range onesCursors {
+ id, err := parseSyncKey(parseID(nameKey).Key)
+ if err != nil {
+ return err
+ }
+ if othersBins[id] != cur {
+ return fmt.Errorf("bin indexes not equal. bin %d, got %d, want %d", id, cur, othersBins[id])
+ }
+ }
+ return nil
+}
+
+func compareNodeBinsToStreamsWithDepth(t *testing.T, onesCursors map[string]uint64, othersBins []uint64, depth uint) (err error) {
+ log.Debug("compareNodeBinsToStreamsWithDepth", "cursors", onesCursors, "othersBins", othersBins, "depth", depth)
+ if len(onesCursors) == 0 || len(othersBins) == 0 {
+ return errors.New("no cursors")
+ }
+ // inclusive test
+ for nameKey, cur := range onesCursors {
+ bin, err := parseSyncKey(parseID(nameKey).Key)
+ if err != nil {
+ return err
+ }
+ if uint(bin) < depth {
+ return fmt.Errorf("cursor at bin %d should not exist. depth %d", bin, depth)
+ }
+ if othersBins[bin] != cur {
+ return fmt.Errorf("bin indexes not equal. bin %d, got %d, want %d", bin, cur, othersBins[bin])
+ }
+ }
+
+ // exclusive test
+ for i := uint8(0); i < uint8(depth); i++ {
+ // should not have anything shallower than depth
+ id := NewID("SYNC", encodeSyncKey(i))
+ if _, ok := onesCursors[id.String()]; ok {
+ return fmt.Errorf("oneCursors contains id %s, but it should not", id)
+ }
+ }
+ return nil
+}
+
+// TestCorrectCursorsExchangeRace brings up two nodes with a random config,
+// then generates a number of bogus nodes at different POs from the pivot node.
+// Those POs are then turned on in the pivot's kademlia in order to trigger depth changes
+// without creating real nodes, which would slow down the test and the CI execution. The depth changes
+// trigger cursor requests from the pivot to the other node; these requests are intercepted
+// by a mock Stream handler which later sends the replies to those requests in a different order.
+// The test finishes after the random replies are processed and the correct cursors are asserted according to the
+// real kademlia depth. This test accommodates possible race conditions where multiple cursor requests are
+// sent while the kademlia depth keeps changing; this may cause the contents of requests that are
+// still in flight, and whose responses are not yet processed, to be discarded.
+func TestCorrectCursorsExchangeRace(t *testing.T) {
+ bogusNodeCount := 15
+ bogusNodes := []*network.Peer{}
+ popRandomNode := func() *network.Peer {
+ log.Debug("bogus peer array length", "len", len(bogusNodes))
+ i := rand.Intn(len(bogusNodes))
+ elem := bogusNodes[i]
+ bogusNodes = append(bogusNodes[:i], bogusNodes[i+1:]...)
+ return elem
+ }
+ streamInfoRes := []*StreamInfoRes{}
+ infoReqHook := func(msg *StreamInfoReq) {
+ log.Trace("mock got StreamInfoReq msg", "msg", msg)
+
+ // create the response
+ res := &StreamInfoRes{}
+ for _, v := range msg.Streams {
+ cur, err := parseSyncKey(v.Key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ desc := StreamDescriptor{
+ Stream: v,
+ Cursor: uint64(cur),
+ Bounded: false,
+ }
+ res.Streams = append(res.Streams, desc)
+ }
+ streamInfoRes = append(streamInfoRes, res)
+ }
+
+ popRandomResponse := func() *StreamInfoRes {
+ log.Debug("responses array length", "len", len(streamInfoRes))
+ i := rand.Intn(len(streamInfoRes))
+ elem := streamInfoRes[i]
+ streamInfoRes = append(streamInfoRes[:i], streamInfoRes[i+1:]...)
+ return elem
+ }
+ opts := &SyncSimServiceOptions{
+ StreamConstructorFunc: func(s state.Store, b []byte, p ...StreamProvider) node.Service {
+ return New(s, b, p...)
+ },
+ }
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ serviceNameStream: newSyncSimServiceFunc(opts),
+ })
+ defer sim.Close()
+
+ // create the first node with the non-mock initialiser
+ pivot, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // second node should start with the mock protocol
+ opts.StreamConstructorFunc = func(s state.Store, b []byte, p ...StreamProvider) node.Service {
+ return newMock(infoReqHook)
+ }
+
+ other, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = sim.Net.Connect(pivot, other)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+ pivotKad := nodeKademlia(sim, pivot)
+ pivotAddr := pot.NewAddressFromBytes(pivotKad.BaseAddr())
+ pivotStream := nodeRegistry(sim, pivot)
+
+ otherBase := nodeKademlia(sim, other).BaseAddr()
+ otherPeer := pivotStream.getPeer(other)
+
+ log.Debug(pivotKad.String())
+ // add a few fictional nodes at higher POs so that we get a kademlia depth change and,
+ // as a result, a trigger for a StreamInfoReq message between the two 'real' nodes
+
+ for i := 0; i < bogusNodeCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "wu tang killa beez", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(pivotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, pivotKad)
+ pivotKad.On(peer)
+ bogusNodes = append(bogusNodes, peer)
+ time.Sleep(50 * time.Millisecond)
+ }
+CHECKSTREAMS:
+ pivotDepth := pivotKad.NeighbourhoodDepth()
+ po := chunk.Proximity(otherBase, pivotKad.BaseAddr())
+ sub, qui := syncSubscriptionsDiff(po, -1, pivotDepth, pivotKad.MaxProxDisplay, false) // false stands for syncBinsOnlyWithinDepth
+ log.Debug("got desired pivot cursor state", "depth", pivotDepth, "subs", sub, "quits", qui)
+
+ for i := len(streamInfoRes); i > 0; i-- {
+ v := popRandomResponse()
+ pivotStream.clientHandleStreamInfoRes(context.Background(), otherPeer, v)
+ }
+
+ // get the pivot cursors for the peer and assert they equal what is in `sub`
+ for _, stream := range getAllSyncStreams() {
+ cur, ok := otherPeer.getCursor(stream)
+ keyInt, err := parseSyncKey(stream.Key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ shouldExist := checkKeyInSlice(int(keyInt), sub)
+
+ if shouldExist != ok {
+ t.Fatalf("cursor presence mismatch. key %s, cur %d, should exist %t, exists %t", stream.Key, cur, shouldExist, ok)
+ }
+ }
+
+ // repeat, until all of the bogus nodes are out of the way
+ if len(bogusNodes) > 0 {
+ p := popRandomNode()
+ pivotKad.Off(p)
+ time.Sleep(50 * time.Millisecond) // wait for the streamInfoReq to come through
+ goto CHECKSTREAMS
+ }
+}
+
+type slipStreamMock struct {
+ spec *protocols.Spec
+ streamInfoReqHook func(*StreamInfoReq)
+}
+
+func newMock(infoReqHook func(*StreamInfoReq)) *slipStreamMock {
+ return &slipStreamMock{
+ spec: Spec,
+ streamInfoReqHook: infoReqHook,
+ }
+}
+
+func (s *slipStreamMock) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{
+ {
+ Name: "bzz-stream",
+ Version: 1,
+ Length: 10 * 1024 * 1024,
+ Run: s.Run,
+ },
+ }
+}
+
+func (s *slipStreamMock) APIs() []rpc.API {
+ return nil
+}
+
+func (s *slipStreamMock) Close() {
+}
+
+func (s *slipStreamMock) Start(server *p2p.Server) error {
+ return nil
+}
+
+func (s *slipStreamMock) Stop() error {
+ return nil
+}
+
+func (s *slipStreamMock) Run(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ peer := protocols.NewPeer(p, rw, s.spec)
+ return peer.Run(s.HandleMsg)
+}
+
+func (s *slipStreamMock) HandleMsg(ctx context.Context, msg interface{}) error {
+ switch msg := msg.(type) {
+ case *StreamInfoReq:
+ s.streamInfoReqHook(msg)
+ case *GetRange:
+ return nil
+ default:
+ panic("unexpected")
+ }
+ return nil
+}
+
+func getAllSyncStreams() (streams []ID) {
+ for i := uint8(0); i <= 16; i++ {
+ streams = append(streams, ID{
+ Name: syncStreamName,
+ Key: encodeSyncKey(i),
+ })
+ }
+ return
+}
diff --git a/network/stream/v2/peer.go b/network/stream/v2/peer.go
new file mode 100644
index 0000000000..b7003e75e7
--- /dev/null
+++ b/network/stream/v2/peer.go
@@ -0,0 +1,228 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "encoding/hex"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethersphere/swarm/chunk"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethersphere/swarm/network"
+ "github.com/ethersphere/swarm/network/stream/intervals"
+ "github.com/ethersphere/swarm/state"
+)
+
+// Peer is the Peer extension for the streaming protocol
+type Peer struct {
+ *network.BzzPeer
+ mtx sync.RWMutex
+ providers map[string]StreamProvider
+ intervalsStore state.Store // store for per-stream sync intervals; TODO: move to stream
+
+ logger log.Logger
+
+ streamCursorsMu sync.Mutex
+ streamCursors map[string]uint64 // key: stream ID string representation, value: session cursor. Keeps cursors for all streams; when unset we are not interested in that bin
+ openWants map[uint]*want // maintain open wants on the client side
+ openOffers map[uint]offer // maintain open offers on the server side
+
+ quit chan struct{} // closed when peer is going offline
+}
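+
+// Locking discipline: streamCursorsMu guards streamCursors only, while mtx
+// guards openWants, openOffers and access to the intervals store.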
+
+// NewPeer is the constructor for Peer
+func NewPeer(peer *network.BzzPeer, baseKey []byte, i state.Store, providers map[string]StreamProvider) *Peer {
+ p := &Peer{
+ BzzPeer: peer,
+ providers: providers,
+ intervalsStore: i,
+ streamCursors: make(map[string]uint64),
+ openWants: make(map[uint]*want),
+ openOffers: make(map[uint]offer),
+ quit: make(chan struct{}),
+ logger: log.New("base", hex.EncodeToString(baseKey)[:16], "peer", peer.ID().String()[:16]),
+ }
+ return p
+}
+
+func (p *Peer) cursorsCount() int {
+ p.streamCursorsMu.Lock()
+ defer p.streamCursorsMu.Unlock()
+
+ return len(p.streamCursors)
+}
+
+func (p *Peer) getCursorsCopy() map[string]uint64 {
+ p.streamCursorsMu.Lock()
+ defer p.streamCursorsMu.Unlock()
+
+ c := make(map[string]uint64, len(p.streamCursors))
+ for k, v := range p.streamCursors {
+ c[k] = v
+ }
+ return c
+}
+
+func (p *Peer) getCursor(stream ID) (uint64, bool) {
+ p.streamCursorsMu.Lock()
+ defer p.streamCursorsMu.Unlock()
+ val, ok := p.streamCursors[stream.String()]
+ return val, ok
+}
+
+func (p *Peer) setCursor(stream ID, cursor uint64) {
+ p.streamCursorsMu.Lock()
+ defer p.streamCursorsMu.Unlock()
+
+ p.streamCursors[stream.String()] = cursor
+}
+
+func (p *Peer) deleteCursor(stream ID) {
+ p.streamCursorsMu.Lock()
+ defer p.streamCursorsMu.Unlock()
+
+ delete(p.streamCursors, stream.String())
+}
+
+// InitProviders initializes a provider for a certain peer
+func (p *Peer) InitProviders() {
+ p.logger.Debug("peer.InitProviders")
+
+ for _, sp := range p.providers {
+ go sp.InitPeer(p)
+ }
+}
+
+// offer represents an open offer from a server to a client as a result of a GetRange message
+// it is stored for reference to requests on the peer.openOffers map
+type offer struct {
+ ruid uint // the request uid
+ stream ID // the stream id
+ hashes []byte // all hashes offered to the client
+ requested time.Time // requested at time
+}
+
+// want represents an open want for a hash range from a client to a server
+// it is stored on the peer.openWants
+type want struct {
+ remaining uint64 // number of remaining chunks to deliver
+ ruid uint // the request uid
+ from uint64 // want from index
+ to *uint64 // want to index, nil signifies top of range not yet known
+ head bool // is this the head of the stream? (bound versus tip of the stream; true is tip)
+ stream ID // the stream id
+ hashes map[string]struct{} // key: chunk address, value: wanted yes/no, used to prevent unsolicited chunks
+ requested time.Time // requested at time
+ chunks chan chunk.Address // chunk arrived notification channel
+ closeC chan error // signal polling goroutine to terminate due to empty batch or timeout
+}
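+
+// Together, offer and want track one round of the wire exchange: the client
+// sends GetRange and records a want; the server responds with OfferedHashes
+// and records an offer; the client answers with WantedHashes; the server then
+// delivers the chunks via ChunkDelivery, which the client validates against
+// want.hashes to reject unsolicited chunks.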
+
+// getOfferOrDrop gets an open offer for the requested ruid.
+// In case the offer is not found, the peer is dropped.
+func (p *Peer) getOfferOrDrop(ruid uint) (o offer, shouldBreak bool) {
+ p.mtx.RLock()
+ o, ok := p.openOffers[ruid]
+ p.mtx.RUnlock()
+ if !ok {
+ p.logger.Error("ruid not found, dropping peer", "ruid", ruid)
+ p.Drop()
+ return o, true
+ }
+ return o, false
+}
+
+// getWantOrDrop gets an open want for the requested ruid.
+// In case the want is not found, the peer is dropped.
+func (p *Peer) getWantOrDrop(ruid uint) (w *want, shouldBreak bool) {
+ p.mtx.RLock()
+ w, ok := p.openWants[ruid]
+ p.mtx.RUnlock()
+ if !ok {
+ p.logger.Error("ruid not found, dropping peer", "ruid", ruid)
+ p.Drop()
+ return nil, true
+ }
+ return w, false
+}
+
+func (p *Peer) addInterval(stream ID, start, end uint64) (err error) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ peerStreamKey := p.peerStreamIntervalKey(stream)
+ i := &intervals.Intervals{}
+ if err = p.intervalsStore.Get(peerStreamKey, i); err != nil {
+ return err
+ }
+ i.Add(start, end)
+ return p.intervalsStore.Put(peerStreamKey, i)
+}
+
+func (p *Peer) nextInterval(stream ID, ceil uint64) (start, end uint64, empty bool, err error) {
+ p.mtx.RLock()
+ defer p.mtx.RUnlock()
+
+ i := &intervals.Intervals{}
+ err = p.intervalsStore.Get(p.peerStreamIntervalKey(stream), i)
+ if err != nil {
+ return 0, 0, false, err
+ }
+
+ start, end, empty = i.Next(ceil)
+ return start, end, empty, nil
+}
+
+func (p *Peer) sealWant(w *want) error {
+ err := p.addInterval(w.stream, w.from, *w.to)
+ if err != nil {
+ return err
+ }
+ p.mtx.Lock()
+ delete(p.openWants, w.ruid)
+ p.mtx.Unlock()
+ return nil
+}
+
+func (p *Peer) getOrCreateInterval(key string) (*intervals.Intervals, error) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ // check that an interval entry exists
+ i := &intervals.Intervals{}
+ err := p.intervalsStore.Get(key, i)
+ switch err {
+ case nil:
+ case state.ErrNotFound:
+ // key interval values are ALWAYS > 0
+ i = intervals.NewIntervals(1)
+ if err := p.intervalsStore.Put(key, i); err != nil {
+ return nil, err
+ }
+ default:
+ p.logger.Error("unknown error while getting interval for peer", "err", err)
+ return nil, err
+ }
+ return i, nil
+}
+
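+// peerStreamIntervalKey returns the intervals store key for the given stream
+// on this peer, in the form "<hex-encoded overlay address>|<stream ID>".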
+func (p *Peer) peerStreamIntervalKey(stream ID) string {
+ return fmt.Sprintf("%s|%s", hex.EncodeToString(p.BzzAddr.OAddr), stream.String())
+}
diff --git a/network/stream/snapshot_sync_test.go b/network/stream/v2/snapshot_sync_test.go
similarity index 66%
rename from network/stream/snapshot_sync_test.go
rename to network/stream/v2/snapshot_sync_test.go
index 9756faa998..5065a39f71 100644
--- a/network/stream/snapshot_sync_test.go
+++ b/network/stream/v2/snapshot_sync_test.go
@@ -13,54 +13,41 @@
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
package stream
import (
"context"
- "errors"
+ "flag"
"fmt"
- "os"
- "runtime"
- "sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/simulations"
- "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/network"
"github.com/ethersphere/swarm/network/simulation"
"github.com/ethersphere/swarm/pot"
- "github.com/ethersphere/swarm/state"
"github.com/ethersphere/swarm/storage"
- "github.com/ethersphere/swarm/storage/mock"
- mockmem "github.com/ethersphere/swarm/storage/mock/mem"
"github.com/ethersphere/swarm/testutil"
)
+var (
+ nodes = flag.Int("nodes", 0, "number of nodes")
+ chunks = flag.Int("chunks", 0, "number of chunks")
+ chunkSize = 4096
+ pof = network.Pof
+)
+
type synctestConfig struct {
addrs [][]byte
hashes []storage.Address
idToChunksMap map[enode.ID][]int
- //chunksToNodesMap map[string][]int
- addrToIDMap map[string]enode.ID
+ addrToIDMap map[string]enode.ID
}
-const (
- // EventTypeNode is the type of event emitted when a node is either
- // created, started or stopped
- EventTypeChunkCreated simulations.EventType = "chunkCreated"
- EventTypeChunkOffered simulations.EventType = "chunkOffered"
- EventTypeChunkWanted simulations.EventType = "chunkWanted"
- EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
- EventTypeChunkArrived simulations.EventType = "chunkArrived"
- EventTypeSimTerminated simulations.EventType = "simTerminated"
-)
-
// Tests in this file should not request chunks from peers.
// This function panics, to indicate a problem, if a request has been made.
func dummyRequestFromPeers(_ context.Context, req *storage.Request, _ enode.ID) (*enode.ID, error) {
@@ -75,22 +62,14 @@ func dummyRequestFromPeers(_ context.Context, req *storage.Request, _ enode.ID)
//they are expected to store based on the syncing protocol.
//Number of chunks and nodes can be provided via commandline too.
func TestSyncingViaGlobalSync(t *testing.T) {
- if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
- t.Skip("Flaky on mac on travis")
- }
-
- if testutil.RaceEnabled {
- t.Skip("Segfaults on Travis with -race")
- }
-
//if nodes/chunks have been provided via commandline,
//run the tests with these values
if *nodes != 0 && *chunks != 0 {
log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
testSyncingViaGlobalSync(t, *chunks, *nodes)
} else {
- chunkCounts := []int{4, 32}
- nodeCounts := []int{32, 16}
+ chunkCounts := []int{4}
+ nodeCounts := []int{16, 32}
//if the `longrunning` flag has been provided
//run more test combinations
@@ -101,44 +80,21 @@ func TestSyncingViaGlobalSync(t *testing.T) {
for _, chunkCount := range chunkCounts {
for _, n := range nodeCounts {
- log.Info(fmt.Sprintf("Long running test with %d chunks and %d nodes...", chunkCount, n))
- testSyncingViaGlobalSync(t, chunkCount, n)
+ tName := fmt.Sprintf("snapshot sync test %d nodes %d chunks", n, chunkCount)
+ t.Run(tName, func(t *testing.T) {
+ testSyncingViaGlobalSync(t, chunkCount, n)
+ })
}
}
}
}
-var simServiceMap = map[string]simulation.ServiceFunc{
- "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
- addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
- if err != nil {
- return nil, nil, err
- }
-
- store := state.NewInmemoryStore()
-
- r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
- Syncing: SyncingAutoSubscribe,
- SyncUpdateDelay: 3 * time.Second,
- }, nil)
-
- bucket.Store(bucketKeyRegistry, r)
-
- cleanup = func() {
- r.Close()
- clean()
- }
-
- return r, cleanup, nil
- },
-}
-
func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
- sim := simulation.NewInProc(simServiceMap)
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ "bzz-sync": newSyncSimServiceFunc(nil),
+ })
defer sim.Close()
- log.Info("Initializing test config")
-
conf := &synctestConfig{}
//map of discover ID to indexes of chunks expected at that ID
conf.idToChunksMap = make(map[enode.ID][]int)
@@ -150,7 +106,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
defer cancelSimRun()
- filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
+ filename := fmt.Sprintf("../testdata/snapshot_%d.json", nodeCount)
err := sim.UploadSnapshot(ctx, filename)
if err != nil {
t.Fatal(err)
@@ -164,15 +120,7 @@ func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
}
func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {
-
return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
- disconnected := watchDisconnections(ctx, sim)
- defer func() {
- if err != nil && disconnected.bool() {
- err = errors.New("disconnect events received")
- }
- }()
-
nodeIDs := sim.UpNodeIDs()
for _, n := range nodeIDs {
//get the kademlia overlay address from this ID
@@ -188,32 +136,16 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
//get the node at that index
//this is the node selected for upload
node := sim.Net.GetRandomUpNode()
- item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
- if !ok {
- return errors.New("no store in simulation bucket")
- }
- store := item.(chunk.Store)
- hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, store)
+ uploadStore := sim.MustNodeItem(node.ID(), bucketKeyFileStore).(chunk.Store)
+ hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, uploadStore)
if err != nil {
return err
}
- for _, h := range hashes {
- evt := &simulations.Event{
- Type: EventTypeChunkCreated,
- Node: sim.Net.GetNode(node.ID()),
- Data: h.String(),
- }
- sim.Net.Events().Send(evt)
- }
conf.hashes = append(conf.hashes, hashes...)
mapKeysToNodes(conf)
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
- var globalStore mock.GlobalStorer
- if *useMockStore {
- globalStore = mockmem.NewGlobalStore()
- }
REPEAT:
for {
for _, id := range nodeIDs {
@@ -225,31 +157,14 @@ func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulatio
log.Trace("node has chunk", "address", ch)
//check if the expected chunk is indeed in the localstore
var err error
- if *useMockStore {
- //use the globalStore if the mockStore should be used; in that case,
- //the complete localStore stack is bypassed for getting the chunk
- _, err = globalStore.Get(common.BytesToAddress(id.Bytes()), ch)
- } else {
- //use the actual localstore
- item, ok := sim.NodeItem(id, bucketKeyStore)
- if !ok {
- return errors.New("no store in simulation bucket")
- }
- store := item.(chunk.Store)
- _, err = store.Get(ctx, chunk.ModeGetLookup, ch)
- }
+ store := sim.MustNodeItem(id, bucketKeyFileStore).(chunk.Store)
+ _, err = store.Get(ctx, chunk.ModeGetLookup, ch)
if err != nil {
log.Debug("chunk not found", "address", ch.Hex(), "node", id)
// Do not get crazy with logging the warn message
time.Sleep(500 * time.Millisecond)
continue REPEAT
}
- evt := &simulations.Event{
- Type: EventTypeChunkArrived,
- Node: sim.Net.GetNode(id),
- Data: ch.String(),
- }
- sim.Net.Events().Send(evt)
log.Trace("chunk found", "address", ch.Hex(), "node", id)
}
}
@@ -298,7 +213,7 @@ func mapKeysToNodes(conf *synctestConfig) {
//upload a file(chunks) to a single local node store
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, store chunk.Store) ([]storage.Address, error) {
log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
- fileStore := storage.NewFileStore(store, storage.NewFileStoreParams(), chunk.NewTags())
+ fileStore := storage.NewFileStore(store, store, storage.NewFileStoreParams(), chunk.NewTags())
size := chunkSize
var rootAddrs []storage.Address
for i := 0; i < chunkCount; i++ {
diff --git a/network/stream/v2/stream.go b/network/stream/v2/stream.go
new file mode 100644
index 0000000000..5b6b50c140
--- /dev/null
+++ b/network/stream/v2/stream.go
@@ -0,0 +1,1122 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "context"
+ "encoding/hex"
+ "math/rand"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/network"
+ bv "github.com/ethersphere/swarm/network/bitvector"
+ "github.com/ethersphere/swarm/network/stream/intervals"
+ "github.com/ethersphere/swarm/p2p/protocols"
+ "github.com/ethersphere/swarm/state"
+ "github.com/ethersphere/swarm/storage"
+)
+
+const (
+ HashSize = 32
+ BatchSize = 128
+ MinFrameSize = 16
+)
+
+var (
+ // Compile time interface check
+ _ node.Service = (*Registry)(nil)
+
+ // Metrics
+ processReceivedChunksMsgCount = metrics.GetOrRegisterCounter("network.stream.received_chunks_msg", nil)
+ processReceivedChunksCount = metrics.GetOrRegisterCounter("network.stream.received_chunks_handled", nil)
+ streamSeenChunkDelivery = metrics.GetOrRegisterCounter("network.stream.seen_chunk_delivery", nil)
+ streamEmptyWantedHashes = metrics.GetOrRegisterCounter("network.stream.empty_wanted_hashes", nil)
+ streamWantedHashes = metrics.GetOrRegisterCounter("network.stream.wanted_hashes", nil)
+
+ streamBatchFail = metrics.GetOrRegisterCounter("network.stream.batch_fail", nil)
+ streamChunkDeliveryFail = metrics.GetOrRegisterCounter("network.stream.delivery_fail", nil)
+ streamRequestNextIntervalFail = metrics.GetOrRegisterCounter("network.stream.next_interval_fail", nil)
+
+ headBatchSizeGauge = metrics.GetOrRegisterGauge("network.stream.batch_size_head", nil)
+ batchSizeGauge = metrics.GetOrRegisterGauge("network.stream.batch_size", nil)
+
+ streamPeersCount = metrics.GetOrRegisterGauge("network.stream.peers", nil)
+
+ collectBatchLiveTimer = metrics.GetOrRegisterResettingTimer("network.stream.server_collect_batch_head.total-time", nil)
+ collectBatchHistoryTimer = metrics.GetOrRegisterResettingTimer("network.stream.server_collect_batch.total-time", nil)
+ providerGetTimer = metrics.GetOrRegisterResettingTimer("network.stream.provider_get.total-time", nil)
+ providerPutTimer = metrics.GetOrRegisterResettingTimer("network.stream.provider_put.total-time", nil)
+ providerSetTimer = metrics.GetOrRegisterResettingTimer("network.stream.provider_set.total-time", nil)
+ providerNeedDataTimer = metrics.GetOrRegisterResettingTimer("network.stream.provider_need_data.total-time", nil)
+
+ activeBatchTimeout = 20 * time.Second
+
+ // Protocol spec
+ Spec = &protocols.Spec{
+ Name: "bzz-stream",
+ Version: 8,
+ MaxMsgSize: 10 * 1024 * 1024,
+ Messages: []interface{}{
+ StreamInfoReq{},
+ StreamInfoRes{},
+ GetRange{},
+ OfferedHashes{},
+ ChunkDelivery{},
+ WantedHashes{},
+ },
+ }
+)
+
+// Registry is the base type that handles all client/server operations on a node.
+// It is instantiated once per stream protocol instance, that is,
+// there is one instance per node.
+type Registry struct {
+ mtx sync.RWMutex
+ intervalsStore state.Store // store intervals for all peers
+ peers map[enode.ID]*Peer // peers
+ baseKey []byte // this node's base address
+ providers map[string]StreamProvider // stream providers by name of stream
+ spec *protocols.Spec // this protocol's spec
+ handlersWg sync.WaitGroup // waits for all handlers to finish in Close method
+ quit chan struct{} // signal shutdown
+ lastReceivedChunkTimeMu sync.RWMutex // synchronize access to lastReceivedChunkTime
+ lastReceivedChunkTime time.Time // last received chunk time
+ logger log.Logger // the logger for the registry. appends base address to all logs
+}
+
+// New creates a new stream protocol handler
+func New(intervalsStore state.Store, baseKey []byte, providers ...StreamProvider) *Registry {
+ r := &Registry{
+ intervalsStore: intervalsStore,
+ peers: make(map[enode.ID]*Peer),
+ providers: make(map[string]StreamProvider),
+ quit: make(chan struct{}),
+ baseKey: baseKey,
+ logger: log.New("base", hex.EncodeToString(baseKey)[:16]),
+ spec: Spec,
+ }
+ for _, p := range providers {
+ r.providers[p.StreamName()] = p
+ }
+
+ return r
+}
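+
+// A minimal construction sketch (illustrative; it mirrors how the tests in
+// this package wire the registry together):
+//
+// sp := NewSyncProvider(netStore, kad, true, false)
+// r := New(stateStore, kad.BaseAddr(), sp)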
+
+// Run is dispatched when two nodes connect
+func (r *Registry) Run(bp *network.BzzPeer) error {
+ sp := NewPeer(bp, r.baseKey, r.intervalsStore, r.providers)
+ r.addPeer(sp)
+ defer r.removePeer(sp)
+
+ go sp.InitProviders()
+
+ return sp.Peer.Run(r.HandleMsg(sp))
+}
+
+// HandleMsg is the main message handler for the stream protocol
+func (r *Registry) HandleMsg(p *Peer) func(context.Context, interface{}) error {
+ return func(ctx context.Context, msg interface{}) error {
+ r.mtx.Lock() // ensure that quit read and handlersWg add are locked together
+ defer r.mtx.Unlock()
+
+ select {
+ case <-r.quit:
+ // no message handling if we quit
+ return nil
+ case <-p.quit:
+ // peer has been removed, quit
+ return nil
+ default:
+ }
+
+ // handleMsgPauser should be non-nil only in tests.
+ // It is not protected by a mutex and because of that
+ // it must be set before the Registry is constructed and
+ // reset after it is closed, in tests.
+ // The production performance impact can be considered
+ // negligible, as the nil check is a nanosecond-order operation.
+ if handleMsgPauser != nil {
+ handleMsgPauser.wait()
+ }
+
+ r.handlersWg.Add(1)
+ go func() {
+ defer r.handlersWg.Done()
+
+ switch msg := msg.(type) {
+ case *StreamInfoReq:
+ r.serverHandleStreamInfoReq(ctx, p, msg)
+ case *StreamInfoRes:
+ if len(msg.Streams) == 0 {
+ p.logger.Error("StreamInfo response is empty")
+ p.Drop()
+ return
+ }
+
+ r.clientHandleStreamInfoRes(ctx, p, msg)
+ case *GetRange:
+ provider := r.getProvider(msg.Stream)
+ if provider == nil {
+ p.logger.Error("unsupported provider", "stream", msg.Stream)
+ p.Drop()
+ return
+ }
+ r.serverHandleGetRange(ctx, p, msg, provider)
+ case *OfferedHashes:
+ // get the existing want for ruid from peer, otherwise drop
+ w, exit := p.getWantOrDrop(msg.Ruid)
+ if exit {
+ return
+ }
+ provider := r.getProvider(w.stream)
+ if provider == nil {
+ p.logger.Error("unsupported provider", "stream", w.stream)
+ p.Drop()
+ return
+ }
+ r.clientHandleOfferedHashes(ctx, p, msg, w, provider)
+ case *WantedHashes:
+ // get the existing offer for ruid from peer, otherwise drop
+ o, exit := p.getOfferOrDrop(msg.Ruid)
+ if exit {
+ return
+ }
+ provider := r.getProvider(o.stream)
+ if provider == nil {
+ p.logger.Error("unsupported provider", "stream", o.stream)
+ p.Drop()
+ return
+ }
+ r.serverHandleWantedHashes(ctx, p, msg, o, provider)
+ case *ChunkDelivery:
+ // get the existing want for ruid from peer, otherwise drop
+ w, exit := p.getWantOrDrop(msg.Ruid)
+ if exit {
+ streamChunkDeliveryFail.Inc(1)
+ return
+ }
+ provider := r.getProvider(w.stream)
+ if provider == nil {
+ p.logger.Error("unsupported provider", "stream", w.stream)
+ p.Drop()
+ return
+ }
+ r.clientHandleChunkDelivery(ctx, p, msg, w, provider)
+ }
+ }()
+ return nil
+ }
+}
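+
+// Each message is handled in its own goroutine, tracked by handlersWg, so that
+// Close can wait for all in-flight handlers before shutting down.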
+
+// Used to pause any message handling in tests for
+// synchronizing desired states.
+var handleMsgPauser pauser
+
+type pauser interface {
+ pause()
+ resume()
+ wait()
+}
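+
+// syncPauser in this package's tests is the only implementation; production
+// code leaves handleMsgPauser nil, so the wait call is skipped entirely.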
+
+// serverHandleStreamInfoReq handles the StreamInfoReq message on the server side (Peer is the client)
+func (r *Registry) serverHandleStreamInfoReq(ctx context.Context, p *Peer, msg *StreamInfoReq) {
+ // illegal to request empty streams, drop peer
+ if len(msg.Streams) == 0 {
+ p.logger.Error("nil streams msg requested")
+ p.Drop()
+ return
+ }
+
+ streamRes := &StreamInfoRes{}
+ for _, v := range msg.Streams {
+ provider := r.getProvider(v)
+ if provider == nil {
+ p.logger.Error("unsupported provider", "stream", v)
+ // TODO: tell the other peer we don't support this stream? this is non-fatal,
+ // as we might not support all providers; for now just stop processing the request
+ return
+ }
+
+ // get the current cursor from the data source
+ streamCursor, err := provider.Cursor(v.Key)
+ if err != nil {
+ p.logger.Error("error getting cursor for stream key", "name", v.Name, "key", v.Key, "err", err)
+ p.Drop()
+ return
+ }
+ descriptor := StreamDescriptor{
+ Stream: v,
+ Cursor: streamCursor,
+ Bounded: provider.Boundedness(),
+ }
+ streamRes.Streams = append(streamRes.Streams, descriptor)
+ }
+
+ // don't send the message in case we're shutting down or the peer left
+ select {
+ case <-r.quit:
+ // shutdown
+ return
+ case <-p.quit:
+ // peer has been removed, quit
+ return
+ default:
+ }
+
+ if err := p.Send(ctx, streamRes); err != nil {
+ p.logger.Error("failed to send StreamInfoRes to peer", "err", err)
+ p.Drop()
+ }
+}
+
+// clientHandleStreamInfoRes handles the StreamInfoRes message (Peer is the server)
+func (r *Registry) clientHandleStreamInfoRes(ctx context.Context, p *Peer, msg *StreamInfoRes) {
+ for _, s := range msg.Streams {
+ s := s
+
+ // get the provider for this stream
+ provider := r.getProvider(s.Stream)
+ if provider == nil {
+ // at this point of the message exchange unsupported providers are illegal. drop peer
+ p.logger.Error("peer requested unsupported provider. illegal, dropping peer")
+ p.Drop()
+ return
+ }
+
+ // check if we still want the requested stream; under certain conditions we might
+ // no longer want it by the time the StreamInfoRes arrives in response to our StreamInfoReq
+ if !provider.WantStream(p, s.Stream) {
+ if _, exists := p.getCursor(s.Stream); exists {
+ p.logger.Debug("stream cursor exists but we don't want it - removing", "stream", s.Stream)
+ p.deleteCursor(s.Stream)
+ }
+ continue
+ }
+
+ // if the stream cursor exists for this peer, a GetRange operation on it is already in progress
+ if _, exists := p.getCursor(s.Stream); exists {
+ p.logger.Debug("stream cursor already exists, continue to next", "stream", s.Stream)
+ continue
+ }
+
+ p.logger.Debug("setting stream cursor", "stream", s.Stream, "cursor", s.Cursor)
+ p.setCursor(s.Stream, s.Cursor)
+
+ if provider.Autostart() {
+ // don't request historical ranges for streams with cursor == 0
+ if s.Cursor > 0 {
+ p.logger.Debug("requesting history stream", "stream", s.Stream, "cursor", s.Cursor)
+ // fetch everything from the beginning up to s.Cursor
+
+ go func() {
+ err := r.clientRequestStreamRange(ctx, p, provider, s.Stream, s.Cursor)
+ if err != nil {
+ p.logger.Error("had an error sending initial GetRange for historical stream", "stream", s.Stream, "err", err)
+ p.Drop()
+ }
+ }()
+ }
+
+ // handle stream unboundedness
+ if !s.Bounded {
+ // constantly fetch the head of the stream
+ go func() {
+ p.logger.Debug("asking for live stream", "stream", s.Stream, "cursor", s.Cursor)
+
+ // ask the tip (cursor + 1)
+ err := r.clientRequestStreamHead(ctx, p, s.Stream, s.Cursor+1)
+ // https://github.com/golang/go/issues/4373 - use of closed network connection
+ if err != nil && err != p2p.ErrShuttingDown && !strings.Contains(err.Error(), "use of closed network connection") {
+ p.logger.Error("had an error with initial stream head fetch", "stream", s.Stream, "cursor", s.Cursor+1, "err", err)
+ p.Drop()
+ }
+ }()
+ }
+ }
+ }
+}
+
+// clientRequestStreamHead sends a GetRange message to the server requesting
+// new chunks from the supplied cursor position
+func (r *Registry) clientRequestStreamHead(ctx context.Context, p *Peer, stream ID, from uint64) error {
+ p.logger.Debug("clientRequestStreamHead", "stream", stream, "from", from)
+ return r.clientCreateSendWant(ctx, p, stream, from, nil, true)
+}
+
+// clientRequestStreamRange sends a GetRange message to the server requesting
+// a bounded interval of chunks starting from the next position stored in the
+// interval store and ending at most at the supplied cursor position
+func (r *Registry) clientRequestStreamRange(ctx context.Context, p *Peer, provider StreamProvider, stream ID, cursor uint64) error {
+ p.logger.Debug("clientRequestStreamRange", "stream", stream, "cursor", cursor)
+
+ // get the next interval from the intervals store
+ from, _, empty, err := p.nextInterval(stream, 0)
+ if err != nil {
+ return err
+ }
+
+ // nothing to do - the next interval starts beyond the cursor or the interval is empty
+ if from > cursor || empty {
+ p.logger.Debug("peer.requestStreamRange stream finished", "stream", stream, "cursor", cursor)
+ return nil
+ }
+ return r.clientCreateSendWant(ctx, p, stream, from, &cursor, false)
+}
+
+func (r *Registry) clientCreateSendWant(ctx context.Context, p *Peer, stream ID, from uint64, to *uint64, head bool) error {
+ g := GetRange{
+ Ruid: uint(rand.Uint32()),
+ Stream: stream,
+ From: from,
+ To: to,
+ BatchSize: BatchSize,
+ }
+
+ p.mtx.Lock()
+ p.openWants[g.Ruid] = &want{
+ ruid: g.Ruid,
+ stream: g.Stream,
+ from: g.From,
+ to: to,
+ head: head,
+ hashes: make(map[string]struct{}),
+ chunks: make(chan chunk.Address),
+ closeC: make(chan error),
+
+ requested: time.Now(),
+ }
+ p.mtx.Unlock()
+
+ return p.Send(ctx, g)
+}
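+
+// For orientation, the message exchange for a single range, as implemented
+// by the handlers below (client on the left, server on the right):
+//
+//	GetRange{Ruid, Stream, From, To}    ->  server collects a batch, stores an offer
+//	                                    <-  OfferedHashes{Ruid, Hashes, LastIndex}
+//	client filters hashes via NeedData  ->  WantedHashes{Ruid, BitVector}
+//	                                    <-  ChunkDelivery{Ruid, Chunks} (in frames)
+//	client Puts the chunks, seals the interval and requests the subsequent range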
+
+// serverHandleGetRange is handled by the server and sends in response an OfferedHashes message
+// if no chunks exist for the specific interval - the server sends an empty OfferedHashes
+// message so that the client can seal the interval and request the next one
+func (r *Registry) serverHandleGetRange(ctx context.Context, p *Peer, msg *GetRange, provider StreamProvider) {
+ p.logger.Debug("serverHandleGetRange", "ruid", msg.Ruid, "head?", msg.To == nil)
+ start := time.Now()
+ defer func(start time.Time) {
+ if msg.To == nil {
+ metrics.GetOrRegisterResettingTimer("network.stream.handle_get_range_head.total-time", nil).UpdateSince(start)
+ } else {
+ metrics.GetOrRegisterResettingTimer("network.stream.handle_get_range.total-time", nil).UpdateSince(start)
+ }
+ }(start)
+
+ key, err := provider.ParseKey(msg.Stream.Key)
+ if err != nil {
+ p.logger.Error("erroring parsing stream key", "stream", msg.Stream, "err", err)
+ p.Drop()
+ return
+ }
+
+ // get hashes from the data source for this batch. to is 0 to denote we want whatever comes out of SubscribePull
+ to := uint64(0)
+ if msg.To != nil {
+ to = *msg.To
+ }
+ h, _, t, e, err := r.serverCollectBatch(ctx, p, provider, key, msg.From, to)
+ if err != nil {
+ p.logger.Error("erroring getting live batch for stream", "stream", msg.Stream, "err", err)
+ p.Drop()
+ return
+ }
+
+ if e {
+ // prevent sending an empty batch that resulted from db shutdown or peer quit
+ select {
+ case <-r.quit:
+ return
+ case <-p.quit:
+ return
+ default:
+ // if the batch is empty as a result of a request for the tip,
+ // lastIdx is msg.From; if a range was defined, it equals
+ // the top of the requested range, msg.To
+ lastIdx := msg.From
+ if msg.To != nil {
+ lastIdx = *msg.To
+ }
+ offered := OfferedHashes{
+ Ruid: msg.Ruid,
+ LastIndex: lastIdx,
+ Hashes: []byte{},
+ }
+ if err := p.Send(ctx, offered); err != nil {
+ p.logger.Error("erroring sending empty live offered hashes", "ruid", msg.Ruid, "err", err)
+ }
+ return
+ }
+ }
+
+ // store the offer for the peer
+ p.mtx.Lock()
+ p.openOffers[msg.Ruid] = offer{
+ ruid: msg.Ruid,
+ stream: msg.Stream,
+ hashes: h,
+ requested: time.Now(),
+ }
+ p.mtx.Unlock()
+
+ offered := OfferedHashes{
+ Ruid: msg.Ruid,
+ LastIndex: t,
+ Hashes: h,
+ }
+ l := len(h) / HashSize
+ if msg.To == nil {
+ headBatchSizeGauge.Update(int64(l))
+ } else {
+ batchSizeGauge.Update(int64(l))
+ }
+ if err := p.Send(ctx, offered); err != nil {
+ p.logger.Error("erroring sending offered hashes", "ruid", msg.Ruid, "err", err)
+ p.mtx.Lock()
+ delete(p.openOffers, msg.Ruid)
+ p.mtx.Unlock()
+ p.Drop()
+ }
+}
+
+// clientHandleOfferedHashes handles the OfferedHashes wire protocol message (Peer is the server)
+func (r *Registry) clientHandleOfferedHashes(ctx context.Context, p *Peer, msg *OfferedHashes, w *want, provider StreamProvider) {
+ p.logger.Debug("clientHandleOfferedHashes", "ruid", msg.Ruid, "msg.lastIndex", msg.LastIndex)
+ start := time.Now()
+ defer func(start time.Time) {
+ metrics.GetOrRegisterResettingTimer("network.stream.handle_offered_hashes.total-time", nil).UpdateSince(start)
+ }(start)
+
+ var (
+ lenHashes = len(msg.Hashes)
+ ctr uint64 = 0 // the number of chunks wanted out of the batch
+ addresses = make([]chunk.Address, lenHashes/HashSize) // the address slice for MultiHas
+ wantedHashesMsg = WantedHashes{Ruid: msg.Ruid} // the message to send back to the server
+ errc <-chan error // channel to signal end of batch
+ )
+
+ if lenHashes%HashSize != 0 {
+ p.logger.Error("invalid hashes length", "len", lenHashes, "ruid", msg.Ruid)
+ p.Drop()
+ return
+ }
+
+ w.to = &msg.LastIndex // we can set the open wants upper bound to the index supplied in the msg
+
+ // this code block handles the case of a complete gap in the interval on the server side.
+ // lenHashes == 0 means there are no hashes in the requested range up to the LastIndex
+ // on the incoming message; we should seal the interval and request the subsequent one
+ if lenHashes == 0 {
+ if err := p.sealWant(w); err != nil {
+ p.logger.Error("error persisting interval", "from", w.from, "to", w.to, "err", err)
+ p.Drop()
+ return
+ }
+ r.requestSubsequentRange(ctx, p, provider, w, msg.LastIndex)
+ return
+ }
+
+ want, err := bv.New(lenHashes / HashSize)
+ if err != nil {
+ p.logger.Error("error initiaising bitvector", "len", lenHashes/HashSize, "ruid", msg.Ruid, "err", err)
+ p.Drop()
+ return
+ }
+
+ for i := 0; i < lenHashes; i += HashSize {
+ hash := msg.Hashes[i : i+HashSize]
+ addresses[i/HashSize] = hash
+ }
+
+ startNeed := time.Now()
+
+ // check which hashes we want
+ if wants, err := provider.NeedData(ctx, addresses...); err == nil {
+ for i, wantChunk := range wants {
+ if wantChunk {
+ ctr++ // increment number of wanted chunks
+ want.Set(i) // set the bitvector
+ w.hashes[addresses[i].Hex()] = struct{}{} // set unsolicited chunks guard
+ }
+ }
+ } else {
+ p.logger.Error("multi need data returned an error, dropping peer", "err", err)
+ p.Drop()
+ return
+ }
+
+ providerNeedDataTimer.UpdateSince(startNeed)
+
+ // add ctr to the number of remaining chunks
+ atomic.AddUint64(&w.remaining, ctr)
+
+ // this handles the case that there are no hashes we are interested in
+ // we then seal the current interval and request the next batch
+ if ctr == 0 {
+ streamEmptyWantedHashes.Inc(1)
+ wantedHashesMsg.BitVector = []byte{} // set the bitvector value to an empty slice to signal the server we don't want any hashes
+ if err := p.sealWant(w); err != nil {
+ p.logger.Error("error persisting interval", "from", w.from, "to", *w.to, "err", err)
+ p.Drop()
+ return
+ }
+ } else {
+ // we want some hashes
+ streamWantedHashes.Inc(1)
+ wantedHashesMsg.BitVector = want.Bytes() // set to bitvector
+
+ errc = r.clientSealBatch(ctx, p, provider, w) // poll for the completion of the batch in a separate goroutine
+ }
+
+ if err := p.Send(ctx, wantedHashesMsg); err != nil {
+ p.logger.Error("error sending wanted hashes", "err", err)
+ p.Drop()
+ return
+ }
+ if ctr == 0 {
+ // request the next range in case no chunks wanted
+ r.requestSubsequentRange(ctx, p, provider, w, msg.LastIndex)
+ return
+ }
+ select {
+ case err := <-errc:
+ if err != nil {
+ streamBatchFail.Inc(1)
+ p.logger.Error("got an error while sealing batch", "from", w.from, "to", w.to, "err", err)
+ p.Drop()
+ return
+ }
+
+ // seal the interval
+ if err := p.sealWant(w); err != nil {
+ p.logger.Error("error persisting interval", "from", w.from, "to", w.to, "err", err)
+ p.Drop()
+ return
+ }
+ case <-time.After(activeBatchTimeout):
+ p.logger.Error("batch has timed out", "ruid", w.ruid)
+ close(w.closeC) // signal the polling goroutine to terminate
+ p.mtx.Lock()
+ delete(p.openWants, msg.Ruid)
+ p.mtx.Unlock()
+ p.Drop()
+ return
+ case <-r.quit:
+ return
+ case <-p.quit:
+ return
+ }
+ r.requestSubsequentRange(ctx, p, provider, w, msg.LastIndex)
+}
+
+// serverHandleWantedHashes is handled on the server side (Peer is the client) and depends on a preceding OfferedHashes message
+// the method ensures that all chunks in the requested batch are sent to the client
+func (r *Registry) serverHandleWantedHashes(ctx context.Context, p *Peer, msg *WantedHashes, o offer, provider StreamProvider) {
+ p.logger.Debug("serverHandleWantedHashes", "ruid", msg.Ruid)
+ start := time.Now()
+ defer func(start time.Time) {
+ metrics.GetOrRegisterResettingTimer("network.stream.handle_wanted_hashes.total-time", nil).UpdateSince(start)
+ }(start)
+
+ defer func() {
+ p.mtx.Lock()
+ delete(p.openOffers, msg.Ruid)
+ p.mtx.Unlock()
+ }()
+
+ var (
+ l = len(o.hashes) / HashSize
+ cd = &ChunkDelivery{Ruid: msg.Ruid}
+ wantHashes = []chunk.Address{}
+ allHashes = make([]chunk.Address, l)
+ )
+
+ if len(msg.BitVector) == 0 {
+ p.logger.Debug("peer does not want any hashes in this range", "ruid", o.ruid)
+ for i := 0; i < l; i++ {
+ // assign by index: allHashes is pre-allocated to length l, so appending here would leave l leading nil entries
+ allHashes[i] = o.hashes[i*HashSize : (i+1)*HashSize]
+ }
+ // set all chunks as synced
+ if err := provider.Set(ctx, allHashes...); err != nil {
+ p.logger.Error("error setting chunk as synced", "addrs", allHashes, "err", err)
+ p.Drop()
+ return
+ }
+ return
+ }
+ want, err := bv.NewFromBytes(msg.BitVector, l)
+ if err != nil {
+ p.logger.Error("error initiaising bitvector", "l", l, "ll", len(o.hashes), "err", err)
+ p.Drop()
+ return
+ }
+
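+ // frame size: chunks are delivered in frames of at most max(MinFrameSize, BatchSize/4) chunks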
+ maxFrame := MinFrameSize
+ if v := BatchSize / 4; v > maxFrame {
+ maxFrame = v
+ }
+
+ // check which hashes to get from the localstore
+ for i := 0; i < l; i++ {
+ hash := o.hashes[i*HashSize : (i+1)*HashSize]
+ if want.Get(i) {
+ metrics.GetOrRegisterCounter("network.stream.handle_wanted.want_get", nil).Inc(1)
+ wantHashes = append(wantHashes, hash)
+ }
+ allHashes[i] = hash
+ }
+ startGet := time.Now()
+
+ // get the chunks from the provider
+ chunks, err := provider.Get(ctx, wantHashes...)
+ if err != nil {
+ p.logger.Error("handleWantedHashesMsg", "err", err)
+ p.Drop()
+ return
+ }
+
+ providerGetTimer.UpdateSince(startGet) // measure how long we spend on getting the chunks
+
+ // append the chunks to the chunk delivery message; when the frame reaches maxFrame, send the current batch
+ for _, v := range chunks {
+ chunkD := DeliveredChunk{
+ Addr: v.Address(),
+ Data: v.Data(),
+ }
+ cd.Chunks = append(cd.Chunks, chunkD)
+
+ if len(cd.Chunks) == maxFrame {
+ // prevent sending batch on shutdown or peer dropout
+ select {
+ case <-p.quit:
+ return
+ case <-r.quit:
+ return
+ default:
+ }
+
+ // send the batch and reset the chunk delivery message
+ if err := p.Send(ctx, cd); err != nil {
+ p.logger.Error("error sending chunk delivery frame", "ruid", msg.Ruid, "error", err)
+ p.Drop()
+ return
+ }
+ cd = &ChunkDelivery{
+ Ruid: msg.Ruid,
+ }
+ }
+ }
+
+ // send anything that we might have left in the batch
+ if len(cd.Chunks) > 0 {
+ if err := p.Send(ctx, cd); err != nil {
+ p.logger.Error("error sending chunk delivery frame", "ruid", msg.Ruid, "error", err)
+ p.Drop()
+ }
+ }
+
+ startSet := time.Now()
+
+ // set the chunks as synced
+ err = provider.Set(ctx, allHashes...)
+ if err != nil {
+ p.logger.Error("error setting chunk as synced", "addrs", allHashes, "err", err)
+ p.Drop()
+ return
+ }
+ providerSetTimer.UpdateSince(startSet)
+}
+
+// clientHandleChunkDelivery handles chunk delivery messages
+func (r *Registry) clientHandleChunkDelivery(ctx context.Context, p *Peer, msg *ChunkDelivery, w *want, provider StreamProvider) {
+ p.logger.Debug("clientHandleChunkDelivery", "ruid", msg.Ruid)
+ processReceivedChunksMsgCount.Inc(1)
+ r.setLastReceivedChunkTime() // needed for IsPullSyncing
+
+ defer func(start time.Time) {
+ metrics.GetOrRegisterResettingTimer("network.stream.handle_chunk_delivery.total-time", nil).UpdateSince(start)
+ }(time.Now())
+
+ chunks := make([]chunk.Chunk, len(msg.Chunks))
+ for i, dc := range msg.Chunks {
+ chunks[i] = chunk.NewChunk(dc.Addr, dc.Data)
+ }
+
+ startPut := time.Now()
+
+ // put the chunks to the local store
+ seen, err := provider.Put(ctx, chunks...)
+ if err != nil {
+ if err == storage.ErrChunkInvalid {
+ streamChunkDeliveryFail.Inc(1)
+ p.Drop()
+ return
+ }
+ p.logger.Error("clientHandleChunkDelivery error putting chunk", "err", err)
+ return
+ }
+
+ providerPutTimer.UpdateSince(startPut)
+
+ // increment the seen chunk delivery metric; duplicate delivery is possible when the same chunk is requested from multiple peers, which we currently do not limit
+ for _, v := range seen {
+ if v {
+ streamSeenChunkDelivery.Inc(1)
+ }
+ }
+
+ for _, dc := range chunks {
+ select {
+ case w.chunks <- dc.Address():
+ // send the chunk address to the goroutine polling end of batch (clientSealBatch)
+ case <-w.closeC:
+ // batch timeout
+ return
+ case <-r.quit:
+ // shutdown
+ return
+ case <-p.quit:
+ // peer quit
+ return
+ }
+ }
+}
+
+// clientSealBatch seals a given batch (want). it launches a separate goroutine that checks every chunk being delivered on the given ruid
+// if an unsolicited chunk is received it drops the peer
+func (r *Registry) clientSealBatch(ctx context.Context, p *Peer, provider StreamProvider, w *want) <-chan error {
+ p.logger.Debug("clientSealBatch", "stream", w.stream, "ruid", w.ruid, "from", w.from, "to", *w.to)
+ errc := make(chan error)
+ go func() {
+ start := time.Now()
+ defer func(start time.Time) {
+ metrics.GetOrRegisterResettingTimer("network.stream.client_seal_batch.total-time", nil).UpdateSince(start)
+ }(start)
+ for {
+ select {
+ case c, ok := <-w.chunks:
+ if !ok {
+ return
+ }
+ processReceivedChunksCount.Inc(1)
+ p.mtx.Lock()
+ if _, ok := w.hashes[c.Hex()]; !ok {
+ p.logger.Error("got an unsolicited chunk from peer!", "peer", p.ID(), "caddr", c)
+ streamChunkDeliveryFail.Inc(1)
+ p.Drop()
+ p.mtx.Unlock()
+ return
+ }
+ delete(w.hashes, c.Hex())
+ p.mtx.Unlock()
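+ // adding ^uint64(0) (two's-complement -1) atomically decrements w.remaining by one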
+ v := atomic.AddUint64(&w.remaining, ^uint64(0))
+ if v == 0 {
+ p.logger.Trace("done receiving chunks for open want", "ruid", w.ruid)
+ close(errc)
+ return
+ }
+ case <-p.quit:
+ // peer quit
+ return
+ case <-w.closeC:
+ // batch timeout was signalled
+ return
+ case <-r.quit:
+ // shutdown
+ return
+ }
+ }
+ }()
+ return errc
+}
+
+// serverCollectBatch collects a batch of hashes in response to a GetRange message
+// it will block until at least one hash is received from the provider
+func (r *Registry) serverCollectBatch(ctx context.Context, p *Peer, provider StreamProvider, key interface{}, from, to uint64) (hashes []byte, f, t uint64, empty bool, err error) {
+ p.logger.Debug("serverCollectBatch", "from", from, "to", to)
+
+ const batchTimeout = 1 * time.Second
+
+ var (
+ batch []byte
+ batchSize int
+ batchStartID *uint64
+ batchEndID uint64
+ timer *time.Timer
+ timerC <-chan time.Time
+ )
+
+ defer func(start time.Time) {
+ if to == 0 {
+ collectBatchLiveTimer.UpdateSince(start)
+ } else {
+ collectBatchHistoryTimer.UpdateSince(start)
+ }
+ if timer != nil {
+ timer.Stop()
+ }
+ }(time.Now())
+
+ descriptors, stop := provider.Subscribe(ctx, key, from, to)
+ defer stop()
+
+ for iterate := true; iterate; {
+ select {
+ case d, ok := <-descriptors:
+ if !ok {
+ iterate = false
+ break
+ }
+ batch = append(batch, d.Address[:]...)
+ batchSize++
+ if batchStartID == nil {
+ // set batch start id only if
+ // this is the first iteration
+ batchStartID = &d.BinID
+ }
+ batchEndID = d.BinID
+ if batchSize >= BatchSize {
+ iterate = false
+ metrics.GetOrRegisterCounter("network.stream.server_collect_batch.full-batch", nil).Inc(1)
+ }
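+ // (re)arm the per-batch idle timeout; draining timer.C after a failed Stop avoids a stale expiry on Reset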
+ if timer == nil {
+ timer = time.NewTimer(batchTimeout)
+ } else {
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(batchTimeout)
+ }
+ timerC = timer.C
+ case <-timerC:
+ // return batch if new chunks are not received after some time
+ iterate = false
+ metrics.GetOrRegisterCounter("network.stream.server_collect_batch.timer-expire", nil).Inc(1)
+ case <-p.quit:
+ iterate = false
+ case <-r.quit:
+ iterate = false
+ }
+ }
+ if batchStartID == nil {
+ // if batch start id is not set, no chunks were collected before timeout or shutdown - signal an empty batch
+ return nil, 0, 0, true, nil
+ }
+ return batch, *batchStartID, batchEndID, false, nil
+}
+
+// requestSubsequentRange checks the cursor for the current stream, and in case needed - requests the next range
+func (r *Registry) requestSubsequentRange(ctx context.Context, p *Peer, provider StreamProvider, w *want, lastIndex uint64) {
+ cur, ok := p.getCursor(w.stream)
+ if !ok {
+ metrics.GetOrRegisterCounter("network.stream.quit_unwanted", nil).Inc(1)
+ p.logger.Debug("no longer interested in stream. quitting", "stream", w.stream)
+ p.mtx.Lock()
+ delete(p.openWants, w.ruid)
+ p.mtx.Unlock()
+ return
+ }
+ if w.head {
+ if err := r.clientRequestStreamHead(ctx, p, w.stream, lastIndex+1); err != nil {
+ streamRequestNextIntervalFail.Inc(1)
+ p.logger.Error("error requesting next interval from peer", "err", err)
+ p.Drop()
+ return
+ }
+ } else {
+ if err := r.clientRequestStreamRange(ctx, p, provider, w.stream, cur); err != nil {
+ streamRequestNextIntervalFail.Inc(1)
+ p.logger.Error("error requesting next interval from peer", "err", err)
+ p.Drop()
+ return
+ }
+ }
+}
+
+func (r *Registry) getProvider(stream ID) StreamProvider {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ return r.providers[stream.Name]
+}
+
+func (r *Registry) getPeer(id enode.ID) *Peer {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ p := r.peers[id]
+ return p
+}
+
+func (r *Registry) addPeer(p *Peer) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ r.peers[p.ID()] = p
+
+ streamPeersCount.Update(int64(len(r.peers)))
+}
+
+func (r *Registry) removePeer(p *Peer) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ if _, found := r.peers[p.ID()]; found {
+ p.logger.Error("removing peer")
+ delete(r.peers, p.ID())
+ close(p.quit)
+ }
+ streamPeersCount.Update(int64(len(r.peers)))
+}
+
+// PeerInfo holds information about the node and its peers.
+type PeerInfo struct {
+ Base string `json:"base"` // our node's base address
+ Kademlia string `json:"kademlia"`
+ Peers []PeerState `json:"peers"`
+ Cursors map[string]map[string]uint64 `json:"cursors"`
+ Intervals map[string]string `json:"intervals"`
+}
+
+// PeerState holds information about a connected peer.
+type PeerState struct {
+ Peer string `json:"peer"` // the peer address
+ Cursors map[string]uint64 `json:"cursors"`
+}
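+
+// For illustration, a PeerInfo marshals to JSON roughly as follows (keys per
+// the field tags above; the values shown are placeholders, not real output):
+//
+//	{
+//	  "base": "aa00bb11cc22dd33",
+//	  "kademlia": "...",
+//	  "peers": [{"peer": "ee44ff55aa66bb77", "cursors": {...}}],
+//	  "cursors": {"SYNC": {"0": 42, "1": 7}},
+//	  "intervals": {...}
+//	}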
+
+// PeerInfo returns the queried node's
+// peer cursors and intervals
+func (r *Registry) PeerInfo() (*PeerInfo, error) {
+ info := &PeerInfo{
+ Base: hex.EncodeToString(r.baseKey)[:16],
+ Cursors: make(map[string]map[string]uint64),
+ }
+ for name, p := range r.providers {
+ info.Cursors[name] = make(map[string]uint64)
+ if name != syncStreamName {
+ // support only sync provider, for now
+ continue
+ }
+ if sp, ok := p.(*syncProvider); ok {
+ info.Kademlia = sp.kad.String()
+ }
+ for i := uint8(0); i <= chunk.MaxPO; i++ {
+ key, err := p.EncodeKey(i)
+ if err != nil {
+ return nil, err
+ }
+ cursor, err := p.Cursor(key)
+ if err != nil {
+ return nil, err
+ }
+ info.Cursors[name][key] = cursor
+ }
+ }
+ info.Intervals = make(map[string]string)
+ if err := r.intervalsStore.Iterate("", func(key, value []byte) (stop bool, err error) {
+ i := new(intervals.Intervals)
+ if err := i.UnmarshalBinary(value); err != nil {
+ return true, err
+ }
+ info.Intervals[string(key)] = i.String()
+ return false, nil
+ }); err != nil {
+ return nil, err
+ }
+ for _, p := range r.peers {
+ info.Peers = append(info.Peers, PeerState{
+ Peer: hex.EncodeToString(p.OAddr)[:16],
+ Cursors: p.getCursorsCopy(),
+ })
+ }
+ return info, nil
+}
+
+// LastReceivedChunkTime returns the time when the last chunk
+// was received by syncing. This method is used in api.Inspector
+// to detect when the syncing is complete.
+func (r *Registry) LastReceivedChunkTime() time.Time {
+ r.lastReceivedChunkTimeMu.RLock()
+ defer r.lastReceivedChunkTimeMu.RUnlock()
+ return r.lastReceivedChunkTime
+}
+
+func (r *Registry) setLastReceivedChunkTime() {
+ r.lastReceivedChunkTimeMu.Lock()
+ r.lastReceivedChunkTime = time.Now()
+ r.lastReceivedChunkTimeMu.Unlock()
+}
+
+func (r *Registry) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{
+ {
+ Name: "bzz-stream",
+ Version: 1,
+ Length: 10 * 1024 * 1024,
+ Run: r.runProtocol,
+ },
+ }
+}
+
+func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ peer := protocols.NewPeer(p, rw, r.spec)
+ bp := network.NewBzzPeer(peer)
+ return r.Run(bp)
+}
+
+func (r *Registry) APIs() []rpc.API {
+ return nil
+}
+
+func (r *Registry) Start(server *p2p.Server) error {
+ r.logger.Debug("stream registry starting")
+
+ return nil
+}
+
+func (r *Registry) Stop() error {
+ log.Debug("stream registry stopping")
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ close(r.quit)
+ // wait for all handlers to finish
+ done := make(chan struct{})
+ go func() {
+ r.handlersWg.Wait()
+ close(done)
+ }()
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ log.Error("stream closed with still active handlers")
+ }
+
+ for _, v := range r.providers {
+ v.Close()
+ }
+
+ return nil
+}
diff --git a/network/stream/v2/sync_provider.go b/network/stream/v2/sync_provider.go
new file mode 100644
index 0000000000..791c5b9806
--- /dev/null
+++ b/network/stream/v2/sync_provider.go
@@ -0,0 +1,469 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/network"
+ "github.com/ethersphere/swarm/network/timeouts"
+ "github.com/ethersphere/swarm/storage"
+ lru "github.com/hashicorp/golang-lru"
+)
+
+const syncStreamName = "SYNC"
+const cacheCapacity = 10000
+
+type syncProvider struct {
+ netStore *storage.NetStore // netstore for local chunk storage and network retrieval
+ kad *network.Kademlia // kademlia
+ name string // name of the stream we are responsible for
+ syncBinsOnlyWithinDepth bool // true means streams are established only within depth, false means outside of depth too
+ autostart bool // start fetching streams automatically when cursors arrive from peer
+ quit chan struct{} // shutdown
+ cacheMtx sync.RWMutex // synchronization primitive to protect cache
+ cache *lru.Cache // cache to minimize load on netstore
+ logger log.Logger // logger that appends the base address to loglines
+}
+
+// NewSyncProvider creates a new sync provider that is used by the stream protocol to sink data and control its behaviour.
+// syncOnlyWithinDepth toggles stream establishment in reference to kademlia. When true, streams are
+// established only within depth (po >= depth). This is needed for Push Sync. When false, streams are
+// established on all bins, as they traditionally were with Pull Sync.
+func NewSyncProvider(ns *storage.NetStore, kad *network.Kademlia, autostart bool, syncOnlyWithinDepth bool) StreamProvider {
+ c, err := lru.New(cacheCapacity)
+ if err != nil {
+ panic(err)
+ }
+
+ return &syncProvider{
+ netStore: ns,
+ kad: kad,
+ syncBinsOnlyWithinDepth: syncOnlyWithinDepth,
+ autostart: autostart,
+ name: syncStreamName,
+ quit: make(chan struct{}),
+ cache: c,
+ logger: log.New("base", hex.EncodeToString(kad.BaseAddr()[:16])),
+ }
+}
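+
+// A hypothetical construction for pull syncing, assuming netStore and kad are
+// already wired up (illustrative only; the actual call site lives in the
+// node's service setup): autostart is true so streams are fetched as soon as
+// cursors arrive, and syncOnlyWithinDepth is false for the legacy all-bins
+// behaviour:
+//
+//	provider := NewSyncProvider(netStore, kad, true, false)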
+
+// NeedData checks if we need to retrieve the supplied addrs from the upstream peer
+func (s *syncProvider) NeedData(ctx context.Context, addrs ...chunk.Address) ([]bool, error) {
+ var (
+ start = time.Now()
+ wants = make([]bool, len(addrs)) // which addresses we want
+ check = make([]chunk.Address, 0) // which addresses to check in localstore
+ indexes = make([]int, 0)
+ )
+
+ // don't check if we're shutting down
+ select {
+ case <-s.quit:
+ return wants, nil
+ default:
+ }
+
+ // if the cache contains the chunk key - it most probably exists in the localstore,
+ // therefore we do not want the chunk
+ // when the chunk is not in the cache - we check the localstore, and if it does not
+ // exist there - we ask for it
+ s.cacheMtx.RLock()
+ for i, addr := range addrs {
+ if !s.cache.Contains(addr.Hex()) {
+ // chunk is not in the cache - check the localstore, and if it's not there - we want it
+ check = append(check, addr)
+ indexes = append(indexes, i)
+ metrics.GetOrRegisterCounter("network.stream.sync_provider.multi_need_data.cachemiss", nil).Inc(1)
+ } else {
+ // chunk is in the cache - we don't want it
+ wants[i] = false
+ metrics.GetOrRegisterCounter("network.stream.sync_provider.multi_need_data.cachehit", nil).Inc(1)
+ }
+ }
+ s.cacheMtx.RUnlock()
+
+ // check localstore for the remaining chunks
+ has, err := s.netStore.Store.HasMulti(ctx, check...)
+ if err != nil {
+ return nil, err
+ }
+
+ // inspect results
+ for i, have := range has {
+ if !have {
+ wants[indexes[i]] = true // if we don't have it - we want it
+ fi, _, ok := s.netStore.GetOrCreateFetcher(ctx, check[i], "syncer")
+ if !ok {
+ continue
+ }
+
+ go func() {
+ select {
+ case <-fi.Delivered:
+ metrics.GetOrRegisterResettingTimer(fmt.Sprintf("fetcher.%s.syncer", fi.CreatedBy), nil).UpdateSince(start)
+ case <-time.After(timeouts.SyncerClientWaitTimeout):
+ metrics.GetOrRegisterCounter("fetcher.syncer.timeout", nil).Inc(1)
+ }
+ }()
+ } else {
+ // if we have it - we don't want it
+ wants[indexes[i]] = false
+ }
+ }
+ return wants, nil
+}
+
+// Get the supplied addresses for delivery
+func (s *syncProvider) Get(ctx context.Context, addr ...chunk.Address) ([]chunk.Chunk, error) {
+ var (
+ start = time.Now() // start time
+ retChunks = make([]chunk.Chunk, len(addr)) // the chunks we want to Get
+ lsChunks = make([]chunk.Address, 0) // the chunks that we need to Get from localstore
+ indices = make([]int, 0) // backreferences to glue retChunks and lsChunks together
+ )
+
+ defer func(start time.Time) {
+ metrics.GetOrRegisterResettingTimer("network.stream.sync_provider.get.total-time", nil).UpdateSince(start)
+ }(start)
+
+ s.cacheMtx.RLock()
+ // iterate over the addresses - if one is in the cache - pull it out
+ // if not - save it in a slice and fall back to the localstore later in one go
+ for i, a := range addr {
+ if v, ok := s.cache.Get(a.Hex()); ok {
+ retChunks[i] = chunk.NewChunk(a, v.([]byte))
+ metrics.GetOrRegisterCounter("network.stream.sync_provider.get.cachehit", nil).Inc(1)
+ } else {
+ lsChunks = append(lsChunks, a)
+ indices = append(indices, i)
+ metrics.GetOrRegisterCounter("network.stream.sync_provider.get.cachemiss", nil).Inc(1)
+ }
+ }
+ s.cacheMtx.RUnlock()
+
+ // get the rest from localstore
+ chunks, err := s.netStore.GetMulti(ctx, chunk.ModeGetSync, lsChunks...)
+ if err != nil {
+ return nil, err
+ }
+ s.cacheMtx.Lock()
+ defer s.cacheMtx.Unlock()
+
+ // merge the results together
+ for i, ch := range chunks {
+ ch := ch
+ s.cache.Add(ch.Address().Hex(), ch.Data())
+ retChunks[indices[i]] = ch
+ }
+ return retChunks, nil
+}
+
+// Set the supplied addrs as synced in order to allow for garbage collection
+func (s *syncProvider) Set(ctx context.Context, addrs ...chunk.Address) error {
+ err := s.netStore.Set(ctx, chunk.ModeSetSync, addrs...)
+ if err != nil {
+ metrics.GetOrRegisterCounter("syncProvider.set-sync-err", nil).Inc(1)
+ return err
+ }
+ return nil
+}
+
+// Put the given chunks to the local storage
+func (s *syncProvider) Put(ctx context.Context, ch ...chunk.Chunk) (exists []bool, err error) {
+ seen, err := s.netStore.Put(ctx, chunk.ModePutSync, ch...)
+ for i, v := range seen {
+ if v {
+ if putSeenTestHook != nil {
+ // call the test function if it is set
+ putSeenTestHook(ch[i].Address(), s.netStore.LocalID)
+ }
+ }
+ }
+ go func(chunks ...chunk.Chunk) {
+ s.cacheMtx.Lock()
+ defer s.cacheMtx.Unlock()
+ for _, c := range chunks {
+ s.cache.Add(c.Address().Hex(), c.Data())
+ }
+ }(ch...)
+ return seen, err
+}
+
+// putSeenTestHook is used only in tests to detect chunks that are synced
+// multiple times within the same stream. This function pointer must be
+// nil in production.
+var putSeenTestHook func(addr chunk.Address, id enode.ID)
+
+// Subscribe wraps SubscribePull to retrieve chunks within a certain interval
+func (s *syncProvider) Subscribe(ctx context.Context, key interface{}, from, to uint64) (<-chan chunk.Descriptor, func()) {
+ // convert the key to the actual value and call SubscribePull
+ bin := key.(uint8)
+ log.Debug("syncProvider.Subscribe", "bin", bin, "from", from, "to", to)
+
+ return s.netStore.SubscribePull(ctx, bin, from, to)
+}
+
+// Cursor gets the cursor from the localstore for a given stream key
+func (s *syncProvider) Cursor(k string) (cursor uint64, err error) {
+ key, err := s.ParseKey(k)
+ if err != nil {
+ // error parsing the stream key
+ log.Error("error parsing the stream key", "key", k, "err", err)
+ return 0, err
+ }
+
+ bin, ok := key.(uint8)
+ if !ok {
+ return 0, errors.New("could not unmarshal key to uint8")
+ }
+ return s.netStore.LastPullSubscriptionBinID(bin)
+}
+
+// WantStream checks if we are interested in a given stream for a peer
+func (s *syncProvider) WantStream(p *Peer, streamID ID) bool {
+ p.logger.Debug("syncProvider.WantStream", "stream", streamID)
+ po := chunk.Proximity(p.BzzAddr.Over(), s.kad.BaseAddr())
+ depth := s.kad.NeighbourhoodDepth()
+
+ // check all subscriptions that should exist for this peer
+ subBins, _ := syncSubscriptionsDiff(po, -1, depth, s.kad.MaxProxDisplay, s.syncBinsOnlyWithinDepth)
+ v, err := parseSyncKey(streamID.Key)
+ if err != nil {
+ return false
+ }
+ return checkKeyInSlice(int(v), subBins)
+}
+
+var (
+ SyncInitBackoff = 500 * time.Millisecond
+)
+
+// InitPeer creates and maintains the streams per peer.
+// It runs per peer, in a separate goroutine, and handles the cases where,
+// when the depth changes on our node:
+// - the peer moves from out of depth into depth
+// - the peer moves from depth out of depth
+// - the depth changes while the peer stays within it, but we need more or fewer bins
+// It also copes with peers that connect and disconnect quickly.
+func (s *syncProvider) InitPeer(p *Peer) {
+ p.logger.Debug("syncProvider.InitPeer")
+ timer := time.NewTimer(SyncInitBackoff)
+ defer timer.Stop()
+
+ select {
+ case <-timer.C:
+ case <-p.quit:
+ return
+ }
+
+ po := chunk.Proximity(p.BzzAddr.Over(), s.kad.BaseAddr())
+ depth := s.kad.NeighbourhoodDepth()
+
+ p.logger.Debug("update syncing subscriptions: initial", "po", po, "depth", depth)
+
+ subBins, quitBins := syncSubscriptionsDiff(po, -1, depth, s.kad.MaxProxDisplay, s.syncBinsOnlyWithinDepth)
+ s.updateSyncSubscriptions(p, subBins, quitBins)
+
+ depthChangeSignal, unsubscribeDepthChangeSignal := s.kad.SubscribeToNeighbourhoodDepthChange()
+ defer unsubscribeDepthChangeSignal()
+
+ for {
+ select {
+ case _, ok := <-depthChangeSignal:
+ if !ok {
+ return
+ }
+
+ // update subscriptions for this peer when depth changes
+ ndepth := s.kad.NeighbourhoodDepth()
+ subs, quits := syncSubscriptionsDiff(po, depth, ndepth, s.kad.MaxProxDisplay, s.syncBinsOnlyWithinDepth)
+ p.logger.Debug("update syncing subscriptions", "po", po, "depth", depth, "sub", subs, "quit", quits)
+ s.updateSyncSubscriptions(p, subs, quits)
+ depth = ndepth
+ case <-s.quit:
+ return
+ case <-p.quit:
+ return
+ }
+ }
+}
+
+// updateSyncSubscriptions accepts two slices of integers, the first one
+// representing proximity order bins for required syncing subscriptions
+// and the second one representing bins for syncing subscriptions that
+// need to be removed.
+func (s *syncProvider) updateSyncSubscriptions(p *Peer, subBins, quitBins []int) {
+ p.logger.Debug("syncProvider.updateSyncSubscriptions", "subBins", subBins, "quitBins", quitBins)
+ if l := len(subBins); l > 0 {
+ streams := make([]ID, l)
+ for i, po := range subBins {
+ stream := NewID(s.StreamName(), encodeSyncKey(uint8(po)))
+ _, err := p.getOrCreateInterval(p.peerStreamIntervalKey(stream))
+ if err != nil {
+ p.logger.Error("got an error while trying to register initial streams", "stream", stream)
+ }
+
+ streams[i] = stream
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ if err := p.Send(ctx, StreamInfoReq{Streams: streams}); err != nil {
+ p.logger.Error("error establishing subsequent subscription", "err", err)
+ p.Drop()
+ return
+ }
+ }
+ for _, po := range quitBins {
+ p.logger.Debug("stream unwanted, removing cursor info for peer", "bin", po)
+ p.deleteCursor(NewID(syncStreamName, encodeSyncKey(uint8(po))))
+ }
+}
+
+// syncSubscriptionsDiff calculates to which proximity order bins a peer
+// (with po peerPO) needs to be subscribed after kademlia neighbourhood depth
+// change from prevDepth to newDepth. Max argument limits the number of
+// proximity order bins. Returned values are slices of integers which represent
+// proximity order bins, the first one to which additional subscriptions need to
+// be requested and the second one which subscriptions need to be quit. Argument
+// prevDepth with a value less than 0 represents no previous depth, used for
+// initial syncing subscriptions.
+// syncBinsOnlyWithinDepth toggles between requesting streams only within depth (true)
+// or with the old stream establishing logic (false)
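+//
+// For example (cf. TestSyncSubscriptionsDiff): a peer at po 4, with prevDepth 0,
+// newDepth 4, max 16 and syncBinsOnlyWithinDepth false, yields subBins nil and
+// quitBins [0 1 2 3].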
+func syncSubscriptionsDiff(peerPO, prevDepth, newDepth, max int, syncBinsOnlyWithinDepth bool) (subBins, quitBins []int) {
+ newStart, newEnd := syncBins(peerPO, newDepth, max, syncBinsOnlyWithinDepth)
+ if prevDepth < 0 {
+ if newStart == -1 && newEnd == -1 {
+ return nil, nil
+ }
+ // no previous depth, return the complete range
+ // for subscriptions requests and nothing for quitting
+ return intRange(newStart, newEnd), nil
+ }
+
+ prevStart, prevEnd := syncBins(peerPO, prevDepth, max, syncBinsOnlyWithinDepth)
+ if newStart == -1 && newEnd == -1 {
+ // this means that we should not have any streams on any bins with this peer
+ // get rid of what was established on the previous depth
+ quitBins = append(quitBins, intRange(prevStart, prevEnd)...)
+ return
+ }
+
+ if newStart < prevStart {
+ subBins = append(subBins, intRange(newStart, prevStart)...)
+ }
+
+ if prevStart < newStart {
+ quitBins = append(quitBins, intRange(prevStart, newStart)...)
+ }
+
+ if newEnd < prevEnd {
+ quitBins = append(quitBins, intRange(newEnd, prevEnd)...)
+ }
+
+ if prevEnd < newEnd {
+ subBins = append(subBins, intRange(prevEnd, newEnd)...)
+ }
+
+ return subBins, quitBins
+}
+
+// syncBins returns the range to which proximity order bins syncing
+// subscriptions need to be requested, based on peer proximity and
+// kademlia neighbourhood depth. Returned range is [start,end), inclusive for
+// start and exclusive for end.
+// syncBinsOnlyWithinDepth toggles between requesting streams only within depth (true)
+// or with the old stream establishing logic (false)
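+//
+// For example, with depth 5 and max 16: a peer at po 3 syncs only bin 3 in the
+// legacy mode (start 3, end 4) and nothing (-1, -1) in the within-depth mode,
+// while a peer at po 7 syncs bins [5,17) in both modes.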
+func syncBins(peerPO, depth, max int, syncBinsOnlyWithinDepth bool) (start, end int) {
+ if syncBinsOnlyWithinDepth && peerPO < depth {
+ // we don't want to request anything from peers outside depth
+ return -1, -1
+ }
+ if peerPO < depth {
+ // subscribe only to peerPO bin if it is not
+ // in the nearest neighbourhood
+ return peerPO, peerPO + 1
+ }
+ // subscribe from depth to max bin if the peer
+ // is in the nearest neighbourhood
+ return depth, max + 1
+}
+
+// intRange returns the slice of integers [start,end). The start
+// is inclusive and the end is not.
+func intRange(start, end int) (r []int) {
+ for i := start; i < end; i++ {
+ r = append(r, i)
+ }
+ return r
+}
+
+func checkKeyInSlice(k int, slice []int) bool {
+ for _, v := range slice {
+ if v == k {
+ // early return: no need to scan the rest of the slice
+ return true
+ }
+ }
+ return false
+}
+
+func (s *syncProvider) ParseKey(streamKey string) (interface{}, error) {
+ return parseSyncKey(streamKey)
+}
+
+func (s *syncProvider) EncodeKey(i interface{}) (string, error) {
+ v, ok := i.(uint8)
+ if !ok {
+ return "", errors.New("error encoding key")
+ }
+ return encodeSyncKey(v), nil
+}
+
+func (s *syncProvider) StreamName() string { return s.name }
+
+func (s *syncProvider) Boundedness() bool { return false }
+
+func (s *syncProvider) Autostart() bool { return s.autostart }
+
+func (s *syncProvider) Close() { close(s.quit) }
+
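+// Sync stream keys are proximity-order bins encoded in base 36; for example,
+// bin 16 round-trips as encodeSyncKey(16) == "g" and parseSyncKey("g") == 16.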
+func parseSyncKey(streamKey string) (uint8, error) {
+ b, err := strconv.ParseUint(streamKey, 36, 8)
+ if err != nil {
+ return 0, err
+ }
+ if b < 0 || b > chunk.MaxPO {
+ return 0, fmt.Errorf("stream key %v out of range", b)
+ }
+ return uint8(b), nil
+}
+
+func encodeSyncKey(i uint8) string {
+ return strconv.FormatUint(uint64(i), 36)
+}
diff --git a/network/stream/v2/sync_provider_test.go b/network/stream/v2/sync_provider_test.go
new file mode 100644
index 0000000000..771b71ea9b
--- /dev/null
+++ b/network/stream/v2/sync_provider_test.go
@@ -0,0 +1,190 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/ethersphere/swarm/network"
+)
+
+// TestSyncSubscriptionsDiff validates the output of syncSubscriptionsDiff
+// function for various arguments.
+func TestSyncSubscriptionsDiff(t *testing.T) {
+ max := network.NewKadParams().MaxProxDisplay
+ for _, tc := range []struct {
+ po, prevDepth, newDepth int
+ subBins, quitBins []int
+ syncBinsOnlyWithinDepth bool
+ }{
+ // tests for old syncBins logic that establish streams on all bins (not push-sync adjusted)
+ {
+ po: 0, prevDepth: -1, newDepth: 0,
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 1, prevDepth: -1, newDepth: 0,
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 2, prevDepth: -1, newDepth: 0,
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 0, prevDepth: -1, newDepth: 1,
+ subBins: []int{0},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 1, prevDepth: -1, newDepth: 1,
+ subBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 2, prevDepth: -1, newDepth: 2,
+ subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 3, prevDepth: -1, newDepth: 2,
+ subBins: []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 1, prevDepth: -1, newDepth: 2,
+ subBins: []int{1},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 0, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 1, prevDepth: 0, newDepth: 0, // 0-16 -> 0-16
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 0, prevDepth: 0, newDepth: 1, // 0-16 -> 0
+ quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 0, prevDepth: 0, newDepth: 2, // 0-16 -> 0
+ quitBins: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 1, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
+ quitBins: []int{0},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 1, prevDepth: 1, newDepth: 0, // 1-16 -> 0-16
+ subBins: []int{0},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 4, prevDepth: 0, newDepth: 1, // 0-16 -> 1-16
+ quitBins: []int{0},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 4, prevDepth: 0, newDepth: 4, // 0-16 -> 4-16
+ quitBins: []int{0, 1, 2, 3},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 4, prevDepth: 0, newDepth: 5, // 0-16 -> 4
+ quitBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 4, prevDepth: 5, newDepth: 0, // 4 -> 0-16
+ subBins: []int{0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: false,
+ },
+ {
+ po: 4, prevDepth: 5, newDepth: 6, // 4 -> 4
+ syncBinsOnlyWithinDepth: false,
+ },
+
+ // tests for syncBins logic to establish streams only within depth
+ {
+ po: 0, prevDepth: 5, newDepth: 6,
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 1, prevDepth: 5, newDepth: 6,
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 7, prevDepth: 5, newDepth: 6, // 5-16 -> 6-16
+ quitBins: []int{5},
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 9, prevDepth: 5, newDepth: 6, // 5-16 -> 6-16
+ quitBins: []int{5},
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 9, prevDepth: 0, newDepth: 6, // 0-16 -> 6-16
+ quitBins: []int{0, 1, 2, 3, 4, 5},
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 9, prevDepth: -1, newDepth: 0, // [] -> 0-16
+ subBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 9, prevDepth: -1, newDepth: 7, // [] -> 7-16
+ subBins: []int{7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 9, prevDepth: -1, newDepth: 10, // [] -> []
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 9, prevDepth: 8, newDepth: 10, // 8-16 -> []
+ quitBins: []int{8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 1, prevDepth: 0, newDepth: 0, // [] -> []
+ syncBinsOnlyWithinDepth: true,
+ },
+ {
+ po: 1, prevDepth: 0, newDepth: 8, // 0-16 -> []
+ quitBins: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ syncBinsOnlyWithinDepth: true,
+ },
+ } {
+ subBins, quitBins := syncSubscriptionsDiff(tc.po, tc.prevDepth, tc.newDepth, max, tc.syncBinsOnlyWithinDepth)
+ if fmt.Sprint(subBins) != fmt.Sprint(tc.subBins) {
+ t.Errorf("po: %v, prevDepth: %v, newDepth: %v, syncBinsOnlyWithinDepth: %t: got subBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, tc.syncBinsOnlyWithinDepth, subBins, tc.subBins)
+ }
+ if fmt.Sprint(quitBins) != fmt.Sprint(tc.quitBins) {
+ t.Errorf("po: %v, prevDepth: %v, newDepth: %v, syncBinsOnlyWithinDepth: %t: got quitBins %v, want %v", tc.po, tc.prevDepth, tc.newDepth, tc.syncBinsOnlyWithinDepth, quitBins, tc.quitBins)
+ }
+ }
+}
diff --git a/network/stream/v2/syncing_test.go b/network/stream/v2/syncing_test.go
new file mode 100644
index 0000000000..2cac8310b9
--- /dev/null
+++ b/network/stream/v2/syncing_test.go
@@ -0,0 +1,823 @@
+// Copyright 2019 The Swarm Authors
+// This file is part of the Swarm library.
+//
+// The Swarm library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The Swarm library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
+
+package stream
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethersphere/swarm/chunk"
+ "github.com/ethersphere/swarm/log"
+ "github.com/ethersphere/swarm/network"
+ "github.com/ethersphere/swarm/network/simulation"
+ "github.com/ethersphere/swarm/p2p/protocols"
+ "github.com/ethersphere/swarm/pot"
+ "github.com/ethersphere/swarm/storage"
+ "github.com/ethersphere/swarm/storage/localstore"
+ "github.com/ethersphere/swarm/testutil"
+)
+
+var timeout = 90 * time.Second
+
+// TestTwoNodesSyncWithGaps tests that syncing works with gaps in the localstore intervals
+func TestTwoNodesSyncWithGaps(t *testing.T) {
+ // construct a pauser before simulation is started and reset it to nil after all streams are closed
+ // to avoid the need for protecting handleMsgPauser with a lock in production code.
+ handleMsgPauser = new(syncPauser)
+ defer func() { handleMsgPauser = nil }()
+
+ removeChunks := func(t *testing.T, ctx context.Context, store chunk.Store, gaps [][2]uint64, chunks []chunk.Address) (removedCount uint64) {
+ t.Helper()
+
+ for _, gap := range gaps {
+ for i := gap[0]; i < gap[1]; i++ {
+ c := chunks[i]
+ if err := store.Set(ctx, chunk.ModeSetRemove, c); err != nil {
+ t.Fatal(err)
+ }
+ removedCount++
+ }
+ }
+ return removedCount
+ }
+
+ for _, tc := range []struct {
+ name string
+ chunkCount uint64
+ gaps [][2]uint64
+ liveChunkCount uint64
+ liveGaps [][2]uint64
+ }{
+ {
+ name: "no gaps",
+ chunkCount: 100,
+ gaps: nil,
+ },
+ {
+ name: "first chunk removed",
+ chunkCount: 100,
+ gaps: [][2]uint64{{0, 1}},
+ },
+ {
+ name: "one chunk removed",
+ chunkCount: 100,
+ gaps: [][2]uint64{{60, 61}},
+ },
+ {
+ name: "single gap at start",
+ chunkCount: 100,
+ gaps: [][2]uint64{{0, 5}},
+ },
+ {
+ name: "single gap",
+ chunkCount: 100,
+ gaps: [][2]uint64{{5, 10}},
+ },
+ {
+ name: "multiple gaps",
+ chunkCount: 100,
+ gaps: [][2]uint64{{0, 1}, {10, 21}},
+ },
+ {
+ name: "big gaps",
+ chunkCount: 100,
+ gaps: [][2]uint64{{0, 1}, {10, 21}, {50, 91}},
+ },
+ {
+ name: "remove all",
+ chunkCount: 100,
+ gaps: [][2]uint64{{0, 100}},
+ },
+ {
+ name: "large db",
+ chunkCount: 4000,
+ },
+ {
+ name: "large db with gap",
+ chunkCount: 4000,
+ gaps: [][2]uint64{{1000, 3000}},
+ },
+ {
+ name: "live",
+ liveChunkCount: 100,
+ },
+ {
+ name: "live and history",
+ chunkCount: 100,
+ liveChunkCount: 100,
+ },
+ {
+ name: "live and history with history gap",
+ chunkCount: 100,
+ gaps: [][2]uint64{{5, 10}},
+ liveChunkCount: 100,
+ },
+ {
+ name: "live and history with live gap",
+ chunkCount: 100,
+ liveChunkCount: 100,
+ liveGaps: [][2]uint64{{105, 110}},
+ },
+ {
+ name: "live and history with gaps",
+ chunkCount: 100,
+ gaps: [][2]uint64{{5, 10}},
+ liveChunkCount: 100,
+ liveGaps: [][2]uint64{{105, 110}},
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ "bzz-sync": newSyncSimServiceFunc(nil),
+ })
+ defer sim.Close()
+ defer catchDuplicateChunkSync(t)()
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+
+ uploadNode, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ uploadStore := sim.MustNodeItem(uploadNode, bucketKeyFileStore).(chunk.Store)
+
+ chunks := mustUploadChunks(ctx, t, uploadStore, tc.chunkCount)
+
+ totalChunkCount, err := getChunkCount(uploadStore)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if totalChunkCount != tc.chunkCount {
+ t.Errorf("uploaded %v chunks, want %v", totalChunkCount, tc.chunkCount)
+ }
+
+ removedCount := removeChunks(t, ctx, uploadStore, tc.gaps, chunks)
+
+ syncNode, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = sim.Net.Connect(uploadNode, syncNode)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ syncStore := sim.MustNodeItem(syncNode, bucketKeyFileStore).(chunk.Store)
+
+ err = waitChunks(syncStore, totalChunkCount-removedCount, 10*time.Second)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if tc.liveChunkCount > 0 {
+ // pause syncing so that the chunks in the live gap
+ // are not synced before they are removed
+ handleMsgPauser.pause()
+
+ chunks = append(chunks, mustUploadChunks(ctx, t, uploadStore, tc.liveChunkCount)...)
+
+ removedCount += removeChunks(t, ctx, uploadStore, tc.liveGaps, chunks)
+
+ // resume syncing
+ handleMsgPauser.resume()
+
+ err = waitChunks(syncStore, tc.chunkCount+tc.liveChunkCount-removedCount, time.Minute)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ })
+ }
+}
+
+// TestThreeNodesUnionHistoricalSync brings up three nodes, uploads content to all of them and then
+// asserts that all of them have the union of all 3 local stores (depth is assumed to be 0)
+func TestThreeNodesUnionHistoricalSync(t *testing.T) {
+ nodes := 3
+ chunkCount := 1000
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ "bzz-sync": newSyncSimServiceFunc(nil),
+ })
+ defer sim.Close()
+ union := make(map[string]struct{})
+ nodeIDs := []enode.ID{}
+ for i := 0; i < nodes; i++ {
+ node, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ nodeIDs = append(nodeIDs, node)
+ nodeStore := sim.MustNodeItem(node, bucketKeyFileStore).(*storage.FileStore)
+ mustUploadChunks(context.Background(), t, nodeStore, uint64(chunkCount))
+
+ uploadedChunks, err := getChunks(nodeStore.ChunkStore)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k := range uploadedChunks {
+ if _, ok := union[k]; ok {
+ t.Fatal("chunk already exists in union")
+ }
+ union[k] = struct{}{}
+ }
+ }
+
+ err := sim.Net.ConnectNodesFull(nodeIDs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, n := range nodeIDs {
+ nodeStore := sim.MustNodeItem(n, bucketKeyFileStore).(*storage.FileStore)
+ if err := waitChunks(nodeStore, uint64(len(union)), 10*time.Second); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+// TestFullSync performs a series of subtests where a number of nodes are
+// connected to the single (chunk uploading) node.
+func TestFullSync(t *testing.T) {
+
+ for _, tc := range []struct {
+ name string
+ chunkCount uint64
+ syncNodeCount int
+ history bool
+ live bool
+ }{
+ {
+ name: "sync to two nodes history",
+ chunkCount: 5000,
+ syncNodeCount: 2,
+ history: true,
+ },
+ {
+ name: "sync to two nodes live",
+ chunkCount: 5000,
+ syncNodeCount: 2,
+ live: true,
+ },
+ {
+ name: "sync to two nodes history and live",
+ chunkCount: 2500,
+ syncNodeCount: 2,
+ history: true,
+ live: true,
+ },
+ {
+ name: "sync to 50 nodes history",
+ chunkCount: 500,
+ syncNodeCount: 50,
+ history: true,
+ },
+ {
+ name: "sync to 50 nodes live",
+ chunkCount: 500,
+ syncNodeCount: 50,
+ live: true,
+ },
+ {
+ name: "sync to 50 nodes history and live",
+ chunkCount: 250,
+ syncNodeCount: 50,
+ history: true,
+ live: true,
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.syncNodeCount > 2 && runtime.GOARCH == "386" {
+ t.Skip("skipping larger simulation on low memory architecture")
+ }
+
+ sim := simulation.NewInProc(map[string]simulation.ServiceFunc{
+ "bzz-sync": newSyncSimServiceFunc(nil),
+ })
+ defer sim.Close()
+
+ defer catchDuplicateChunkSync(t)()
+
+ uploaderNode, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ uploaderNodeStore := sim.MustNodeItem(uploaderNode, bucketKeyFileStore).(*storage.FileStore)
+
+ if tc.history {
+ mustUploadChunks(context.Background(), t, uploaderNodeStore, tc.chunkCount)
+ }
+
+ // add nodes to sync to
+ ids, err := sim.AddNodes(tc.syncNodeCount)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // connect every new node to the uploading one, so
+ // every node will have depth 0, as only the uploading node
+ // will be in their kademlia tables
+ err = sim.Net.ConnectNodesStar(ids, uploaderNode)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // count the content in the bins again
+ uploadedChunks, err := getChunks(uploaderNodeStore.ChunkStore)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tc.history && len(uploadedChunks) == 0 {
+ t.Errorf("got empty uploader chunk store")
+ }
+ if !tc.history && len(uploadedChunks) != 0 {
+ t.Errorf("got non empty uploader chunk store")
+ }
+
+ historicalChunks := make(map[enode.ID]map[string]struct{})
+ for _, id := range ids {
+ wantChunks := make(map[string]struct{}, len(uploadedChunks))
+ for k, v := range uploadedChunks {
+ wantChunks[k] = v
+ }
+ // wait for all chunks to be synced
+ store := sim.MustNodeItem(id, bucketKeyFileStore).(chunk.Store)
+ if err := waitChunks(store, uint64(len(wantChunks)), 10*time.Second); err != nil {
+ t.Fatal(err)
+ }
+
+ // validate that all and only all chunks are synced
+ syncedChunks, err := getChunks(store)
+ if err != nil {
+ t.Fatal(err)
+ }
+ historicalChunks[id] = make(map[string]struct{})
+ for c := range wantChunks {
+ if _, ok := syncedChunks[c]; !ok {
+ t.Errorf("missing chunk %v", c)
+ }
+ delete(wantChunks, c)
+ delete(syncedChunks, c)
+ historicalChunks[id][c] = struct{}{}
+ }
+ if len(wantChunks) != 0 {
+ t.Errorf("some of the uploaded chunks are not synced")
+ }
+ if len(syncedChunks) != 0 {
+ t.Errorf("some of the synced chunks are not of uploaded ones")
+ }
+ }
+
+ if tc.live {
+ mustUploadChunks(context.Background(), t, uploaderNodeStore, tc.chunkCount)
+ }
+
+ uploadedChunks, err = getChunks(uploaderNodeStore.ChunkStore)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, id := range ids {
+ wantChunks := make(map[string]struct{}, len(uploadedChunks))
+ for k, v := range uploadedChunks {
+ wantChunks[k] = v
+ }
+ store := sim.MustNodeItem(id, bucketKeyFileStore).(chunk.Store)
+ // wait for all chunks to be synced
+ if err := waitChunks(store, uint64(len(wantChunks)), 10*time.Second); err != nil {
+ t.Fatal(err)
+ }
+
+ // get all chunks from the syncing node
+ syncedChunks, err := getChunks(store)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // remove historical chunks from total uploaded and synced chunks
+ for c := range historicalChunks[id] {
+ if _, ok := wantChunks[c]; !ok {
+ t.Errorf("missing uploaded historical chunk: %s", c)
+ }
+ delete(wantChunks, c)
+ if _, ok := syncedChunks[c]; !ok {
+ t.Errorf("missing synced historical chunk: %s", c)
+ }
+ delete(syncedChunks, c)
+ }
+ // validate that all and only all live chunks are synced
+ for c := range wantChunks {
+ if _, ok := syncedChunks[c]; !ok {
+ t.Errorf("missing chunk %v", c)
+ }
+ delete(wantChunks, c)
+ delete(syncedChunks, c)
+ }
+ if len(wantChunks) != 0 {
+ t.Errorf("some of the uploaded live chunks are not synced")
+ }
+ if len(syncedChunks) != 0 {
+ t.Errorf("some of the synced live chunks are not of uploaded ones")
+ }
+ }
+ })
+ }
+}
+
+// waitChunks polls the store until it holds want chunks, returning an error
+// if the chunk count stops changing for staledTimeout.
+func waitChunks(store chunk.Store, want uint64, staledTimeout time.Duration) (err error) {
+ start := time.Now()
+ var (
+ count uint64 // total number of chunks
+ prev uint64 // total number of chunks in previous check
+ sleep time.Duration // duration until the next check
+ staled time.Duration // duration for when the number of chunks is the same
+ )
+ for staled < staledTimeout { // give up once the count has been unchanged for staledTimeout
+ count, err = getChunkCount(store)
+ if err != nil {
+ return err
+ }
+ if count >= want {
+ break
+ }
+ if count == prev {
+ staled += sleep
+ } else {
+ staled = 0
+ }
+ prev = count
+ if count > 0 {
+ // Calculate sleep time only if at least 1% of the chunks are available,
+ // as fewer may produce an unreliable estimate.
+ if count > want/100 {
+ // Calculate the time required to pass for missing chunks to be available,
+ // and divide it by half to perform a check earlier.
+ sleep = time.Duration(float64(time.Since(start)) * float64(want-count) / float64(count) / 2)
+ log.Debug("expecting all chunks", "in", sleep*2, "want", want, "have", count)
+ }
+ }
+ switch {
+ case sleep > time.Minute:
+ // next check and speed calculation in some shorter time
+ sleep = 500 * time.Millisecond
+ case sleep > 5*time.Second:
+ // upper limit for the check, do not check too slow
+ sleep = 5 * time.Second
+ case sleep < 50*time.Millisecond:
+ // lower limit for the check, do not check too frequently
+ sleep = 50 * time.Millisecond
+ if staled > 0 {
+ // slow down if chunks are stuck near the want value
+ sleep *= 10
+ }
+ }
+ time.Sleep(sleep)
+ }
+
+ if count != want {
+ return fmt.Errorf("got synced chunks %d, want %d", count, want)
+ }
+ return nil
+}
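A note on the polling heuristic above: given the elapsed time and the fraction of chunks already synced, it extrapolates the time remaining under a constant-rate assumption and checks again after half of that, clamped to [50ms, 5s]. A minimal standalone sketch of the same estimate (illustrative names, not part of the change):

```go
package main

import (
	"fmt"
	"time"
)

// nextSleep mirrors the waitChunks heuristic: assuming a roughly constant
// sync rate, extrapolate the time left for the missing chunks and poll again
// after half of it, clamped so checks are neither too rare nor too frequent.
// The caller guarantees have < want.
func nextSleep(start time.Time, have, want uint64) time.Duration {
	if have == 0 || have <= want/100 {
		// below 1% synced the rate estimate is unreliable
		return 50 * time.Millisecond
	}
	eta := time.Duration(float64(time.Since(start)) * float64(want-have) / float64(have))
	sleep := eta / 2
	switch {
	case sleep > time.Minute:
		// the estimate is likely stale, re-check soon instead
		return 500 * time.Millisecond
	case sleep > 5*time.Second:
		return 5 * time.Second
	case sleep < 50*time.Millisecond:
		return 50 * time.Millisecond
	}
	return sleep
}

func main() {
	start := time.Now().Add(-2 * time.Second) // pretend 2s have elapsed
	// 500 of 1000 chunks synced in 2s -> ~2s remaining -> ~1s sleep
	fmt.Println(nextSleep(start, 500, 1000))
}
```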
+
+// getChunkCount returns the total number of chunks in the store by summing
+// the last pull subscription bin IDs over all proximity order bins.
+func getChunkCount(store chunk.Store) (c uint64, err error) {
+ for po := 0; po <= chunk.MaxPO; po++ {
+ last, err := store.LastPullSubscriptionBinID(uint8(po))
+ if err != nil {
+ return 0, err
+ }
+ c += last
+ }
+ return c, nil
+}
+
+// getChunks returns the hex addresses of all chunks in the store by draining
+// a pull subscription for every bin, erroring on duplicates.
+func getChunks(store chunk.Store) (chunks map[string]struct{}, err error) {
+ chunks = make(map[string]struct{})
+ for po := uint8(0); po <= chunk.MaxPO; po++ {
+ last, err := store.LastPullSubscriptionBinID(po)
+ if err != nil {
+ return nil, err
+ }
+ if last == 0 {
+ continue
+ }
+ ch, _ := store.SubscribePull(context.Background(), po, 0, last)
+ for c := range ch {
+ addr := c.Address.Hex()
+ if _, ok := chunks[addr]; ok {
+ return nil, fmt.Errorf("duplicate chunk %s", addr)
+ }
+ chunks[addr] = struct{}{}
+ }
+ }
+ return chunks, nil
+}
+
+/*
+BenchmarkHistoricalStream measures syncing time after two nodes connect.
+
+go test -v github.com/ethersphere/swarm/network/stream/v2 -run="^$" -bench BenchmarkHistoricalStream -benchmem -loglevel 0
+goos: darwin
+goarch: amd64
+pkg: github.com/ethersphere/swarm/network/stream/v2
+BenchmarkHistoricalStream/1000-chunks-8 10 133564663 ns/op 148289188 B/op 233646 allocs/op
+BenchmarkHistoricalStream/2000-chunks-8 5 290056259 ns/op 316599452 B/op 541507 allocs/op
+BenchmarkHistoricalStream/10000-chunks-8 1 1714618578 ns/op 1791108672 B/op 4133564 allocs/op
+BenchmarkHistoricalStream/20000-chunks-8 1 4724760666 ns/op 4133092720 B/op 11347504 allocs/op
+PASS
+*/
+func BenchmarkHistoricalStream(b *testing.B) {
+ for _, c := range []uint64{
+ 1000,
+ 2000,
+ 10000,
+ 20000,
+ } {
+ b.Run(fmt.Sprintf("%v-chunks", c), func(b *testing.B) {
+ benchmarkHistoricalStream(b, c)
+ })
+ }
+}
+
+func benchmarkHistoricalStream(b *testing.B, chunks uint64) {
+ b.StopTimer()
+
+ for i := 0; i < b.N; i++ {
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ "bzz-sync": newSyncSimServiceFunc(nil),
+ })
+
+ uploaderNode, err := sim.AddNode()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ uploaderNodeStore := nodeFileStore(sim, uploaderNode)
+
+ mustUploadChunks(context.Background(), b, uploaderNodeStore, chunks)
+
+ uploadedChunks, err := getChunks(uploaderNodeStore.ChunkStore)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ syncingNode, err := sim.AddNode()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.StartTimer()
+
+ err = sim.Net.Connect(syncingNode, uploaderNode)
+ if err != nil {
+ b.Fatal(err)
+ }
+ syncingNodeStore := sim.MustNodeItem(syncingNode, bucketKeyFileStore).(chunk.Store)
+ if err := waitChunks(syncingNodeStore, uint64(len(uploadedChunks)), 10*time.Second); err != nil {
+ b.Fatal(err)
+ }
+ b.StopTimer()
+ err = sim.Net.Stop(syncingNode)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ sim.Close()
+ }
+}
+
+// catchDuplicateChunkSync uses putSeenTestHook to record, per node, every
+// chunk that was delivered to it more than once, and returns a validate
+// function that reports all such duplicate syncs.
+func catchDuplicateChunkSync(t *testing.T) (validate func()) {
+ m := make(map[enode.ID]map[string]int)
+ var mu sync.Mutex
+ putSeenTestHook = func(addr chunk.Address, id enode.ID) {
+ mu.Lock()
+ defer mu.Unlock()
+ if _, ok := m[id]; !ok {
+ m[id] = make(map[string]int)
+ }
+ m[id][addr.Hex()]++
+ }
+ return func() {
+ // reset the test hook
+ putSeenTestHook = nil
+ // do the validation
+ mu.Lock()
+ defer mu.Unlock()
+ for nodeID, addrs := range m {
+ for addr, count := range addrs {
+ t.Errorf("chunk synced %v times to node %s: %v", count, nodeID, addr)
+ }
+ }
+ }
+}
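For readers unfamiliar with the hook used here: putSeenTestHook appears to be a package-level function variable that is nil in production and assigned only by tests, so the syncing code can report already-seen puts without any production dependency on the test. A minimal sketch of the pattern with hypothetical names:

```go
package main

import "fmt"

// onDuplicate plays the role of putSeenTestHook: nil in production,
// assigned only by tests that want to observe duplicate deliveries.
var onDuplicate func(addr string)

// put stores addr and invokes the hook when addr was already present.
func put(addr string, seen map[string]bool) {
	if seen[addr] {
		if onDuplicate != nil { // no-op unless a test installed the hook
			onDuplicate(addr)
		}
		return
	}
	seen[addr] = true
}

func main() {
	seen := make(map[string]bool)
	onDuplicate = func(addr string) { fmt.Println("duplicate:", addr) }
	defer func() { onDuplicate = nil }() // reset, as validate() does above
	put("c1", seen)
	put("c1", seen) // triggers the hook
}
```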
+
+// TestStarNetworkSyncWithBogusNodes tests that syncing works on a more elaborate
+// network topology. The test creates three real nodes in a star topology, then adds
+// bogus nodes to the pivot instead of real ones, in order to keep the simulation
+// CI friendly. The pivot node will then have neighbourhood depth > 0, which in turn
+// means that from each connected node the pivot should have only part of its chunks.
+// The test checks that every chunk on a non-pivot node that, according to its PO and
+// the kademlia table of the pivot, should be synced - exists on the pivot node, and
+// that chunks which should not be synced do not end up on the pivot.
+func TestStarNetworkSyncWithBogusNodes(t *testing.T) {
+ var (
+ chunkCount = 500
+ nodeCount = 12
+ minPivotDepth = 1
+ chunkSize = 4096
+ simTimeout = 60 * time.Second
+ syncTime = 2 * time.Second
+ filesize = chunkCount * chunkSize
+ )
+ sim := simulation.NewBzzInProc(map[string]simulation.ServiceFunc{
+ "bzz-sync": newSyncSimServiceFunc(&SyncSimServiceOptions{SyncOnlyWithinDepth: false}),
+ })
+ defer sim.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), simTimeout)
+ defer cancel()
+
+ pivot, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ pivotKad := sim.MustNodeItem(pivot, simulation.BucketKeyKademlia).(*network.Kademlia)
+ pivotBase := pivotKad.BaseAddr()
+
+ log.Debug("started pivot node", "addr", hex.EncodeToString(pivotBase))
+
+ newNode, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = sim.Net.Connect(pivot, newNode)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ newNode2, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = sim.Net.Connect(pivot, newNode2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(50 * time.Millisecond)
+ log.Trace(sim.MustNodeItem(newNode, simulation.BucketKeyKademlia).(*network.Kademlia).String())
+ pivotKad = sim.MustNodeItem(pivot, simulation.BucketKeyKademlia).(*network.Kademlia)
+ pivotAddr := pot.NewAddressFromBytes(pivotBase)
+ // add a few fictional nodes at increasing POs to the pivot so that the pivot's depth goes > 0
+ for i := 0; i < nodeCount; i++ {
+ rw := &p2p.MsgPipeRW{}
+ ptpPeer := p2p.NewPeer(enode.ID{}, "im just a lazy hobo", []p2p.Cap{})
+ protoPeer := protocols.NewPeer(ptpPeer, rw, &protocols.Spec{})
+ peerAddr := pot.RandomAddressAt(pivotAddr, i)
+ bzzPeer := &network.BzzPeer{
+ Peer: protoPeer,
+ BzzAddr: &network.BzzAddr{
+ OAddr: peerAddr.Bytes(),
+ UAddr: []byte(fmt.Sprintf("%x", peerAddr[:])),
+ },
+ }
+ peer := network.NewPeer(bzzPeer, pivotKad)
+ pivotKad.On(peer)
+ }
+ time.Sleep(50 * time.Millisecond)
+
+ log.Trace(pivotKad.String())
+
+ if d := pivotKad.NeighbourhoodDepth(); d < minPivotDepth {
+ t.Skipf("too shallow. depth %d want %d", d, minPivotDepth)
+ }
+ pivotDepth := pivotKad.NeighbourhoodDepth()
+
+ chunkProx := make(map[string]chunkProxData)
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
+ nodeIDs := sim.UpNodeIDs()
+ for _, node := range nodeIDs {
+ node := node
+ if bytes.Equal(pivot.Bytes(), node.Bytes()) {
+ continue
+ }
+ nodeKad := sim.MustNodeItem(node, simulation.BucketKeyKademlia).(*network.Kademlia)
+ nodePo := chunk.Proximity(nodeKad.BaseAddr(), pivotKad.BaseAddr())
+ seed := int(time.Now().UnixNano())
+ randomBytes := testutil.RandomBytes(seed, filesize)
+ log.Debug("putting chunks to ephemeral localstore")
+ chunkAddrs, err := getAllRefs(randomBytes[:])
+ if err != nil {
+ return err
+ }
+ log.Debug("done getting all refs")
+ for _, c := range chunkAddrs {
+ proxData := chunkProxData{
+ addr: c,
+ uploaderNodeToPivotNodePO: nodePo,
+ chunkToUploaderPO: chunk.Proximity(nodeKad.BaseAddr(), c),
+ pivotPO: chunk.Proximity(c, pivotKad.BaseAddr()),
+ uploaderNode: node,
+ }
+ log.Debug("test putting chunk", "node", node, "addr", hex.EncodeToString(c), "uploaderToPivotPO", proxData.uploaderNodeToPivotNodePO, "c2uploaderPO", proxData.chunkToUploaderPO, "pivotDepth", pivotDepth)
+ if _, ok := chunkProx[hex.EncodeToString(c)]; ok {
+ return fmt.Errorf("chunk already found on another node %s", hex.EncodeToString(c))
+ }
+ chunkProx[hex.EncodeToString(c)] = proxData
+ }
+
+ fs := sim.MustNodeItem(node, bucketKeyFileStore).(*storage.FileStore)
+ reader := bytes.NewReader(randomBytes[:])
+ _, wait1, err := fs.Store(ctx, reader, int64(len(randomBytes)), false)
+ if err != nil {
+ return fmt.Errorf("fileStore.Store: %v", err)
+ }
+
+ if err := wait1(ctx); err != nil {
+ return err
+ }
+ }
+ // per the old pull sync semantics: for a node outside of the pivot's depth, the pivot should have exactly that node's chunks with po(chunk) == po(node)
+ time.Sleep(syncTime)
+
+ pivotLs := sim.MustNodeItem(pivot, bucketKeyLocalStore).(*localstore.DB)
+ return verifyCorrectChunksOnPivot(chunkProx, pivotDepth, pivotLs)
+ })
+
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+}
+
+func verifyCorrectChunksOnPivot(chunkProx map[string]chunkProxData, pivotDepth int, pivotLs *localstore.DB) error {
+ for _, v := range chunkProx {
+ // outside of depth
+ if v.uploaderNodeToPivotNodePO < pivotDepth {
+ // chunk PO to uploader == uploader node PO to pivot (i.e. chunk should be synced) - inclusive test
+ if v.chunkToUploaderPO == v.uploaderNodeToPivotNodePO {
+ // check that the chunk exists on the pivot when chunkPO == uploaderPO
+ _, err := pivotLs.Get(context.Background(), chunk.ModeGetRequest, v.addr)
+ if err != nil {
+ log.Error("chunk errored", "uploaderNode", v.uploaderNode, "poUploader", v.chunkToUploaderPO, "uploaderToPivotPo", v.uploaderNodeToPivotNodePO, "chunk", hex.EncodeToString(v.addr))
+ return err
+ }
+ } else {
+ // chunk should not be synced - exclusion test
+ _, err := pivotLs.Get(context.Background(), chunk.ModeGetRequest, v.addr)
+ if err == nil {
+ log.Error("chunk did not error but should have", "uploaderNode", v.uploaderNode, "poUploader", v.chunkToUploaderPO, "uploaderToPivotPo", v.uploaderNodeToPivotNodePO, "chunk", hex.EncodeToString(v.addr))
+ return fmt.Errorf("chunk %s exists on pivot but should not have been synced", hex.EncodeToString(v.addr))
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+type chunkProxData struct {
+ addr chunk.Address
+ uploaderNodeToPivotNodePO int
+ chunkToUploaderPO int
+ uploaderNode enode.ID
+ pivotPO int
+}
+
+// getAllRefs computes the references of all chunks for testData
+// using a throwaway local FileStore.
+func getAllRefs(testData []byte) (storage.AddressCollection, error) {
+ datadir, err := ioutil.TempDir("", "chunk-debug")
+ if err != nil {
+ return nil, fmt.Errorf("unable to create temp dir: %v", err)
+ }
+ defer os.RemoveAll(datadir)
+ fileStore, cleanup, err := storage.NewLocalFileStore(datadir, make([]byte, 32), chunk.NewTags())
+ if err != nil {
+ return nil, err
+ }
+ defer cleanup()
+
+ reader := bytes.NewReader(testData)
+ return fileStore.GetAllReferences(context.Background(), reader)
+}
diff --git a/network/newstream/wire.go b/network/stream/v2/wire.go
similarity index 74%
rename from network/newstream/wire.go
rename to network/stream/v2/wire.go
index 47d18ffbc1..06ff9c6e87 100644
--- a/network/newstream/wire.go
+++ b/network/stream/v2/wire.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the Swarm library. If not, see <http://www.gnu.org/licenses/>.
-package newstream
+package stream
import (
"context"
@@ -35,26 +35,30 @@ import (
// to expose, parse and encode values related to the string representation of the stream
type StreamProvider interface {
- // NeedData informs the caller whether a certain chunk needs to be fetched from another peer or not.
- // Typically this will involve checking whether a certain chunk exists locally.
- // In case a chunk does not exist locally - a `wait` function returns upon chunk delivery
- NeedData(ctx context.Context, key []byte) (need bool, wait func(context.Context) error)
+ // NeedData informs the caller whether a certain chunk needs to be fetched from another peer or not
+ NeedData(ctx context.Context, addr ...chunk.Address) ([]bool, error)
- // Get a particular chunk identified by addr from the local storage
- Get(ctx context.Context, addr chunk.Address) ([]byte, error)
+ // Get a set of chunks identified by addr from the local storage
+ Get(ctx context.Context, addr ...chunk.Address) ([]chunk.Chunk, error)
- // Put a certain chunk into the local storage
- Put(ctx context.Context, addr chunk.Address, data []byte) (exists bool, err error)
+ // Put a set of chunks into the local storage
+ Put(ctx context.Context, ch ...chunk.Chunk) (exists []bool, err error)
+
+ // Set a set of chunks as synced in the localstore
+ Set(ctx context.Context, addrs ...chunk.Address) error
// Subscribe to a data stream from an arbitrary data source
Subscribe(ctx context.Context, key interface{}, from, to uint64) (<-chan chunk.Descriptor, func())
- // Cursor returns the last known Cursor for a given Stream Key
- Cursor(interface{}) (uint64, error)
+ // Cursor returns the last known Cursor for a given Stream Key string
+ Cursor(string) (uint64, error)
- // RunUpdateStreams is a provider specific implementation on how to maintain running streams with
+ // InitPeer is a provider specific implementation on how to maintain running streams with
// an arbitrary Peer. This method should always be run in a separate goroutine
- RunUpdateStreams(p *Peer)
+ InitPeer(p *Peer)
+
+ // WantStream indicates if we are interested in a stream
+ WantStream(*Peer, ID) bool
// StreamName returns the Name of the Stream (see ID)
StreamName() string
@@ -65,28 +69,15 @@ type StreamProvider interface {
// EncodeStream from a Stream Key to a Stream pipe-separated string representation
EncodeKey(interface{}) (string, error)
- // StreamBehavior defines how the stream behaves upon initialisation
- StreamBehavior() StreamInitBehavior
+ // Autostart indicates if the stream should autostart
+ Autostart() bool
+ // Boundedness indicates if the stream is bounded or not
Boundedness() bool
-}
-
-// StreamInitBehavior defines the stream behavior upon init
-type StreamInitBehavior int
-
-const (
- // StreamIdle means that there is no initial automatic message exchange
- // between the nodes when the protocol gets established
- StreamIdle StreamInitBehavior = iota
- // StreamGetCursors tells the two nodes to automatically fetch stream
- // cursors from each other
- StreamGetCursors
-
- // StreamAutostart automatically starts fetching data from the streams
- // once the cursors arrive
- StreamAutostart
-)
+ // Close the provider
+ Close()
+}
// StreamInfoReq is a request to get information about particular streams
type StreamInfoReq struct {
@@ -111,16 +102,15 @@ type GetRange struct {
Ruid uint
Stream ID
From uint64
- To uint64 `rlp:"nil"`
+ To *uint64 `rlp:"nil"`
BatchSize uint
- Roundtrip bool
}
// OfferedHashes is a message sent from the upstream peer to the downstream peer allowing the latter
// to selectively ask for chunks within a particular requested interval
type OfferedHashes struct {
Ruid uint
- LastIndex uint
+ LastIndex uint64
Hashes []byte
}
@@ -133,9 +123,8 @@ type WantedHashes struct {
// ChunkDelivery delivers a frame of chunks in response to a WantedHashes message
type ChunkDelivery struct {
- Ruid uint
- LastIndex uint
- Chunks []DeliveredChunk
+ Ruid uint
+ Chunks []DeliveredChunk
}
// DeliveredChunk encapsulates a particular chunk's underlying data within a ChunkDelivery message
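The change of GetRange.To from uint64 to *uint64 with the rlp:"nil" tag is worth a note: a pointer lets the wire format distinguish a bounded historical range from an open-ended one (presumably for live syncing), which a zero value cannot. A sketch of both cases, assuming nil To means "no upper bound" (illustrative types, not the actual stream package API):

```go
package main

import "fmt"

type ID struct{ Name, Key string }

// GetRange mirrors the message shape above; the rlp:"nil" tag lets the
// encoder represent a nil To, so "no upper bound" is expressible on the wire.
type GetRange struct {
	Ruid      uint
	Stream    ID
	From      uint64
	To        *uint64 `rlp:"nil"`
	BatchSize uint
}

// bounded requests the closed interval [from, to].
func bounded(s ID, from, to uint64, batch uint) GetRange {
	return GetRange{Stream: s, From: from, To: &to, BatchSize: batch}
}

// open requests everything from `from` onwards, with no upper bound.
func open(s ID, from uint64, batch uint) GetRange {
	return GetRange{Stream: s, From: from, To: nil, BatchSize: batch}
}

func main() {
	s := ID{Name: "SYNC", Key: "5"}
	b, o := bounded(s, 1, 100, 32), open(s, 101, 32)
	fmt.Println(*b.To, o.To == nil) // 100 true
}
```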
diff --git a/network_test.go b/network_test.go
index d54ba22a45..4001eb274f 100644
--- a/network_test.go
+++ b/network_test.go
@@ -68,18 +68,6 @@ func TestSwarmNetwork(t *testing.T) {
Timeout: 45 * time.Second,
},
},
- {
- name: "10_nodes_skip_check",
- steps: []testSwarmNetworkStep{
- {
- nodeCount: 10,
- },
- },
- options: &testSwarmNetworkOptions{
- Timeout: 45 * time.Second,
- SkipCheck: true,
- },
- },
{
name: "dec_inc_node_count",
steps: []testSwarmNetworkStep{
@@ -146,18 +134,6 @@ func longRunningCases() []testSwarmNetworkCase {
Timeout: 3 * time.Minute,
},
},
- {
- name: "50_nodes_skip_check",
- steps: []testSwarmNetworkStep{
- {
- nodeCount: 50,
- },
- },
- options: &testSwarmNetworkOptions{
- Timeout: 3 * time.Minute,
- SkipCheck: true,
- },
- },
{
name: "inc_node_count",
steps: []testSwarmNetworkStep{
@@ -215,30 +191,6 @@ func longRunningCases() []testSwarmNetworkCase {
Timeout: 5 * time.Minute,
},
},
- {
- name: "inc_dec_node_count_skip_check",
- steps: []testSwarmNetworkStep{
- {
- nodeCount: 3,
- },
- {
- nodeCount: 5,
- },
- {
- nodeCount: 25,
- },
- {
- nodeCount: 10,
- },
- {
- nodeCount: 4,
- },
- },
- options: &testSwarmNetworkOptions{
- Timeout: 5 * time.Minute,
- SkipCheck: true,
- },
- },
}
}
@@ -388,7 +340,7 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
// File retrieval check is repeated until all uploaded files are retrieved from all nodes
// or until the timeout is reached.
for {
- if retrieve(sim, files, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 {
+ if retrieveF(sim, files, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 {
return nil
}
}
@@ -423,9 +375,9 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) {
return k, data, err
}
-// retrieve is the function that is used for checking the availability of
+// retrieveF is the function that is used for checking the availability of
// uploaded files in testSwarmNetwork test helper function.
-func retrieve(
+func retrieveF(
sim *simulation.Simulation,
files []file,
checkStatusM *sync.Map,
@@ -457,7 +409,7 @@ func retrieve(
swarm := sim.Service("swarm", id).(*Swarm)
for _, f := range files {
-
+ f := f
checkKey := check{
key: f.addr.String(),
nodeID: id,
diff --git a/pss/client/client_test.go b/pss/client/client_test.go
index 493bdc01c2..f53c516f1c 100644
--- a/pss/client/client_test.go
+++ b/pss/client/client_test.go
@@ -264,7 +264,7 @@ func newServices() adapters.Services {
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
- return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil), nil
+ return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil, nil, nil), nil
},
}
}
diff --git a/pss/notify/notify_test.go b/pss/notify/notify_test.go
index 90e3330501..30bdd43bc6 100644
--- a/pss/notify/notify_test.go
+++ b/pss/notify/notify_test.go
@@ -251,7 +251,7 @@ func newServices(allowRaw bool) adapters.Services {
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
- return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil), nil
+ return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil, nil, nil), nil
},
}
}
diff --git a/pss/prox_test.go b/pss/prox_test.go
index 00d2aa60e2..65482976fb 100644
--- a/pss/prox_test.go
+++ b/pss/prox_test.go
@@ -420,7 +420,7 @@ func newProxServices(td *testData, allowRaw bool, handlerContextFuncs map[Topic]
bzzKey := network.PrivateKeyToBzzKey(bzzPrivateKey)
pskad := kademlia(ctx.Config.ID, bzzKey)
b.Store(simulation.BucketKeyKademlia, pskad)
- return network.NewBzz(config, kademlia(ctx.Config.ID, addr.OAddr), stateStore, nil, nil), nil, nil
+ return network.NewBzz(config, kademlia(ctx.Config.ID, addr.OAddr), stateStore, nil, nil, nil, nil), nil, nil
},
"pss": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
// execadapter does not exec init()
diff --git a/pss/pss_test.go b/pss/pss_test.go
index 18e8f5c39b..2ca5e997b3 100644
--- a/pss/pss_test.go
+++ b/pss/pss_test.go
@@ -1812,7 +1812,7 @@ func newServices(allowRaw bool) map[string]simulation.ServiceFunc {
}
pskad := kademlia(ctx.Config.ID, addr.OAddr)
bucket.Store(simulation.BucketKeyKademlia, pskad)
- return network.NewBzz(config, pskad, stateStore, nil, nil), nil, nil
+ return network.NewBzz(config, pskad, stateStore, nil, nil, nil, nil), nil, nil
},
protocolName: func(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
// execadapter does not exec init()
@@ -1864,8 +1864,8 @@ func newServices(allowRaw bool) map[string]simulation.ServiceFunc {
protocol: pp,
run: p2pp.Run,
}
- return ps, cleanupFunc, nil
+ return ps, cleanupFunc, nil
},
}
}
diff --git a/storage/feed/testutil.go b/storage/feed/testutil.go
index a1e9cf030c..98ce6b5b55 100644
--- a/storage/feed/testutil.go
+++ b/storage/feed/testutil.go
@@ -51,7 +51,7 @@ func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error)
localStore := chunk.NewValidatorStore(db, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)), fh)
- netStore := storage.NewNetStore(localStore, enode.ID{})
+ netStore := storage.NewNetStore(localStore, make([]byte, 32), enode.ID{})
netStore.RemoteGet = func(ctx context.Context, req *storage.Request, localID enode.ID) (*enode.ID, error) {
return nil, errors.New("not found")
}
diff --git a/storage/filestore.go b/storage/filestore.go
index ec8e9f73af..cb63c43216 100644
--- a/storage/filestore.go
+++ b/storage/filestore.go
@@ -45,8 +45,9 @@ const (
type FileStore struct {
ChunkStore
- hashFunc SwarmHasher
- tags *chunk.Tags
+ putterStore ChunkStore
+ hashFunc SwarmHasher
+ tags *chunk.Tags
}
type FileStoreParams struct {
@@ -68,15 +69,16 @@ func NewLocalFileStore(datadir string, basekey []byte, tags *chunk.Tags) (*FileS
cleanup := func() {
localStore.Close()
}
- return NewFileStore(chunk.NewValidatorStore(localStore, NewContentAddressValidator(MakeHashFunc(DefaultHash))), NewFileStoreParams(), tags), cleanup, nil
+ return NewFileStore(chunk.NewValidatorStore(localStore, NewContentAddressValidator(MakeHashFunc(DefaultHash))), localStore, NewFileStoreParams(), tags), cleanup, nil
}
-func NewFileStore(store ChunkStore, params *FileStoreParams, tags *chunk.Tags) *FileStore {
+func NewFileStore(store ChunkStore, putterStore ChunkStore, params *FileStoreParams, tags *chunk.Tags) *FileStore {
hashFunc := MakeHashFunc(params.Hash)
return &FileStore{
- ChunkStore: store,
- hashFunc: hashFunc,
- tags: tags,
+ ChunkStore: store,
+ putterStore: putterStore,
+ hashFunc: hashFunc,
+ tags: tags,
}
}
@@ -109,7 +111,7 @@ func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEnc
tag = chunk.NewTag(0, "", 0)
//return nil, nil, err
}
- putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt, tag)
+ putter := NewHasherStore(f.putterStore, f.hashFunc, toEncrypt, tag)
return PyramidSplit(ctx, data, putter, putter, tag)
}
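The second constructor argument deserves a note: the embedded ChunkStore now serves only the read path (later in this diff, swarm.go passes an LNetStore, which may trigger remote fetches), while putterStore serves the write path, so uploads go straight to local storage. A minimal sketch of the split using toy types (not the actual swarm interfaces):

```go
package main

import "fmt"

// store is a toy stand-in for swarm's ChunkStore.
type store interface {
	Get(ref string) (string, error)
	Put(ref, data string) error
}

type memStore map[string]string

func (m memStore) Get(ref string) (string, error) {
	d, ok := m[ref]
	if !ok {
		return "", fmt.Errorf("not found: %s", ref)
	}
	return d, nil
}

func (m memStore) Put(ref, data string) error { m[ref] = data; return nil }

// fileStore reads through the embedded store but writes through putter,
// mirroring NewFileStore(store, putterStore, ...) above.
type fileStore struct {
	store        // read path, e.g. a net store that can fetch remotely
	putter store // write path, the local store only
}

func (f fileStore) Store(ref, data string) error { return f.putter.Put(ref, data) }

func main() {
	local := memStore{}
	fs := fileStore{store: local, putter: local}
	_ = fs.Store("ref1", "data")
	d, _ := fs.Get("ref1")
	fmt.Println(d)
}
```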
diff --git a/storage/filestore_test.go b/storage/filestore_test.go
index 92f0ffcfc9..275236de53 100644
--- a/storage/filestore_test.go
+++ b/storage/filestore_test.go
@@ -49,7 +49,7 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
}
defer localStore.Close()
- fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
+ fileStore := NewFileStore(localStore, localStore, NewFileStoreParams(), chunk.NewTags())
slice := testutil.RandomBytes(1, testDataSize)
ctx := context.TODO()
@@ -114,7 +114,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
}
defer localStore.Close()
- fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
+ fileStore := NewFileStore(localStore, localStore, NewFileStoreParams(), chunk.NewTags())
slice := testutil.RandomBytes(1, testDataSize)
ctx := context.TODO()
key, wait, err := fileStore.Store(ctx, bytes.NewReader(slice), testDataSize, toEncrypt)
@@ -183,7 +183,7 @@ func TestGetAllReferences(t *testing.T) {
}
defer localStore.Close()
- fileStore := NewFileStore(localStore, NewFileStoreParams(), chunk.NewTags())
+ fileStore := NewFileStore(localStore, localStore, NewFileStoreParams(), chunk.NewTags())
// testRuns[i] and expectedLen[i] are dataSize and expected length respectively
testRuns := []int{1024, 8192, 16000, 30000, 1000000}
diff --git a/storage/localstore/localstore.go b/storage/localstore/localstore.go
index d3c8c55764..2953de2a97 100644
--- a/storage/localstore/localstore.go
+++ b/storage/localstore/localstore.go
@@ -457,6 +457,5 @@ func init() {
// with provided name appended with ".total-time".
func totalTimeMetric(name string, start time.Time) {
totalTime := time.Since(start)
- log.Trace(name+" total time", "time", totalTime)
metrics.GetOrRegisterResettingTimer(name+".total-time", nil).Update(totalTime)
}
diff --git a/storage/localstore/subscription_pull.go b/storage/localstore/subscription_pull.go
index 07befb9067..99b4f6e6c1 100644
--- a/storage/localstore/subscription_pull.go
+++ b/storage/localstore/subscription_pull.go
@@ -88,17 +88,21 @@ func (db *DB) SubscribePull(ctx context.Context, bin uint8, since, until uint64)
iterStart := time.Now()
var count int
err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
+ // stop the iteration, before sending, when the chunk
+ // descriptor is past the requested until bin ID
+ if until > 0 && item.BinID > until {
+ return true, errStopSubscription
+ }
select {
case chunkDescriptors <- chunk.Descriptor{
Address: item.Address,
BinID: item.BinID,
}:
- count++
- // until chunk descriptor is sent
- // break the iteration
- if until > 0 && item.BinID >= until {
+ if until > 0 && item.BinID == until {
return true, errStopSubscription
}
+
+ count++
// set next iteration start item
// when its chunk is successfully sent to channel
sinceItem = &item
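The reordering in this hunk matters: the bound is now checked before the descriptor is sent, so a subscriber can no longer receive an item with BinID > until, while an exact hit on until still stops the iterator after the send. A reduced sketch of the send loop (illustrative names):

```go
package main

import "fmt"

// iterate sends only bin IDs up to and including until (0 means unbounded),
// checking the bound before each send, as in the fixed SubscribePull loop.
func iterate(ids []uint64, until uint64, out chan<- uint64) {
	defer close(out)
	for _, id := range ids {
		if until > 0 && id > until {
			return // past the bound: stop before sending
		}
		out <- id
		if until > 0 && id == until {
			return // exact bound: stop after sending
		}
	}
}

func main() {
	out := make(chan uint64)
	go iterate([]uint64{1, 2, 5, 7}, 5, out)
	for id := range out {
		fmt.Println(id) // 1 2 5; 7 is never sent
	}
}
```

With gaps left by removed chunks, the bin ID can jump straight past until without hitting it exactly, which is precisely the case the > check (and the new TestDB_SubscribePull_rangeOnRemovedChunks below) covers.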
diff --git a/storage/localstore/subscription_pull_test.go b/storage/localstore/subscription_pull_test.go
index 2e30c1535f..993fefe77d 100644
--- a/storage/localstore/subscription_pull_test.go
+++ b/storage/localstore/subscription_pull_test.go
@@ -423,6 +423,108 @@ func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
checkErrChan(ctx, t, errChan, wantedChunksCount)
}
+// TestDB_SubscribePull_rangeOnRemovedChunks performs a test:
+// - uploads a number of chunks
+// - removes first half of chunks for every bin
+// - subscribes to a range that is within removed chunks,
+// but before the chunks that are left
+// - validates that no chunks are received on subscription channel
+func TestDB_SubscribePull_rangeOnRemovedChunks(t *testing.T) {
+ db, cleanupFunc := newTestDB(t, nil)
+ defer cleanupFunc()
+
+ // keeps track of available chunks in the database
+ // per bin with their bin ids
+ chunks := make(map[uint8][]chunk.Descriptor)
+
+ // keeps track of latest bin id for every bin
+ binIDCounter := make(map[uint8]uint64)
+
+ // upload chunks to populate bins from start
+ // bin ids start from 1
+ const chunkCount = 1000
+ for i := 0; i < chunkCount; i++ {
+ ch := generateTestRandomChunk()
+
+ _, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bin := db.po(ch.Address())
+
+ binIDCounter[bin]++
+ binID := binIDCounter[bin]
+
+ if _, ok := chunks[bin]; !ok {
+ chunks[bin] = make([]chunk.Descriptor, 0)
+ }
+ chunks[bin] = append(chunks[bin], chunk.Descriptor{
+ Address: ch.Address(),
+ BinID: binID,
+ })
+ }
+
+ // remove first half of the chunks in every bin
+ for bin := range chunks {
+ count := len(chunks[bin])
+ for i := 0; i < count/2; i++ {
+ d := chunks[bin][0]
+ if err := db.Set(context.Background(), chunk.ModeSetRemove, d.Address); err != nil {
+ t.Fatal(err)
+ }
+ chunks[bin] = chunks[bin][1:]
+ }
+ }
+
+ // set a timeout on subscription
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ // signals that there were valid bins for this check to ensure test validity
+ var checkedBins int
+ // subscribe to every bin and validate returned values
+ for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
+ // do not subscribe to bins that do not have chunks
+ if len(chunks[bin]) == 0 {
+ continue
+ }
+ // subscribe from start of the bin index
+ var since uint64
+ // subscribe until the first available chunk in bin,
+ // but not for it
+ until := chunks[bin][0].BinID - 1
+ if until == 0 {
+ // ignore this bin if it has only one chunk left
+ continue
+ }
+ ch, stop := db.SubscribePull(ctx, bin, since, until)
+ defer stop()
+
+ // the returned channel should be closed
+ // because no chunks should be provided
+ select {
+ case d, ok := <-ch:
+ if !ok {
+ // this is expected for successful case
+ break
+ }
+ if d.BinID > until {
+ t.Errorf("got %v for bin %v, subscribed until bin id %v", d.BinID, bin, until)
+ }
+ case <-ctx.Done():
+ t.Error(ctx.Err())
+ }
+
+ // mark that the check is performed
+ checkedBins++
+ }
+ // check that test performed at least one validation
+ if checkedBins == 0 {
+ t.Fatal("test did not perform any checks")
+ }
+}
+
// uploadRandomChunksBin uploads random chunks to the database and adds them to
// the map of addresses per bin.
func uploadRandomChunksBin(t *testing.T, db *DB, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
diff --git a/storage/netstore.go b/storage/netstore.go
index 41c77bc299..f15d6c6870 100644
--- a/storage/netstore.go
+++ b/storage/netstore.go
@@ -18,19 +18,21 @@ package storage
import (
"context"
+ "encoding/hex"
"errors"
"fmt"
"sync"
"time"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+
"github.com/ethersphere/swarm/chunk"
- "github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/network/timeouts"
"github.com/ethersphere/swarm/spancontext"
lru "github.com/hashicorp/golang-lru"
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/p2p/enode"
olog "github.com/opentracing/opentracing-go/log"
"github.com/syndtr/goleveldb/leveldb"
"golang.org/x/sync/singleflight"
@@ -83,21 +85,23 @@ type RemoteGetFunc func(ctx context.Context, req *Request, localID enode.ID) (*e
// on request it initiates remote cloud retrieval
type NetStore struct {
chunk.Store
- localID enode.ID // our local enode - used when issuing RetrieveRequests
+ LocalID enode.ID // our local enode - used when issuing RetrieveRequests
fetchers *lru.Cache
putMu sync.Mutex
requestGroup singleflight.Group
RemoteGet RemoteGetFunc
+ logger log.Logger
}
// NewNetStore creates a new NetStore using the provided chunk.Store and localID of the node.
-func NewNetStore(store chunk.Store, localID enode.ID) *NetStore {
+func NewNetStore(store chunk.Store, baseAddr []byte, localID enode.ID) *NetStore {
fetchers, _ := lru.New(fetchersCapacity)
return &NetStore{
fetchers: fetchers,
Store: store,
- localID: localID,
+ LocalID: localID,
+ logger: log.New("base", hex.EncodeToString(baseAddr)[:16]),
}
}
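The logger stored on NetStore is a contextual child logger: every record it emits carries a short prefix of the node's base overlay address, which makes interleaved multi-node simulation logs attributable. A minimal sketch using go-ethereum's log package, assuming only that log.New attaches key-value context to a child logger:

```go
package main

import (
	"encoding/hex"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	base := make([]byte, 32) // a node's 32-byte overlay address
	// Child logger: every record carries base=<first 16 hex chars>,
	// so lines from different simulated nodes are distinguishable.
	logger := log.New("base", hex.EncodeToString(base)[:16])
	logger.Info("netstore.put", "ref", "00ff00ff")
}
```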
@@ -108,9 +112,8 @@ func (n *NetStore) Put(ctx context.Context, mode chunk.ModePut, chs ...Chunk) ([
defer n.putMu.Unlock()
for i, ch := range chs {
- log.Trace("netstore.put", "index", i, "ref", ch.Address().String(), "mode", mode)
+ n.logger.Trace("netstore.put", "index", i, "ref", ch.Address().String(), "mode", mode)
}
-
// put the chunk to the localstore, there should be no error
exist, err := n.Store.Put(ctx, mode, chs...)
if err != nil {
@@ -125,16 +128,16 @@ func (n *NetStore) Put(ctx context.Context, mode chunk.ModePut, chs ...Chunk) ([
// delivered through syncing and through a retrieve request
fii := fi.(*Fetcher)
fii.SafeClose()
- log.Trace("netstore.put chunk delivered and stored", "ref", ch.Address().String())
+ n.logger.Trace("netstore.put chunk delivered and stored", "ref", ch.Address().String())
metrics.GetOrRegisterResettingTimer(fmt.Sprintf("netstore.fetcher.lifetime.%s", fii.CreatedBy), nil).UpdateSince(fii.CreatedAt)
// helper snippet to log if a chunk took way to long to be delivered
slowChunkDeliveryThreshold := 5 * time.Second
if time.Since(fii.CreatedAt) > slowChunkDeliveryThreshold {
- log.Trace("netstore.put slow chunk delivery", "ref", ch.Address().String())
+ metrics.GetOrRegisterCounter("netstore.slow_chunk_delivery", nil).Inc(1)
+ n.logger.Trace("netstore.put slow chunk delivery", "ref", ch.Address().String())
}
-
n.fetchers.Remove(ch.Address().String())
}
}
@@ -155,16 +158,14 @@ func (n *NetStore) Get(ctx context.Context, mode chunk.ModeGet, req *Request) (C
ref := req.Addr
- log.Trace("netstore.get", "ref", ref.String())
-
ch, err := n.Store.Get(ctx, mode, ref)
if err != nil {
// TODO: fix comparison - we should be comparing against leveldb.ErrNotFound, this error should be wrapped.
if err != ErrChunkNotFound && err != leveldb.ErrNotFound {
- log.Error("localstore get error", "err", err)
+ n.logger.Error("localstore get error", "err", err)
}
- log.Trace("netstore.chunk-not-in-localstore", "ref", ref.String())
+ n.logger.Trace("netstore.chunk-not-in-localstore", "ref", ref.String())
v, err, _ := n.requestGroup.Do(ref.String(), func() (interface{}, error) {
// currently we issue a retrieve request if a fetcher
@@ -183,7 +184,7 @@ func (n *NetStore) Get(ctx context.Context, mode chunk.ModeGet, req *Request) (C
ch, err := n.Store.Get(ctx, mode, ref)
if err != nil {
- log.Error(err.Error(), "ref", ref)
+ n.logger.Error(err.Error(), "ref", ref)
return nil, errors.New("item should have been in localstore, but it is not")
}
@@ -196,16 +197,17 @@ func (n *NetStore) Get(ctx context.Context, mode chunk.ModeGet, req *Request) (C
})
if err != nil {
- log.Trace(err.Error(), "ref", ref)
+ n.logger.Trace(err.Error(), "ref", ref)
return nil, err
}
c := v.(Chunk)
- log.Trace("netstore.singleflight returned", "ref", ref.String(), "err", err)
+ n.logger.Trace("netstore.singleflight returned", "ref", ref.String(), "err", err)
return c, nil
}
+ n.logger.Trace("netstore.get returned", "ref", ref.String())
ctx, ssp := spancontext.StartSpan(
ctx,
@@ -234,23 +236,23 @@ func (n *NetStore) RemoteFetch(ctx context.Context, req *Request, fi *Fetcher) e
"remote.fetch")
osp.LogFields(olog.String("ref", ref.String()))
- log.Trace("remote.fetch", "ref", ref)
+ n.logger.Trace("remote.fetch", "ref", ref)
- currentPeer, err := n.RemoteGet(ctx, req, n.localID)
+ currentPeer, err := n.RemoteGet(ctx, req, n.LocalID)
if err != nil {
- log.Trace(err.Error(), "ref", ref)
+ n.logger.Trace(err.Error(), "ref", ref)
osp.LogFields(olog.String("err", err.Error()))
osp.Finish()
return ErrNoSuitablePeer
}
// add peer to the set of peers to skip from now
- log.Trace("remote.fetch, adding peer to skip", "ref", ref, "peer", currentPeer.String())
+ n.logger.Trace("remote.fetch, adding peer to skip", "ref", ref, "peer", currentPeer.String())
req.PeersToSkip.Store(currentPeer.String(), time.Now())
select {
case <-fi.Delivered:
- log.Trace("remote.fetch, chunk delivered", "ref", ref)
+ n.logger.Trace("remote.fetch, chunk delivered", "ref", ref, "base", hex.EncodeToString(n.LocalID[:16]))
osp.LogFields(olog.Bool("delivered", true))
osp.Finish()
@@ -262,7 +264,7 @@ func (n *NetStore) RemoteFetch(ctx context.Context, req *Request, fi *Fetcher) e
osp.Finish()
break
case <-ctx.Done(): // global fetcher timeout
- log.Trace("remote.fetch, fail", "ref", ref)
+ n.logger.Trace("remote.fetch, fail", "ref", ref)
metrics.GetOrRegisterCounter("remote.fetch.timeout.global", nil).Inc(1)
osp.LogFields(olog.Bool("fail", true))
@@ -284,17 +286,9 @@ func (n *NetStore) GetOrCreateFetcher(ctx context.Context, ref Address, interest
n.putMu.Lock()
defer n.putMu.Unlock()
- has, err := n.Store.Has(ctx, ref)
- if err != nil {
- log.Error(err.Error())
- }
- if has {
- return nil, false, false
- }
-
f = NewFetcher()
v, loaded := n.fetchers.Get(ref.String())
- log.Trace("netstore.has-with-callback.loadorstore", "ref", ref.String(), "loaded", loaded)
+ n.logger.Trace("netstore.has-with-callback.loadorstore", "localID", n.LocalID.String()[:16], "ref", ref.String(), "loaded", loaded, "createdBy", interestedParty)
if loaded {
f = v.(*Fetcher)
} else {
diff --git a/storage/pin/pin_test.go b/storage/pin/pin_test.go
index 8ef6d50c69..b1a8bb78fb 100644
--- a/storage/pin/pin_test.go
+++ b/storage/pin/pin_test.go
@@ -232,7 +232,7 @@ func getPinApiAndFileStore(t *testing.T) (*API, *storage.FileStore, func()) {
t.Fatalf("could not create localstore. Error: %s", err.Error())
}
tags := chunk.NewTags()
- fileStore := storage.NewFileStore(lStore, storage.NewFileStoreParams(), tags)
+ fileStore := storage.NewFileStore(lStore, lStore, storage.NewFileStoreParams(), tags)
// Swarm feeds test setup
feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
diff --git a/storage/pyramid.go b/storage/pyramid.go
index aaad8ac482..8083a558b2 100644
--- a/storage/pyramid.go
+++ b/storage/pyramid.go
@@ -454,7 +454,6 @@ func (pc *PyramidChunker) prepareChunks(ctx context.Context, isAppend bool) {
copy(chunkData[8+readBytes:], res)
readBytes += len(res)
- log.Trace("pyramid.chunker: copied all data", "readBytes", readBytes)
if err != nil {
if err == io.EOF || err == io.ErrUnexpectedEOF {
diff --git a/swarm.go b/swarm.go
index e56c1cbae3..dc8952063e 100644
--- a/swarm.go
+++ b/swarm.go
@@ -48,7 +48,8 @@ import (
"github.com/ethersphere/swarm/fuse"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/network"
- "github.com/ethersphere/swarm/network/stream"
+ "github.com/ethersphere/swarm/network/retrieval"
+ "github.com/ethersphere/swarm/network/stream/v2"
"github.com/ethersphere/swarm/p2p/protocols"
"github.com/ethersphere/swarm/pss"
"github.com/ethersphere/swarm/state"
@@ -75,8 +76,9 @@ type Swarm struct {
dns api.Resolver // DNS registrar
fileStore *storage.FileStore // distributed preimage archive, the local API to the storage with document level storage/retrieval support
streamer *stream.Registry
- bzzEth *bzzeth.BzzEth
+ retrieval *retrieval.Retrieval
bzz *network.Bzz // the logistic manager
+ bzzEth *bzzeth.BzzEth
backend cswap.Backend
privateKey *ecdsa.PrivateKey
netStore *storage.NetStore
@@ -200,38 +202,33 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
)
nodeID := config.Enode.ID()
- self.netStore = storage.NewNetStore(lstore, nodeID)
+ self.netStore = storage.NewNetStore(lstore, bzzconfig.OverlayAddr, nodeID)
to := network.NewKademlia(
common.FromHex(config.BzzKey),
network.NewKadParams(),
)
- delivery := stream.NewDelivery(to, self.netStore)
- self.netStore.RemoteGet = delivery.RequestFromPeers
+ self.retrieval = retrieval.New(to, self.netStore, bzzconfig.OverlayAddr) // nodeID.Bytes())
+ self.netStore.RemoteGet = self.retrieval.RequestFromPeers
feedsHandler.SetStore(self.netStore)
- syncing := stream.SyncingAutoSubscribe
- if !config.SyncEnabled || config.LightNodeEnabled {
- syncing = stream.SyncingDisabled
+ syncing := true
+ if !config.SyncEnabled || config.LightNodeEnabled || config.BootnodeMode {
+ syncing = false
}
- registryOptions := &stream.RegistryOptions{
- SkipCheck: config.DeliverySkipCheck,
- Syncing: syncing,
- SyncUpdateDelay: config.SyncUpdateDelay,
- MaxPeerServers: config.MaxStreamPeerServers,
- }
- self.streamer = stream.NewRegistry(nodeID, delivery, self.netStore, self.stateStore, registryOptions, self.swap)
+ syncProvider := stream.NewSyncProvider(self.netStore, to, syncing, false)
+ self.streamer = stream.New(self.stateStore, bzzconfig.OverlayAddr, syncProvider)
self.tags = chunk.NewTags() //todo load from state store
// Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage
lnetStore := storage.NewLNetStore(self.netStore)
- self.fileStore = storage.NewFileStore(lnetStore, self.config.FileStoreParams, self.tags)
+ self.fileStore = storage.NewFileStore(lnetStore, localStore, self.config.FileStoreParams, self.tags)
log.Debug("Setup local storage")
- self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, self.streamer.GetSpec(), self.streamer.Run)
+ self.bzz = network.NewBzz(bzzconfig, to, self.stateStore, stream.Spec, retrieval.Spec, self.streamer.Run, self.retrieval.Run)
self.bzzEth = bzzeth.New()
@@ -444,8 +441,10 @@ func (s *Swarm) Start(srv *p2p.Server) error {
}(startTime)
startCounter.Inc(1)
- s.streamer.Start(srv)
- return nil
+ if err := s.streamer.Start(srv); err != nil {
+ return err
+ }
+ return s.retrieval.Start(srv)
}
// Stop stops all component services.
@@ -468,12 +467,19 @@ func (s *Swarm) Stop() error {
if s.accountingMetrics != nil {
s.accountingMetrics.Close()
}
+
+ if err := s.streamer.Stop(); err != nil {
+ log.Error("streamer stop", "err", err)
+ }
+ if err := s.retrieval.Stop(); err != nil {
+ log.Error("retrieval stop", "err", err)
+ }
+
if s.netStore != nil {
s.netStore.Close()
}
s.sfs.Stop()
stopCounter.Inc(1)
- s.streamer.Stop()
err := s.bzzEth.Stop()
if err != nil {
@@ -528,7 +534,7 @@ func (s *Swarm) APIs() []rpc.API {
{
Namespace: "bzz",
Version: "3.0",
- Service: api.NewInspector(s.api, s.bzz.Hive, s.netStore),
+ Service: api.NewInspector(s.api, s.bzz.Hive, s.netStore, s.streamer),
Public: false,
},
{
@@ -546,8 +552,8 @@ func (s *Swarm) APIs() []rpc.API {
}
apis = append(apis, s.bzz.APIs()...)
- apis = append(apis, s.bzzEth.APIs()...)
apis = append(apis, s.streamer.APIs()...)
+ apis = append(apis, s.bzzEth.APIs()...)
if s.ps != nil {
apis = append(apis, s.ps.APIs()...)
diff --git a/swarm_test.go b/swarm_test.go
index 0e71bdc256..36638dd7d9 100644
--- a/swarm_test.go
+++ b/swarm_test.go
@@ -93,6 +93,9 @@ func TestNewSwarm(t *testing.T) {
if s.streamer == nil {
t.Error("streamer not initialized")
}
+ if s.retrieval == nil {
+ t.Error("retrieval not initialized")
+ }
if s.fileStore == nil {
t.Error("fileStore not initialized")
}