Skip to content
This repository was archived by the owner on Aug 2, 2021. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
181 commits
Select commit Hold shift + click to select a range
e8f8f4b
network/syncer: initial commit
acud May 24, 2019
85884ec
network/newstream: pass bzz peer to run function
janos Jul 29, 2019
7e316d4
network: pass bzz peer to retrieval run function
janos Jul 29, 2019
5b0c155
network: pass the correct run function for retrieval protocol
janos Jul 29, 2019
a614719
network/newstream: remove notion of quit bins - no longer necessary
acud Jul 29, 2019
2b4e37f
Revert "network/newstream: remove notion of quit bins - no longer nec…
acud Jul 29, 2019
6dd176b
network/newstream: fix streams handling
acud Jul 29, 2019
c36fe30
network/newstream: fix loglines
acud Jul 29, 2019
24743f4
all: fix linter
acud Jul 29, 2019
3317445
network/newstream: add logging to see which test is failing
acud Jul 30, 2019
f5d1740
Revert "network/newstream: add logging to see which test is failing"
acud Jul 30, 2019
345253a
network/newstream: clean logging
acud Jul 30, 2019
ee78cbb
network/newstream: adjust timeouts for travis
acud Jul 30, 2019
d3e7ada
network/newstream: remove test cases overlap
acud Jul 30, 2019
2b391cf
network/newstream: remove overlapping tests, adjust test params for t…
acud Jul 30, 2019
29161fc
network/newstream: comment tests
acud Jul 30, 2019
1b1102f
network/newstream: exit clientSealBatch on stream quit
janos Jul 30, 2019
af33d8c
network/newstream: fix goroutine leak
acud Jul 30, 2019
1b7c105
all: pull changes from master
acud Jul 30, 2019
9117605
network/newstream: remove duplicate test
acud Jul 30, 2019
fcbcaf0
network/newstream: remove unused snapshot
janos Jul 30, 2019
e24bf84
network/newstream: clean up open offers in handleWantedHashes
janos Jul 30, 2019
a5091a4
chunk: avoid allocations in Address methods
janos Jul 31, 2019
d0a904b
Revert "chunk: avoid allocations in Address methods"
janos Aug 1, 2019
7df6986
network/newstream: add comments, make PeerCursors return a JSON, remo…
acud Aug 8, 2019
8e5b61e
network, storage: minor cleanup
acud Aug 9, 2019
cc0348f
network, storage: add more logging
acud Aug 9, 2019
0fd11c1
network: add logging
acud Aug 12, 2019
ed4f430
network, storage: add more meaningful logging
acud Aug 12, 2019
ff7ad82
netstore: more meaningful logging
acud Aug 12, 2019
6477971
network/newstream: use NetStore not LocalStore for chunk operations
janos Aug 12, 2019
9b1618a
swarm: remove skip check tests
acud Aug 13, 2019
995494d
network/newstream: implement stream pauser for tests and fix newSyncS…
janos Aug 13, 2019
6a48c22
storage: cherry pick netstore panic change
acud Aug 14, 2019
5c1be00
network/{newstream,retrieval,stream}: fix compilation errors
janos Aug 14, 2019
83c6166
network/newstream: fix fileStore.GetAllReferences call in tests
janos Aug 14, 2019
2f53ffd
network/newstream: remove unneeded conversions
janos Aug 14, 2019
571ad55
network/newstream: decrease loglevel
acud Aug 14, 2019
969e4ed
network: add test names for travis output
acud Aug 14, 2019
641d119
network/newstream: fix goroutine leak on peer disconnect
acud Aug 15, 2019
5f213bc
network/newstream: fixed possible few other leaks
acud Aug 15, 2019
cc96575
network/newstream: decrease params of test due to CI flaking
acud Aug 15, 2019
7f21e3c
network/newstream: remove log
acud Aug 15, 2019
b18700d
remove test logs
acud Aug 15, 2019
0c5e6ef
network/newstream: reduce log verbosity
acud Aug 15, 2019
7318ca0
network/newstream: counting chunks by bin ids is unreliable after chu…
janos Aug 15, 2019
2bf138c
network: reduce number of goroutines
acud Aug 15, 2019
37b7d61
network, : change batch size, revert localstore files
acud Aug 16, 2019
7768d63
revert change
acud Aug 16, 2019
7dc99c7
network/newstream: increase batch timeout
acud Aug 16, 2019
3fac3a0
network/newstream: apply janos fix, use context for put
acud Aug 16, 2019
f784f9e
storage: fix regression
acud Aug 16, 2019
a54beed
storage: fix regression
acud Aug 16, 2019
e98667d
retrieve: remove return, add test logline
acud Aug 16, 2019
e895ecf
storage: fix regression
acud Aug 16, 2019
a2886f9
network/newstream: fix ability to disable syncing
acud Aug 16, 2019
70cd645
network/newstream: reinstate goroutine to see if performance improves
acud Aug 16, 2019
7f48cee
all: check feasibility of changing filestore putter to localstore
acud Aug 17, 2019
1bdc8bf
network/newstream: change batch timeout
acud Aug 17, 2019
91ecfe6
network/newstream: add some time measurements, try to separate set an…
acud Aug 17, 2019
9fa12b4
network/newstream: move set
acud Aug 17, 2019
3947285
storage/netstore: move locks to prevent resource contention
acud Aug 19, 2019
fb4abbb
Revert "network/newstream: move set"
acud Aug 19, 2019
214e597
Revert "network/newstream: add some time measurements, try to separat…
acud Aug 19, 2019
267c27e
network/newstream: add some time measurements, try to separate set an…
acud Aug 19, 2019
9f6cff7
network: rename metrics
acud Aug 19, 2019
4b42eed
network/newstream: rename metrics
acud Aug 19, 2019
8068c2a
network/newstream: add more metrics
acud Aug 19, 2019
d99f336
network/newstream: rename metrics
acud Aug 19, 2019
cefa178
network/newstream: change to gauge
acud Aug 19, 2019
6da795c
network/newstream: fix metric
acud Aug 19, 2019
82f3c66
network/newstream: fix compiliation errors
acud Aug 19, 2019
508c54f
network/newstream: rename methods for clearer distinction on which si…
acud Aug 20, 2019
fcb0345
all: rename type to Registry
acud Aug 20, 2019
76ee52b
network/newstream: reduce goroutines, prevent deadlock
acud Aug 20, 2019
a3e8c30
network/newstream: rename receivers, related tests
acud Aug 20, 2019
b2d6f22
network/newstream: remove underscores
acud Aug 20, 2019
910e28f
Revert "network/newstream: reduce goroutines"
acud Aug 20, 2019
6c22700
network/newstream: rename conflicting metrics
acud Aug 20, 2019
fcbb039
Revert "Revert "network/newstream: reduce goroutines""
acud Aug 20, 2019
329a4c2
network/retrieve: fix logging and some metrics
acud Aug 20, 2019
8a8a104
network/retrieval: fix dup metric
acud Aug 20, 2019
ea59871
chunk, storage: chunk.Store multiple chunk put
janos Aug 20, 2019
a4e946e
network/newstream: multiput integration
acud Aug 20, 2019
164ad74
network/newstream: add more metrics to trace speed
acud Aug 21, 2019
5ba18f5
network/newstream: change how metric is updated
acud Aug 21, 2019
1d255c6
storage: add slow chunk metric
acud Aug 21, 2019
b0c8ccf
network/newstream: change batch size
acud Aug 21, 2019
b63c39b
all: try some rate limiting
acud Aug 21, 2019
68c8c9a
network, storage: change pyramid chunker job limit, change metric
acud Aug 21, 2019
7888f62
network/newstream: metrics
acud Aug 21, 2019
2ed1b67
network/newstream: peer should drop on timeout
acud Aug 21, 2019
fd71540
network/newstream: change frame size to see performance impact
acud Aug 21, 2019
9c9970e
network/newstream: add timer to see how long gets take
acud Aug 21, 2019
683117e
network/newstream: adjust frame size
acud Aug 21, 2019
6bb3362
network/newstream: experiment with adding cache
acud Aug 21, 2019
96e40ca
network/newstream: change how metrics are updated
acud Aug 21, 2019
cae5c03
stream: add cache hit metric
acud Aug 21, 2019
0374b5c
network: removed some log lines
acud Aug 21, 2019
972935e
netstore: move lock
acud Aug 22, 2019
bf510a2
newstream: check cache before has
acud Aug 22, 2019
dceb1cd
newstream: remove semaphore
acud Aug 22, 2019
0b055e0
stream: change maxframe
acud Aug 22, 2019
1ee2736
stream: adjust batchsize and timeout
acud Aug 26, 2019
99d2e90
Revert "all: check feasibility of changing filestore putter to locals…
acud Aug 26, 2019
b96cdc8
swarm: fix tags init
acud Aug 26, 2019
3a06260
stream: integrate multi has
acud Aug 26, 2019
8d6223c
netstore: has within GetOrCreateFetcher no longer necessary as callin…
acud Aug 26, 2019
d7079ce
network, storage: fix changes from cherry pick
acud Aug 26, 2019
b11210c
network/newstream: integrate localstore multi set
janos Aug 26, 2019
7b97e39
netstore: voodoo
acud Aug 26, 2019
d11db84
stream: integrate multi has with cache
acud Aug 27, 2019
8190844
stream: integrate multiget
acud Aug 27, 2019
94fe7e1
stream: debug unsolicited chunks
acud Aug 27, 2019
b578ace
stream: batch size
acud Aug 27, 2019
c13ed96
network/newstream: sync provider MultiNeedData cleanup
janos Aug 27, 2019
dbbbae1
stream: interleave requests
acud Aug 27, 2019
eb09df5
batchsize
acud Aug 27, 2019
a2bd518
main: add mutex profile
acud Aug 27, 2019
93a0e7a
stream: more granular lock
acud Aug 29, 2019
894ed21
stream: remove panics
acud Aug 29, 2019
ef12c5a
storage: fix hasherstore seen check to happen when error is nil
acud Aug 29, 2019
7fbf792
stream: clean up and simplify
acud Aug 30, 2019
309fa5a
stream: cleanup
acud Aug 30, 2019
9c3e915
stream: more cleanup
acud Aug 30, 2019
7b52485
stream: more cleanups
acud Aug 30, 2019
07a0607
sync_sym.go: add simulation
janos Aug 30, 2019
1436818
network/newstream: remove unused variable
janos Aug 30, 2019
7bda6bf
Merge branch 'master' into new-syncer-gs
acud Aug 30, 2019
7834992
stream: cleanup
acud Aug 30, 2019
49584be
stream: add comments, cleanup, remove duplicate code
acud Aug 30, 2019
9a8b98e
stream: cleanup cleanup cleanup
acud Aug 30, 2019
3d1ced6
Merge branch 'master' into new-syncer-gs
acud Aug 30, 2019
174640e
stream: remove Roundtrip
acud Aug 30, 2019
825cf24
Merge branch 'new-syncer-gs' of github.com:ethersphere/swarm into new…
acud Aug 31, 2019
07e88bc
dockerfile: add perf!
acud Aug 31, 2019
468512f
stream: increase batch size
acud Aug 31, 2019
724efbc
Dockerfile: add kernel profiler to alltools image
acud Aug 31, 2019
5684eba
all: check feasibility of changing filestore putter to localstore
acud Aug 31, 2019
5edb70b
stream: increase batch size
acud Aug 31, 2019
5471bdf
client: increase bitvector size again
acud Aug 31, 2019
c29204c
stream: fixed bug with full empty interval not sending back the corre…
acud Aug 31, 2019
2cc03c3
smoke, stream: add more metrics, fix duplicate trackChunks
acud Aug 31, 2019
af0132c
retrieve: handle retrieve requests in a goroutine
acud Sep 1, 2019
3794ae2
retrieve: move goroutine launch to message handler
acud Sep 1, 2019
2555544
netstore: move mutex for the 1+e11th time
acud Sep 1, 2019
edd5446
docker: change level
acud Sep 1, 2019
a8f5b17
network/newstream: set all chunks as synced in serverHandleWantedHashes
janos Sep 2, 2019
5b243c1
stream: cleanup, more comments, reiterate cache semantics
acud Sep 3, 2019
9b6d7d2
stream: fix compile err
acud Sep 3, 2019
5c906db
newstream: add snapshot sync test
acud Sep 3, 2019
8ebdefd
newstream: add snapshot sync test
acud Sep 3, 2019
67d4f08
retrieve: skip peer which from findPeer which was not found in protoc…
acud Sep 3, 2019
547e1a4
retrieve, stream: fix peers to skip, remove unused metric
acud Sep 3, 2019
78a281c
api, network/newstream: add intervals to PeerStreams inspector response
janos Sep 3, 2019
7ebc879
api: fix inspector
acud Sep 3, 2019
2a2521f
network/newstream: add provider cursors to PeerInfo
janos Sep 3, 2019
07e5e58
dockerfile: add jq and websocat
acud Sep 4, 2019
3dd9fa1
api: convert peerInfo response to string
acud Sep 4, 2019
6fd9eeb
api, network/newstream: return error from PeerInfo
janos Sep 4, 2019
5308b68
api: inspector PeerStreams returns struct and add TestInspectorPeerSt…
janos Sep 4, 2019
ac6f356
network/newstream: truncate peer keys to 16 in PeerInfo
janos Sep 4, 2019
133a909
api: inspector peer streams returns string, again
janos Sep 4, 2019
e1d38ee
network/newstream: add kademlia to PeerInfo response
janos Sep 4, 2019
98aac6e
network/newstream: one intervals for registry in PeerInfo
janos Sep 4, 2019
c1b1dca
network/newstream: use bzz oaddr in peerStreamIntervalKey
janos Sep 4, 2019
80b5705
smoke: change to bytes
acud Sep 4, 2019
5e9024b
smoke: remove submitMetrics and add number of nodes metric
acud Sep 5, 2019
b00ce81
stream: move the locks again
acud Sep 6, 2019
c418705
network/newstream: encode sync stream keys with base 36 (#1727)
janos Sep 6, 2019
6d466b9
stream: remove unnecessary shadowing
acud Sep 6, 2019
120c034
network/stream: remove package
janos Sep 10, 2019
9be1a68
network/simulation, pss, storage/localstore: fix tests
janos Sep 10, 2019
61d7452
all: address some review comments
janos Sep 10, 2019
4f311d3
network/stream/v2: fix BenchmarkHistoricalStream
janos Sep 11, 2019
2a8bca9
Merge branch 'master' into new-syncer-gs
janos Sep 11, 2019
94a4f83
Merge branch 'master' into new-syncer-gs
janos Sep 11, 2019
0965d3c
network/stream/v2: enable TestSyncingViaGlobalSync on travis
janos Sep 11, 2019
645f520
network/stream/v2: reduce TestSyncingViaGlobalSync test cases
janos Sep 11, 2019
ae2bf00
network/stream/v2: align want.remaining
janos Sep 11, 2019
7b906c2
network/stream/v2: limit test cases on 386 arch
janos Sep 11, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion api/http/test_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API, *pin.API) TestSe
}

tags := chunk.NewTags()
fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams(), tags)
fileStore := storage.NewFileStore(localStore, localStore, storage.NewFileStoreParams(), tags)

// Swarm feeds test setup
feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
Expand Down
28 changes: 21 additions & 7 deletions api/inspector.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,24 +18,29 @@ package api

import (
"context"
"encoding/json"
"fmt"
"strings"
"time"

"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/network"
stream "github.com/ethersphere/swarm/network/stream/v2"
"github.com/ethersphere/swarm/storage"
)

const InspectorIsPullSyncingTolerance = 15 * time.Second

type Inspector struct {
api *API
hive *network.Hive
netStore *storage.NetStore
stream *stream.Registry
}

func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore) *Inspector {
return &Inspector{api, hive, netStore}
func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore, pullSyncer *stream.Registry) *Inspector {
return &Inspector{api, hive, netStore, pullSyncer}
}

// Hive prints the kademlia table
Expand All @@ -49,15 +54,12 @@ func (i *Inspector) KademliaInfo() network.KademliaInfo {
}

// IsPullSyncing reports whether the node appears to still be pull syncing,
// judged by whether a chunk was received within the last
// InspectorIsPullSyncingTolerance.
func (i *Inspector) IsPullSyncing() bool {
	t := i.stream.LastReceivedChunkTime()

	// if last received chunks msg time is within the tolerance window, then we say that the node is still syncing
	// technically this is not correct, because this might have been a retrieve request, but for the time being it works for our purposes
	// because we know we are not making retrieve requests on the node while checking this
	return t.After(time.Now().Add(-InspectorIsPullSyncingTolerance))
}

// DeliveriesPerPeer returns the sum of chunks we received from a given peer
Expand Down Expand Up @@ -96,3 +98,15 @@ func (i *Inspector) Has(chunkAddresses []storage.Address) string {

return strings.Join(hostChunks, "")
}

func (i *Inspector) PeerStreams() (string, error) {
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

comment exported functions

peerInfo, err := i.stream.PeerInfo()
if err != nil {
return "", err
}
v, err := json.Marshal(peerInfo)
if err != nil {
return "", err
}
return string(v), nil
}
66 changes: 66 additions & 0 deletions api/inspector_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
package api

import (
"crypto/rand"
"encoding/hex"
"io/ioutil"
"os"
"strings"
"testing"

"github.com/ethersphere/swarm/network"
stream "github.com/ethersphere/swarm/network/stream/v2"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/storage/localstore"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm/state"
)

// TestInspectorPeerStreams validates that response from RPC peerStream has at
// least some data.
func TestInspectorPeerStreams(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "swarm-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	baseKey := make([]byte, 32)
	if _, err = rand.Read(baseKey); err != nil {
		t.Fatal(err)
	}

	store, err := localstore.New(tmpDir, baseKey, &localstore.Options{})
	if err != nil {
		t.Fatal(err)
	}
	ns := storage.NewNetStore(store, baseKey, enode.ID{})

	// Build a registry with a sync provider so PeerInfo has data to report.
	kad := network.NewKademlia(baseKey, network.NewKadParams())
	registry := stream.New(state.NewInmemoryStore(), baseKey, stream.NewSyncProvider(ns, kad, false, false))
	insp := NewInspector(nil, nil, ns, registry)

	server := rpc.NewServer()
	if err := server.RegisterName("inspector", insp); err != nil {
		t.Fatal(err)
	}

	client := rpc.DialInProc(server)

	var peerInfo string
	if err = client.Call(&peerInfo, "inspector_peerStreams"); err != nil {
		t.Fatal(err)
	}

	// The response must contain this node's truncated base overlay address.
	if !strings.Contains(peerInfo, `"base":"`+hex.EncodeToString(baseKey)[:16]+`"`) {
		t.Error("missing base key in response")
	}

	t.Log(peerInfo)
}
6 changes: 3 additions & 3 deletions api/manifest_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ func manifest(paths ...string) (manifestReader storage.LazySectionReader) {

func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
quitC := make(chan bool)
fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
fileStore := storage.NewFileStore(nil, nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt)
if err != nil {
Expand Down Expand Up @@ -100,7 +100,7 @@ func TestGetEntry(t *testing.T) {
func TestExactMatch(t *testing.T) {
quitC := make(chan bool)
mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
fileStore := storage.NewFileStore(nil, nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(mf, ref, fileStore, false, quitC, nil)
if err != nil {
Expand Down Expand Up @@ -133,7 +133,7 @@ func TestAddFileWithManifestPath(t *testing.T) {
reader := &storage.LazyTestSectionReader{
SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
}
fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
fileStore := storage.NewFileStore(nil, nil, storage.NewFileStoreParams(), chunk.NewTags())
ref := make([]byte, fileStore.HashSize())
trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt)
if err != nil {
Expand Down
11 changes: 9 additions & 2 deletions cmd/swarm-smoke/upload_and_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,7 @@ func trackChunks(testData []byte) error {
wg.Wait()

checkChunksVsMostProxHosts(addrs, allHostChunks, bzzAddrs)
metrics.GetOrRegisterGauge("deployment.nodes", nil).Update(int64(len(hosts)))

if !hasErr {
// remove the chunks stored on the uploader node
Expand Down Expand Up @@ -207,11 +208,10 @@ func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[strin
}
}

log.Debug("sync mode", "sync mode", syncMode)

if syncMode == "pullsync" || syncMode == "both" {
for _, maxProxHost := range maxProxHosts {
if allHostChunks[maxProxHost][i] == '0' {
metrics.GetOrRegisterCounter("upload-and-sync.pull-sync.chunk-not-max-prox", nil).Inc(1)
log.Error("chunk not found at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
} else {
log.Trace("chunk present at max prox host", "ref", addrs[i], "host", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
Expand All @@ -220,6 +220,7 @@ func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[strin

// if chunk found at less than 2 hosts, which is actually less that the min size of a NN
if foundAt < 2 {
metrics.GetOrRegisterCounter("upload-and-sync.pull-sync.chunk-less-nn", nil).Inc(1)
log.Error("chunk found at less than two hosts", "foundAt", foundAt, "ref", addrs[i])
}
}
Expand All @@ -235,6 +236,7 @@ func checkChunksVsMostProxHosts(addrs []storage.Address, allHostChunks map[strin

if !found {
for _, maxProxHost := range maxProxHosts {
metrics.GetOrRegisterCounter("upload-and-sync.push-sync.chunk-not-max-prox", nil).Inc(1)
log.Error("chunk not found at any max prox host", "ref", addrs[i], "hosts", maxProxHost, "bzzAddr", bzzAddrs[maxProxHost])
}
}
Expand Down Expand Up @@ -270,6 +272,8 @@ func uploadAndSync(c *cli.Context, randomBytes []byte) error {
}
t2 := time.Since(t1)
metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).Update(t2)
uploadSpeed := float64(len(randomBytes)) / t2.Seconds() // bytes per second
metrics.GetOrRegisterGauge("upload-and-sync.upload-speed", nil).Update(int64(uploadSpeed))

fhash, err := digest(bytes.NewReader(randomBytes))
if err != nil {
Expand Down Expand Up @@ -315,6 +319,9 @@ func uploadAndSync(c *cli.Context, randomBytes []byte) error {
ended := time.Since(start)

metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
downloadSpeed := float64(len(randomBytes)) / ended.Seconds() // bytes per second
metrics.GetOrRegisterGauge("upload-and-sync.download-speed", nil).Update(int64(downloadSpeed))

log.Info("fetch successful", "took", ended, "endpoint", httpEndpoint(hosts[randIndex]))
break
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/swarm-snapshot/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ func createSnapshot(filename string, nodes int, services []string) (err error) {
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
return network.NewBzz(config, kad, nil, nil, nil), nil, nil
return network.NewBzz(config, kad, nil, nil, nil, nil, nil), nil, nil
},
})
defer sim.Close()
Expand Down
12 changes: 11 additions & 1 deletion cmd/swarm/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,11 @@ import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm"
"github.com/ethersphere/swarm/api"
"github.com/ethersphere/swarm/testutil"
)

func TestConfigDump(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
swarm := runSwarm(t, "--verbosity", fmt.Sprintf("%d", *testutil.Loglevel), "dumpconfig")
defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
Expand All @@ -53,6 +54,7 @@ func TestConfigFailsSwapEnabledNoBackendURL(t *testing.T) {
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", SwarmSwapEnabledFlag.Name),
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}

swarm := runSwarm(t, flags...)
Expand Down Expand Up @@ -89,6 +91,7 @@ func TestBzzKeyFlag(t *testing.T) {
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
fmt.Sprintf("--%s", SwarmBzzKeyHexFlag.Name), hexKey,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}

node.Cmd = runSwarm(t, flags...)
Expand Down Expand Up @@ -137,6 +140,7 @@ func TestEmptyBzzAccountFlagMultipleAccounts(t *testing.T) {
fmt.Sprintf("--%s", SwarmPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}

node.Cmd = runSwarm(t, flags...)
Expand All @@ -160,6 +164,7 @@ func TestEmptyBzzAccountFlagSingleAccount(t *testing.T) {
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}

node.Cmd = runSwarm(t, flags...)
Expand Down Expand Up @@ -205,6 +210,7 @@ func TestEmptyBzzAccountFlagNoAccountWrongPassword(t *testing.T) {
fmt.Sprintf("--%s", SwarmPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.ListenPortFlag.Name), "0",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}

node.Cmd = runSwarm(t, flags...)
Expand Down Expand Up @@ -244,6 +250,7 @@ func TestConfigCmdLineOverrides(t *testing.T) {
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
Expand Down Expand Up @@ -342,6 +349,7 @@ func TestConfigFileOverrides(t *testing.T) {
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
Expand Down Expand Up @@ -420,6 +428,7 @@ func TestConfigEnvVars(t *testing.T) {
"--ens-api", "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}

//node.Cmd = runSwarm(t,flags...)
Expand Down Expand Up @@ -551,6 +560,7 @@ func TestConfigCmdLineOverridesFile(t *testing.T) {
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
fmt.Sprintf("--%s", utils.DataDirFlag.Name), dir,
fmt.Sprintf("--%s", utils.IPCPathFlag.Name), conf.IPCPath,
"--verbosity", fmt.Sprintf("%d", *testutil.Loglevel),
}
node.Cmd = runSwarm(t, flags...)
node.Cmd.InputLine(testPassphrase)
Expand Down
2 changes: 1 addition & 1 deletion cmd/swarm/explore.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func hashes(ctx *cli.Context) {
}
defer f.Close()

fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, &storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
refs, err := fileStore.GetAllReferences(context.TODO(), f)
if err != nil {
utils.Fatalf("%v\n", err)
Expand Down
2 changes: 1 addition & 1 deletion cmd/swarm/hash.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ func hash(ctx *cli.Context) {
defer f.Close()

stat, _ := f.Stat()
fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())
fileStore := storage.NewFileStore(&storage.FakeChunkStore{}, &storage.FakeChunkStore{}, storage.NewFileStoreParams(), chunk.NewTags())

addr, _, err := fileStore.Store(context.TODO(), f, stat.Size(), false)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion network/networkid_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,7 @@ func newServices() adapters.Services {
HiveParams: hp,
NetworkID: uint64(currentNetworkID),
}
return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil), nil
return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil, nil, nil), nil
},
}
}
Expand Down
Loading