This repository was archived by the owner on Aug 2, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 109
swarm/network: remove dead code #1339
Merged
nonsense
merged 12 commits into
ethersphere:swarm-rather-stable
from
nonsense:remove-dead-code
Apr 12, 2019
Merged
Changes from all commits
Commits
Show all changes
12 commits
Select commit
Hold shift + click to select a range
e709d5e
swarm/network: remove dead code
nonsense f0ef324
remove tests that reference the RETRIEVE_REQUEST stream
nonsense 3321d14
fix Drop(err)
nonsense ea51670
swarm/network/stream: log errors if handlers fail
nonsense 7734d31
remove skipCheck log line
nonsense ebab145
quit emit function on r.quit
nonsense 89c908d
revert node/config warning
nonsense 7d23cb2
remove retrieval options
nonsense ab8bb36
remove retrieval options
nonsense 33420e0
add delivery.Close() and quit chan
nonsense d568f0b
remove emit metrics
nonsense 4bb2f0b
make the linter happy
nonsense File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -33,11 +33,6 @@ import ( | |
| olog "github.com/opentracing/opentracing-go/log" | ||
| ) | ||
|
|
||
| const ( | ||
| swarmChunkServerStreamName = "RETRIEVE_REQUEST" | ||
| deliveryCap = 32 | ||
| ) | ||
|
|
||
| var ( | ||
| processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil) | ||
| handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil) | ||
|
|
@@ -51,85 +46,15 @@ type Delivery struct { | |
| chunkStore chunk.FetchStore | ||
| kad *network.Kademlia | ||
| getPeer func(enode.ID) *Peer | ||
| quit chan struct{} | ||
| } | ||
|
|
||
| func NewDelivery(kad *network.Kademlia, chunkStore chunk.FetchStore) *Delivery { | ||
| return &Delivery{ | ||
| chunkStore: chunkStore, | ||
| kad: kad, | ||
| } | ||
| } | ||
|
|
||
| // SwarmChunkServer implements Server | ||
| type SwarmChunkServer struct { | ||
| deliveryC chan []byte | ||
| batchC chan []byte | ||
| chunkStore storage.ChunkStore | ||
| currentLen uint64 | ||
| quit chan struct{} | ||
| } | ||
|
|
||
| // NewSwarmChunkServer is SwarmChunkServer constructor | ||
| func NewSwarmChunkServer(chunkStore storage.ChunkStore) *SwarmChunkServer { | ||
| s := &SwarmChunkServer{ | ||
| deliveryC: make(chan []byte, deliveryCap), | ||
| batchC: make(chan []byte), | ||
| chunkStore: chunkStore, | ||
| quit: make(chan struct{}), | ||
| } | ||
| go s.processDeliveries() | ||
| return s | ||
| } | ||
|
|
||
| // processDeliveries handles delivered chunk hashes | ||
| func (s *SwarmChunkServer) processDeliveries() { | ||
| var hashes []byte | ||
| var batchC chan []byte | ||
| for { | ||
| select { | ||
| case <-s.quit: | ||
| return | ||
| case hash := <-s.deliveryC: | ||
| hashes = append(hashes, hash...) | ||
| batchC = s.batchC | ||
| case batchC <- hashes: | ||
| hashes = nil | ||
| batchC = nil | ||
| } | ||
| } | ||
| } | ||
|
|
||
| // SessionIndex returns zero in all cases for SwarmChunkServer. | ||
| func (s *SwarmChunkServer) SessionIndex() (uint64, error) { | ||
| return 0, nil | ||
| } | ||
|
|
||
| // SetNextBatch | ||
| func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) { | ||
| select { | ||
| case hashes = <-s.batchC: | ||
| case <-s.quit: | ||
| return | ||
| } | ||
|
|
||
| from = s.currentLen | ||
| s.currentLen += uint64(len(hashes)) | ||
| to = s.currentLen | ||
| return | ||
| } | ||
|
|
||
| // Close needs to be called on a stream server | ||
| func (s *SwarmChunkServer) Close() { | ||
| close(s.quit) | ||
| } | ||
|
|
||
| // GetData retrieves chunk data from db store | ||
| func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) { | ||
| ch, err := s.chunkStore.Get(ctx, chunk.ModeGetRequest, storage.Address(key)) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| return ch.Data(), nil | ||
| } | ||
|
|
||
| // RetrieveRequestMsg is the protocol msg for chunk retrieve requests | ||
|
|
@@ -150,12 +75,6 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * | |
|
|
||
| osp.LogFields(olog.String("ref", req.Addr.String())) | ||
|
|
||
| s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", true)) | ||
| if err != nil { | ||
| return err | ||
| } | ||
| streamer := s.Server.(*SwarmChunkServer) | ||
|
|
||
| var cancel func() | ||
| // TODO: do something with this hardcoded timeout, maybe use TTL in the future | ||
| ctx = context.WithValue(ctx, "peer", sp.ID().String()) | ||
|
|
@@ -165,7 +84,7 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * | |
| go func() { | ||
| select { | ||
| case <-ctx.Done(): | ||
| case <-streamer.quit: | ||
|
nonsense marked this conversation as resolved.
|
||
| case <-d.quit: | ||
| } | ||
| cancel() | ||
| }() | ||
|
|
@@ -178,23 +97,13 @@ func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req * | |
| log.Debug("ChunkStore.Get can not retrieve chunk", "peer", sp.ID().String(), "addr", req.Addr, "hopcount", req.HopCount, "err", err) | ||
| return | ||
| } | ||
| if req.SkipCheck { | ||
| syncing := false | ||
| osp.LogFields(olog.Bool("skipCheck", true)) | ||
| syncing := false | ||
|
|
||
| err = sp.Deliver(ctx, ch, s.priority, syncing) | ||
| if err != nil { | ||
| log.Warn("ERROR in handleRetrieveRequestMsg", "err", err) | ||
| } | ||
| osp.LogFields(olog.Bool("delivered", true)) | ||
| return | ||
| } | ||
| osp.LogFields(olog.Bool("skipCheck", false)) | ||
| select { | ||
| case streamer.deliveryC <- ch.Address()[:]: | ||
| case <-streamer.quit: | ||
| err = sp.Deliver(ctx, ch, Top, syncing) | ||
| if err != nil { | ||
| log.Warn("ERROR in handleRetrieveRequestMsg", "err", err) | ||
| } | ||
|
|
||
| osp.LogFields(olog.Bool("delivered", true)) | ||
| }() | ||
|
|
||
| return nil | ||
|
|
@@ -244,22 +153,16 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req int | |
| case *ChunkDeliveryMsgSyncing: | ||
| msg = (*ChunkDeliveryMsg)(r) | ||
| mode = chunk.ModePutSync | ||
| case *ChunkDeliveryMsg: | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. One of the tests is actually sending a `ChunkDeliveryMsg`. This is not code that is hit in production, but I decided to add this here temporarily, rather than dig into the test. It is not clear why this test was not failing with the previous code. |
||
| msg = r | ||
| mode = chunk.ModePutSync | ||
| } | ||
|
|
||
| // retrieve the span for the originating retrieverequest | ||
| spanID := fmt.Sprintf("stream.send.request.%v.%v", sp.ID(), msg.Addr) | ||
| span := tracing.ShiftSpanByKey(spanID) | ||
|
|
||
| log.Trace("handle.chunk.delivery", "ref", msg.Addr, "from peer", sp.ID()) | ||
|
|
||
| go func() { | ||
| defer osp.Finish() | ||
|
|
||
| if span != nil { | ||
| span.LogFields(olog.String("finish", "from handleChunkDeliveryMsg")) | ||
| defer span.Finish() | ||
| } | ||
|
|
||
| msg.peer = sp | ||
| log.Trace("handle.chunk.delivery", "put", msg.Addr) | ||
| _, err := d.chunkStore.Put(ctx, mode, storage.NewChunk(msg.Addr, msg.SData)) | ||
|
|
@@ -268,14 +171,20 @@ func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req int | |
| // we removed this log because it spams the logs | ||
| // TODO: Enable this log line | ||
| // log.Warn("invalid chunk delivered", "peer", sp.ID(), "chunk", msg.Addr, ) | ||
| msg.peer.Drop(err) | ||
| msg.peer.Drop() | ||
| } | ||
| } | ||
| log.Trace("handle.chunk.delivery", "done put", msg.Addr, "err", err) | ||
| }() | ||
| return nil | ||
| } | ||
|
|
||
| func (d *Delivery) Close() { | ||
| d.kad.CloseNeighbourhoodDepthC() | ||
| d.kad.CloseAddrCountC() | ||
| close(d.quit) | ||
| } | ||
|
|
||
| // RequestFromPeers sends a chunk retrieve request to a peer | ||
| // The most eligible peer that hasn't already been sent to is chosen | ||
| // TODO: define "eligible" | ||
|
|
||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
can we please preserve the error and log it or even put it in p2p.DiscSubprotocolError?
this is losing too much contextual info
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I agree this could be converted to:
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We could log the error, but I'd rather not propagate it down to p2p, and replace
`DiscSubprotocolError` just now, as this is changing behaviour, and we already have too many changes. The current change preserves the behavior we already have.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@justelad I am not changing this behavior as well in this PR, let's keep it as small as possible.