fix: use accurate bucket logic
This reverts an optimization where we fudged the set of closest peers returned to users when we were within a power of two of the target.
Stebalien committed Mar 27, 2020
1 parent 6f18d79 commit 08f2efa
Showing 4 changed files with 27 additions and 101 deletions.
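For context, "within a power of two of the target" is a statement about common prefix length (CPL): if two IDs first differ at bit cpl, their XOR distance lands in the band around 2**(256-cpl), so every peer in a given bucket sits at roughly the same distance from the target. The snippet below is only a rough illustration of that idea; it is not the library's CommonPrefixLen implementation, and the names are made up for the example.

```go
package main

import (
	"fmt"
	"math/bits"
)

// commonPrefixLen counts the leading bits shared by two equal-length IDs,
// i.e. the leading zeros of their XOR. IDs with the same CPL to a target all
// sit in the same power-of-two distance band, roughly 2**(bitlen-cpl) away.
func commonPrefixLen(a, b []byte) int {
	for i := range a {
		if x := a[i] ^ b[i]; x != 0 {
			return i*8 + bits.LeadingZeros8(x)
		}
	}
	return len(a) * 8
}

func main() {
	a := []byte{0b1010_0000}
	b := []byte{0b1011_1111}
	fmt.Println(commonPrefixLen(a, b)) // 3: the IDs agree on their first 3 bits
}
```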
2 changes: 1 addition & 1 deletion go.mod
@@ -2,7 +2,7 @@ module github.com/libp2p/go-libp2p-kbucket

require (
github.com/ipfs/go-ipfs-util v0.0.1
github.com/ipfs/go-log v0.0.1
github.com/ipfs/go-log v1.0.2
github.com/jbenet/goprocess v0.1.3
github.com/libp2p/go-libp2p-core v0.5.0
github.com/libp2p/go-libp2p-peerstore v0.2.0
6 changes: 6 additions & 0 deletions go.sum
@@ -82,7 +82,9 @@ github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv
github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc=
github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=
github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM=
github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0=
github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk=
github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw=
github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0=
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY=
@@ -195,6 +197,7 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
@@ -243,8 +246,11 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
20 changes: 10 additions & 10 deletions table.go
@@ -415,18 +415,18 @@ func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID {
// Add peers from the target bucket (cpl+1 shared bits).
pds.appendPeersFromList(rt.buckets[cpl].list)

// If we're short, add peers from buckets to the right until we have
// enough. All buckets to the right share exactly cpl bits (as opposed
// to the cpl+1 bits shared by the peers in the cpl bucket).
// If we're short, add peers from all buckets to the right. All buckets
// to the right share exactly cpl bits (as opposed to the cpl+1 bits
// shared by the peers in the cpl bucket).
//
// Unfortunately, to be completely correct, we can't just take from
// buckets until we have enough peers because peers because _all_ of
// these peers will be ~2**(256-cpl) from us.
//
// However, we're going to do that anyways as it's "good enough"
// This is, unfortunately, less efficient than we'd like. We will switch
// to a trie implementation eventually which will allow us to find the
// closest N peers to any target key.

for i := cpl + 1; i < len(rt.buckets) && pds.Len() < count; i++ {
pds.appendPeersFromList(rt.buckets[i].list)
if pds.Len() < count {
for i := cpl + 1; i < len(rt.buckets); i++ {
pds.appendPeersFromList(rt.buckets[i].list)
}
}

// If we're still short, add in buckets that share _fewer_ bits. We can
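The restored table.go logic above only spills into buckets to the right of the target bucket when that bucket alone cannot satisfy the request, and then it takes all of them: those peers all share exactly cpl bits with the target, so they sit in the same distance band and stopping early could drop peers that sort closer. Below is a minimal sketch of that control flow, assuming plain string slices in place of the real bucket and peer-distance-sorter types (the names here are illustrative, not the package's API):

```go
// nearestCandidates gathers candidate peers for the count closest to a target
// whose CPL with the local ID is cpl. buckets[i] holds peers whose CPL with
// the local ID is i.
func nearestCandidates(buckets [][]string, cpl, count int) []string {
	// Start with the target bucket.
	out := append([]string{}, buckets[cpl]...)

	// If we're short, take *all* buckets to the right. Their peers share
	// exactly cpl bits with the target, so they are roughly equidistant and
	// taking only a prefix of them could miss peers that actually sort closer.
	if len(out) < count {
		for i := cpl + 1; i < len(buckets); i++ {
			out = append(out, buckets[i]...)
		}
	}

	// Still short: fall back to buckets that share fewer bits. Each step to
	// the left is strictly farther from the target, so here it is safe to
	// stop as soon as we have enough candidates.
	for i := cpl - 1; i >= 0 && len(out) < count; i-- {
		out = append(out, buckets[i]...)
	}

	// The caller sorts these candidates by XOR distance to the target and
	// truncates to count.
	return out
}
```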
100 changes: 10 additions & 90 deletions table_test.go
@@ -409,124 +409,44 @@ func TestTableFindMultiple(t *testing.T) {
}
}

func assertSortedPeerIdsEqual(t *testing.T, a, b []peer.ID) {
t.Helper()
if len(a) != len(b) {
t.Fatal("slices aren't the same length")
}
for i, p := range a {
if p != b[i] {
t.Fatalf("unexpected peer %d", i)
}
}
}

func TestTableFindMultipleBuckets(t *testing.T) {
t.Parallel()

local := test.RandPeerIDFatal(t)
localID := ConvertPeerID(local)
m := pstore.NewMetrics()

rt, err := NewRoutingTable(5, localID, time.Hour, m, PeerValidationFnc(PeerAlwaysValidFnc))
require.NoError(t, err)
rt, err := NewRoutingTable(5, ConvertPeerID(local), time.Hour, m, PeerValidationFnc(PeerAlwaysValidFnc))
if err != nil {
t.Fatal(err)
}

peers := make([]peer.ID, 100)
for i := 0; i < 100; i++ {
peers[i] = test.RandPeerIDFatal(t)
rt.HandlePeerAlive(peers[i])
}

targetID := ConvertPeerID(peers[2])

closest := SortClosestPeers(rt.ListPeers(), targetID)
targetCpl := CommonPrefixLen(localID, targetID)

// split the peers into closer, same, and further from the key (than us).
var (
closer, same, further []peer.ID
)
var i int
for i = 0; i < len(closest); i++ {
cpl := CommonPrefixLen(ConvertPeerID(closest[i]), targetID)
if targetCpl >= cpl {
break
}
}
closer = closest[:i]
closest := SortClosestPeers(rt.ListPeers(), ConvertPeerID(peers[2]))

var j int
for j = len(closer); j < len(closest); j++ {
cpl := CommonPrefixLen(ConvertPeerID(closest[j]), targetID)
if targetCpl > cpl {
break
}
}
same = closest[i:j]
further = closest[j:]
t.Logf("Searching for peer: '%s'", peers[2])

// should be able to find at least 30
// ~31 (logtwo(100) * 5)
found := rt.NearestPeers(targetID, 20)
found := rt.NearestPeers(ConvertPeerID(peers[2]), 20)
if len(found) != 20 {
t.Fatalf("asked for 20 peers, got %d", len(found))
}

// We expect this list to include:
// * All peers with a common prefix length > than the CPL between our key
// and the target (peers in the target bucket).
// * Then a subset of the peers with the _same_ CPL (peers in buckets to the right).
// * Finally, other peers to the left of the target bucket.

// First, handle the peers that are _closer_ than us.
if len(found) <= len(closer) {
// All of our peers are "closer".
assertSortedPeerIdsEqual(t, found, closer[:len(found)])
return
}
assertSortedPeerIdsEqual(t, found[:len(closer)], closer)

// We've worked through closer peers, let's work through peers at the
// "same" distance.
found = found[len(closer):]

// Next, handle peers that are at the same distance as us.
if len(found) <= len(same) {
// Ok, all the remaining peers are at the same distance.
// unfortunately, that means we may be missing peers that are
// technically closer.

// Make sure all remaining peers are _somewhere_ in the "closer" set.
pset := peer.NewSet()
for _, p := range same {
pset.Add(p)
}
for _, p := range found {
if !pset.Contains(p) {
t.Fatalf("unexpected peer %s", p)
}
for i, p := range found {
if p != closest[i] {
t.Fatalf("unexpected peer %d", i)
}
return
}
assertSortedPeerIdsEqual(t, found[:len(same)], same)

// We've worked through closer peers, let's work through the further
// peers.
found = found[len(same):]

// All _further_ peers will be correctly sorted.
assertSortedPeerIdsEqual(t, found, further[:len(found)])

// Finally, test getting _all_ peers. These should be in the correct
// order.

// Ok, now let's try finding all of them.
found = rt.NearestPeers(ConvertPeerID(peers[2]), 100)
if len(found) != rt.Size() {
t.Fatalf("asked for %d peers, got %d", rt.Size(), len(found))
}

// We should get _all_ peers in sorted order.
for i, p := range found {
if p != closest[i] {
t.Fatalf("unexpected peer %d", i)
