diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go
index c78a1a6c5238..5a8ddb0c6e75 100644
--- a/cmd/bootnode/main.go
+++ b/cmd/bootnode/main.go
@@ -29,6 +29,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/p2p/discv5"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
)
@@ -85,7 +86,7 @@ func main() {
}
if *writeAddr {
- fmt.Printf("%v\n", discover.PubkeyID(&nodeKey.PublicKey))
+ fmt.Printf("%v\n", enode.PubkeyToIDV4(&nodeKey.PublicKey))
os.Exit(0)
}
@@ -118,16 +119,17 @@ func main() {
}
if *runv5 {
- if _, err := discv5.ListenUDP(nodeKey, conn, realaddr, "", restrictList); err != nil {
+ if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil {
utils.Fatalf("%v", err)
}
} else {
+ db, _ := enode.OpenDB("")
+ ln := enode.NewLocalNode(db, nodeKey)
cfg := discover.Config{
- PrivateKey: nodeKey,
- AnnounceAddr: realaddr,
- NetRestrict: restrictList,
+ PrivateKey: nodeKey,
+ NetRestrict: restrictList,
}
- if _, err := discover.ListenUDP(conn, cfg); err != nil {
+ if _, err := discover.ListenUDP(conn, ln, cfg); err != nil {
utils.Fatalf("%v", err)
}
}
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go
index f61909393d1b..b30476277038 100644
--- a/cmd/faucet/faucet.go
+++ b/cmd/faucet/faucet.go
@@ -54,8 +54,8 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/p2p/discv5"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
"github.com/XinFinOrg/XDPoSChain/params"
"github.com/gorilla/websocket"
@@ -262,8 +262,10 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u
return nil, err
}
for _, boot := range enodes {
- old, _ := discover.ParseNode(boot.String())
- stack.Server().AddPeer(old)
+ old, err := enode.ParseV4(boot.String())
+ if err != nil {
+ stack.Server().AddPeer(old)
+ }
}
// Attach to the client and retrieve and interesting metadatas
api, err := stack.Attach()
diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go
index 539b29cb7331..a04853a1c781 100644
--- a/cmd/p2psim/main.go
+++ b/cmd/p2psim/main.go
@@ -19,21 +19,20 @@
// Here is an example of creating a 2 node network with the first node
// connected to the second:
//
-// $ p2psim node create
-// Created node01
+// $ p2psim node create
+// Created node01
//
-// $ p2psim node start node01
-// Started node01
+// $ p2psim node start node01
+// Started node01
//
-// $ p2psim node create
-// Created node02
+// $ p2psim node create
+// Created node02
//
-// $ p2psim node start node02
-// Started node02
-//
-// $ p2psim node connect node01 node02
-// Connected node01 to node02
+// $ p2psim node start node02
+// Started node02
//
+// $ p2psim node connect node01 node02
+// Connected node01 to node02
package main
import (
@@ -47,7 +46,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
"github.com/XinFinOrg/XDPoSChain/rpc"
@@ -283,7 +282,7 @@ func createNode(ctx *cli.Context) error {
if err != nil {
return err
}
- config.ID = discover.PubkeyID(&privKey.PublicKey)
+ config.ID = enode.PubkeyToIDV4(&privKey.PublicKey)
config.PrivateKey = privKey
}
if services := ctx.String("services"); services != "" {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 310a3f12e9b6..ae845a4469a4 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -47,8 +47,8 @@ import (
"github.com/XinFinOrg/XDPoSChain/metrics/exp"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/p2p/discv5"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
"github.com/XinFinOrg/XDPoSChain/params"
@@ -681,9 +681,10 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
case ctx.GlobalBool(XDCTestnetFlag.Name):
urls = params.TestnetBootnodes
}
- cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))
+
+ cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
for _, url := range urls {
- node, err := discover.ParseNode(url)
+ node, err := enode.ParseV4(url)
if err != nil {
log.Error("Bootstrap URL invalid", "enode", url, "err", err)
continue
diff --git a/cmd/wnode/main.go b/cmd/wnode/main.go
index 5fa29ab96c54..8717a73442f7 100644
--- a/cmd/wnode/main.go
+++ b/cmd/wnode/main.go
@@ -40,7 +40,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
"github.com/XinFinOrg/XDPoSChain/whisper/mailserver"
whisper "github.com/XinFinOrg/XDPoSChain/whisper/whisperv6"
@@ -174,7 +174,7 @@ func initialize() {
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
done = make(chan struct{})
- var peers []*discover.Node
+ var peers []*enode.Node
var err error
if *generateKey {
@@ -202,7 +202,7 @@ func initialize() {
if len(*argEnode) == 0 {
argEnode = scanLineA("Please enter the peer's enode: ")
}
- peer := discover.MustParseNode(*argEnode)
+ peer := enode.MustParseV4(*argEnode)
peers = append(peers, peer)
}
@@ -748,11 +748,11 @@ func requestExpiredMessagesLoop() {
}
func extractIDFromEnode(s string) []byte {
- n, err := discover.ParseNode(s)
+ n, err := enode.ParseV4(s)
if err != nil {
utils.Fatalf("Failed to parse enode: %s", err)
}
- return n.ID[:]
+ return n.ID().Bytes()
}
// obfuscateBloom adds 16 random bits to the the bloom
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
new file mode 100644
index 000000000000..4a106c7e0e21
--- /dev/null
+++ b/core/forkid/forkid.go
@@ -0,0 +1,247 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package forkid implements EIP-2124 (https://eips.ethereum.org/EIPS/eip-2124).
+package forkid
+
+import (
+ "encoding/binary"
+ "errors"
+ "hash/crc32"
+ "math"
+ "math/big"
+ "reflect"
+ "strings"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/params"
+)
+
+var (
+ // ErrRemoteStale is returned by the validator if a remote fork checksum is a
+ // subset of our already applied forks, but the announced next fork block is
+ // not on our already passed chain.
+ ErrRemoteStale = errors.New("remote needs update")
+
+ // ErrLocalIncompatibleOrStale is returned by the validator if a remote fork
+ // checksum does not match any local checksum variation, signalling that the
+ // two chains have diverged in the past at some point (possibly at genesis).
+ ErrLocalIncompatibleOrStale = errors.New("local incompatible or needs update")
+)
+
+// ID is a fork identifier as defined by EIP-2124.
+type ID struct {
+ Hash [4]byte // CRC32 checksum of the genesis block and passed fork block numbers
+ Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known
+}
+
+// Filter is a fork id filter to validate a remotely advertised ID.
+type Filter func(id ID) error
+
+// NewID calculates the Ethereum fork ID from the chain config and head.
+func NewID(chain *core.BlockChain) ID {
+ return newID(
+ chain.Config(),
+ chain.Genesis().Hash(),
+ chain.CurrentHeader().Number.Uint64(),
+ )
+}
+
+// newID is the internal version of NewID, which takes extracted values as its
+// arguments instead of a chain. The reason is to allow testing the IDs without
+// having to simulate an entire blockchain.
+func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID {
+ // Calculate the starting checksum from the genesis hash
+ hash := crc32.ChecksumIEEE(genesis[:])
+
+ // Calculate the current fork checksum and the next fork block
+ var next uint64
+ for _, fork := range gatherForks(config) {
+ if fork <= head {
+ // Fork already passed, checksum the previous hash and the fork number
+ hash = checksumUpdate(hash, fork)
+ continue
+ }
+ next = fork
+ break
+ }
+ return ID{Hash: checksumToBytes(hash), Next: next}
+}
+
+// NewFilter creates a filter that returns if a fork ID should be rejected or not
+// based on the local chain's status.
+func NewFilter(chain *core.BlockChain) Filter {
+ return newFilter(
+ chain.Config(),
+ chain.Genesis().Hash(),
+ func() uint64 {
+ return chain.CurrentHeader().Number.Uint64()
+ },
+ )
+}
+
+// newFilter is the internal version of NewFilter, taking closures as its arguments
+// instead of a chain. The reason is to allow testing it without having to simulate
+// an entire blockchain.
+func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) func(id ID) error {
+	// Calculate all the valid fork hash and fork next combos
+ var (
+ forks = gatherForks(config)
+ sums = make([][4]byte, len(forks)+1) // 0th is the genesis
+ )
+ hash := crc32.ChecksumIEEE(genesis[:])
+ sums[0] = checksumToBytes(hash)
+ for i, fork := range forks {
+ hash = checksumUpdate(hash, fork)
+ sums[i+1] = checksumToBytes(hash)
+ }
+	// Add a sentry (MaxUint64) to simplify the fork checks and avoid special
+	// casing the last fork in the loop below.
+ forks = append(forks, math.MaxUint64) // Last fork will never be passed
+
+ // Create a validator that will filter out incompatible chains
+ return func(id ID) error {
+ // Run the fork checksum validation ruleset:
+ // 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT.
+ // The two nodes are in the same fork state currently. They might know
+ // of differing future forks, but that's not relevant until the fork
+ // triggers (might be postponed, nodes might be updated to match).
+ // 1a. A remotely announced but remotely not passed block is already passed
+ // locally, disconnect, since the chains are incompatible.
+ // 1b. No remotely announced fork; or not yet passed locally, connect.
+ // 2. If the remote FORK_CSUM is a subset of the local past forks and the
+ // remote FORK_NEXT matches with the locally following fork block number,
+ // connect.
+ // Remote node is currently syncing. It might eventually diverge from
+ // us, but at this current point in time we don't have enough information.
+ // 3. If the remote FORK_CSUM is a superset of the local past forks and can
+ // be completed with locally known future forks, connect.
+ // Local node is currently syncing. It might eventually diverge from
+ // the remote, but at this current point in time we don't have enough
+ // information.
+ // 4. Reject in all other cases.
+ head := headfn()
+ for i, fork := range forks {
+ // If our head is beyond this fork, continue to the next (we have a dummy
+ // fork of maxuint64 as the last item to always fail this check eventually).
+ if head >= fork {
+ continue
+ }
+ // Found the first unpassed fork block, check if our current state matches
+ // the remote checksum (rule #1).
+ if sums[i] == id.Hash {
+ // Fork checksum matched, check if a remote future fork block already passed
+ // locally without the local node being aware of it (rule #1a).
+ if id.Next > 0 && head >= id.Next {
+ return ErrLocalIncompatibleOrStale
+ }
+ // Haven't passed locally a remote-only fork, accept the connection (rule #1b).
+ return nil
+ }
+ // The local and remote nodes are in different forks currently, check if the
+ // remote checksum is a subset of our local forks (rule #2).
+ for j := 0; j < i; j++ {
+ if sums[j] == id.Hash {
+ // Remote checksum is a subset, validate based on the announced next fork
+ if forks[j] != id.Next {
+ return ErrRemoteStale
+ }
+ return nil
+ }
+ }
+ // Remote chain is not a subset of our local one, check if it's a superset by
+ // any chance, signalling that we're simply out of sync (rule #3).
+ for j := i + 1; j < len(sums); j++ {
+ if sums[j] == id.Hash {
+ // Yay, remote checksum is a superset, ignore upcoming forks
+ return nil
+ }
+ }
+ // No exact, subset or superset match. We are on differing chains, reject.
+ return ErrLocalIncompatibleOrStale
+ }
+ log.Error("Impossible fork ID validation", "id", id)
+ return nil // Something's very wrong, accept rather than reject
+ }
+}
+
+// checksum calculates the IEEE CRC32 checksum of a block number.
+func checksum(fork uint64) uint32 {
+ var blob [8]byte
+ binary.BigEndian.PutUint64(blob[:], fork)
+ return crc32.ChecksumIEEE(blob[:])
+}
+
+// checksumUpdate calculates the next IEEE CRC32 checksum based on the previous
+// one and a fork block number (equivalent to CRC32(original-blob || fork)).
+func checksumUpdate(hash uint32, fork uint64) uint32 {
+ var blob [8]byte
+ binary.BigEndian.PutUint64(blob[:], fork)
+ return crc32.Update(hash, crc32.IEEETable, blob[:])
+}
+
+// checksumToBytes converts a uint32 checksum into a [4]byte array.
+func checksumToBytes(hash uint32) [4]byte {
+ var blob [4]byte
+ binary.BigEndian.PutUint32(blob[:], hash)
+ return blob
+}
+
+// gatherForks gathers all the known forks and creates a sorted list out of them.
+func gatherForks(config *params.ChainConfig) []uint64 {
+ // Gather all the fork block numbers via reflection
+ kind := reflect.TypeOf(params.ChainConfig{})
+ conf := reflect.ValueOf(config).Elem()
+
+ var forks []uint64
+ for i := 0; i < kind.NumField(); i++ {
+ // Fetch the next field and skip non-fork rules
+ field := kind.Field(i)
+ if !strings.HasSuffix(field.Name, "Block") {
+ continue
+ }
+ if field.Type != reflect.TypeOf(new(big.Int)) {
+ continue
+ }
+ // Extract the fork rule block number and aggregate it
+ rule := conf.Field(i).Interface().(*big.Int)
+ if rule != nil {
+ forks = append(forks, rule.Uint64())
+ }
+ }
+	// Sort the fork block numbers to permit chronological XOR
+ for i := 0; i < len(forks); i++ {
+ for j := i + 1; j < len(forks); j++ {
+ if forks[i] > forks[j] {
+ forks[i], forks[j] = forks[j], forks[i]
+ }
+ }
+ }
+ // Deduplicate block numbers applying multiple forks
+ for i := 1; i < len(forks); i++ {
+ if forks[i] == forks[i-1] {
+ forks = append(forks[:i], forks[i+1:]...)
+ i--
+ }
+ }
+ // Skip any forks in block 0, that's the genesis ruleset
+ if len(forks) > 0 && forks[0] == 0 {
+ forks = forks[1:]
+ }
+ return forks
+}
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
new file mode 100644
index 000000000000..a3cea725eb9c
--- /dev/null
+++ b/core/forkid/forkid_test.go
@@ -0,0 +1,294 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package forkid
+
+import (
+ "bytes"
+ "math"
+ "math/big"
+ "testing"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/params"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+)
+
+var ( // from go-ethereum
+	MainnetChainConfig = &params.ChainConfig{
+		ChainId:             big.NewInt(1),
+		HomesteadBlock:      big.NewInt(1150000),
+		DAOForkBlock:        big.NewInt(1920000),
+		DAOForkSupport:      true,
+		EIP150Block:         big.NewInt(2463000),
+		EIP150Hash:          common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
+		EIP155Block:         big.NewInt(2675000),
+		EIP158Block:         big.NewInt(2675000),
+		ByzantiumBlock:      big.NewInt(4370000),
+		ConstantinopleBlock: big.NewInt(7280000),
+		PetersburgBlock:     big.NewInt(7280000),
+		Ethash:              new(params.EthashConfig),
+	}
+
+	RopstenChainConfig = &params.ChainConfig{
+		ChainId:             big.NewInt(3),
+		HomesteadBlock:      big.NewInt(0),
+		DAOForkBlock:        nil,
+		DAOForkSupport:      true,
+		EIP150Block:         big.NewInt(0),
+		EIP150Hash:          common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"),
+		EIP155Block:         big.NewInt(10),
+		EIP158Block:         big.NewInt(10),
+		ByzantiumBlock:      big.NewInt(1700000),
+		ConstantinopleBlock: big.NewInt(4230000),
+		PetersburgBlock:     big.NewInt(4939394),
+		Ethash:              new(params.EthashConfig),
+	}
+
+	RinkebyChainConfig = &params.ChainConfig{
+		ChainId:             big.NewInt(4),
+		HomesteadBlock:      big.NewInt(1),
+		DAOForkBlock:        nil,
+		DAOForkSupport:      true,
+		EIP150Block:         big.NewInt(2),
+		EIP150Hash:          common.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"),
+		EIP155Block:         big.NewInt(3),
+		EIP158Block:         big.NewInt(3),
+		ByzantiumBlock:      big.NewInt(1035301),
+		ConstantinopleBlock: big.NewInt(3660663),
+		PetersburgBlock:     big.NewInt(4321234),
+		Clique: &params.CliqueConfig{
+			Period: 15,
+			Epoch:  30000,
+		},
+	}
+
+	GoerliChainConfig = &params.ChainConfig{
+		ChainId:             big.NewInt(5),
+		HomesteadBlock:      big.NewInt(0),
+		DAOForkBlock:        nil,
+		DAOForkSupport:      true,
+		EIP150Block:         big.NewInt(0),
+		EIP155Block:         big.NewInt(0),
+		EIP158Block:         big.NewInt(0),
+		ByzantiumBlock:      big.NewInt(0),
+		ConstantinopleBlock: big.NewInt(0),
+		PetersburgBlock:     big.NewInt(0),
+		Clique: &params.CliqueConfig{
+			Period: 15,
+			Epoch:  30000,
+		},
+	}
+
+	MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3")
+	RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d")
+	RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177")
+	GoerliGenesisHash  = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a")
+)
+
+// TestCreation tests that different genesis and fork rule combinations result in
+// the correct fork ID.
+func TestCreation(t *testing.T) {
+
+ type testcase struct {
+ head uint64
+ want ID
+ }
+ tests := []struct {
+ config *params.ChainConfig
+ genesis common.Hash
+ cases []testcase
+ }{
+ // Mainnet test cases
+ {
+ MainnetChainConfig,
+ MainnetGenesisHash,
+ []testcase{
+ {0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
+ {1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
+ {1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
+ {1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
+ {1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
+ {2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
+ {2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
+ {2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
+ {2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
+ {4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
+ {4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
+ {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
+ {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 0}}, // First and last Constantinople, first Petersburg block
+ {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}}, // Today Petersburg block
+ },
+ },
+ // Ropsten test cases
+ {
+ RopstenChainConfig,
+ RopstenGenesisHash,
+ []testcase{
+ {0, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}}, // Unsynced, last Frontier, Homestead and first Tangerine block
+ {9, ID{Hash: checksumToBytes(0x30c7ddbc), Next: 10}}, // Last Tangerine block
+ {10, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // First Spurious block
+ {1699999, ID{Hash: checksumToBytes(0x63760190), Next: 1700000}}, // Last Spurious block
+ {1700000, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // First Byzantium block
+ {4229999, ID{Hash: checksumToBytes(0x3ea159c7), Next: 4230000}}, // Last Byzantium block
+ {4230000, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // First Constantinople block
+ {4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
+ {4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 0}}, // First Petersburg block
+ {5822692, ID{Hash: checksumToBytes(0xd6e2149b), Next: 0}}, // Today Petersburg block
+ },
+ },
+ // Rinkeby test cases
+ {
+ RinkebyChainConfig,
+ RinkebyGenesisHash,
+ []testcase{
+ {0, ID{Hash: checksumToBytes(0x3b8e0691), Next: 1}}, // Unsynced, last Frontier block
+ {1, ID{Hash: checksumToBytes(0x60949295), Next: 2}}, // First and last Homestead block
+ {2, ID{Hash: checksumToBytes(0x8bde40dd), Next: 3}}, // First and last Tangerine block
+ {3, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // First Spurious block
+ {1035300, ID{Hash: checksumToBytes(0xcb3a64bb), Next: 1035301}}, // Last Spurious block
+ {1035301, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // First Byzantium block
+ {3660662, ID{Hash: checksumToBytes(0x8d748b57), Next: 3660663}}, // Last Byzantium block
+ {3660663, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // First Constantinople block
+ {4321233, ID{Hash: checksumToBytes(0xe49cab14), Next: 4321234}}, // Last Constantinople block
+ {4321234, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}}, // First Petersburg block
+ {4586649, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}}, // Today Petersburg block
+ },
+ },
+ // Goerli test cases
+
+ {
+ GoerliChainConfig,
+ GoerliGenesisHash,
+ []testcase{
+ {0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 0}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block
+ {795329, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 0}}, // Today Petersburg block
+ },
+ },
+ }
+ for i, tt := range tests {
+ for j, ttt := range tt.cases {
+ if have := newID(tt.config, tt.genesis, ttt.head); have != ttt.want {
+ t.Errorf("test %d, case %d: fork ID mismatch: have %x, want %x", i, j, have, ttt.want)
+ }
+ }
+ }
+}
+
+// TestValidation tests that a local peer correctly validates and accepts a remote
+// fork ID.
+func TestValidation(t *testing.T) {
+ tests := []struct {
+ head uint64
+ id ID
+ err error
+ }{
+ // Local is mainnet Petersburg, remote announces the same. No future fork is announced.
+ {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
+
+ // Local is mainnet Petersburg, remote announces the same. Remote also announces a next fork
+ // at block 0xffffffff, but that is uncertain.
+ {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: math.MaxUint64}, nil},
+
+ // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
+ // also Byzantium, but it's not yet aware of Petersburg (e.g. non updated node before the fork).
+ // In this case we don't know if Petersburg passed yet or not.
+ {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+
+ // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
+ // also Byzantium, and it's also aware of Petersburg (e.g. updated node before the fork). We
+ // don't know if Petersburg passed yet (will pass) or not.
+ {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+
+ // Local is mainnet currently in Byzantium only (so it's aware of Petersburg), remote announces
+ // also Byzantium, and it's also aware of some random fork (e.g. misconfigured Petersburg). As
+ // neither forks passed at neither nodes, they may mismatch, but we still connect for now.
+ {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: math.MaxUint64}, nil},
+
+ // Local is mainnet exactly on Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
+ // is simply out of sync, accept.
+ {7280000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+
+ // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. Remote
+ // is simply out of sync, accept.
+ {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil},
+
+ // Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote
+ // is definitely out of sync. It may or may not need the Petersburg update, we don't know yet.
+ {7987396, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
+
+ // Local is mainnet Byzantium, remote announces Petersburg. Local is out of sync, accept.
+ {7279999, ID{Hash: checksumToBytes(0x668db0af), Next: 0}, nil},
+
+ // Local is mainnet Spurious, remote announces Byzantium, but is not aware of Petersburg. Local
+ // out of sync. Local also knows about a future fork, but that is uncertain yet.
+ {4369999, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, nil},
+
+ // Local is mainnet Petersburg. remote announces Byzantium but is not aware of further forks.
+ // Remote needs software update.
+ {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 0}, ErrRemoteStale},
+
+ // Local is mainnet Petersburg, and isn't aware of more forks. Remote announces Petersburg +
+ // 0xffffffff. Local needs software update, reject.
+ {7987396, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+
+ // Local is mainnet Byzantium, and is aware of Petersburg. Remote announces Petersburg +
+ // 0xffffffff. Local needs software update, reject.
+ {7279999, ID{Hash: checksumToBytes(0x5cddc0e1), Next: 0}, ErrLocalIncompatibleOrStale},
+
+ // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
+ {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
+
+ // Local is mainnet Petersburg, far in the future. Remote announces Gopherium (non existing fork)
+ // at some future block 88888888, for itself, but past block for local. Local is incompatible.
+ //
+ // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
+ {88888888, ID{Hash: checksumToBytes(0x668db0af), Next: 88888888}, ErrLocalIncompatibleOrStale},
+
+ // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
+ // fork) at block 7279999, before Petersburg. Local is incompatible.
+ {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
+ }
+ for i, tt := range tests {
+ filter := newFilter(MainnetChainConfig, MainnetGenesisHash, func() uint64 { return tt.head })
+ if err := filter(tt.id); err != tt.err {
+ t.Errorf("test %d: validation error mismatch: have %v, want %v", i, err, tt.err)
+ }
+ }
+}
+
+// Tests that IDs are properly RLP encoded (specifically important because we
+// use uint32 to store the hash, but we need to encode it as [4]byte).
+func TestEncoding(t *testing.T) {
+	tests := []struct {
+		id   ID
+		want []byte
+	}{
+		{ID{Hash: checksumToBytes(0), Next: 0}, common.Hex2Bytes("c6840000000080")},
+		{ID{Hash: checksumToBytes(0xdeadbeef), Next: 0xBADDCAFE}, common.Hex2Bytes("ca84deadbeef84baddcafe")},
+		{ID{Hash: checksumToBytes(math.MaxUint32), Next: math.MaxUint64}, common.Hex2Bytes("ce84ffffffff88ffffffffffffffff")},
+	}
+	for i, tt := range tests {
+		have, err := rlp.EncodeToBytes(tt.id)
+		if err != nil {
+			t.Errorf("test %d: failed to encode forkid: %v", i, err)
+			continue
+		}
+		if !bytes.Equal(have, tt.want) {
+			t.Errorf("test %d: RLP mismatch: have %x, want %x", i, have, tt.want)
+		}
+	}
+}
diff --git a/core/types/block.go b/core/types/block.go
index 071666a801fa..de4ed128506a 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -154,6 +154,25 @@ func (h *Header) Size() common.StorageSize {
return common.StorageSize(unsafe.Sizeof(*h)) + common.StorageSize(len(h.Extra)+(h.Difficulty.BitLen()+h.Number.BitLen()+h.Time.BitLen())/8)
}
+// SanityCheck checks a few basic things -- these checks are way beyond what
+// any 'sane' production values should hold, and can mainly be used to prevent
+// that the unbounded fields are stuffed with junk data to add processing
+// overhead
+func (h *Header) SanityCheck() error {
+ if h.Number != nil && !h.Number.IsUint64() {
+ return fmt.Errorf("too large block number: bitlen %d", h.Number.BitLen())
+ }
+ if h.Difficulty != nil {
+ if diffLen := h.Difficulty.BitLen(); diffLen > 80 {
+ return fmt.Errorf("too large block difficulty: bitlen %d", diffLen)
+ }
+ }
+ if eLen := len(h.Extra); eLen > 100*1024 {
+ return fmt.Errorf("too large block extradata: size %d", eLen)
+ }
+ return nil
+}
+
// Body is a simple (mutable, non-safe) data container for storing and moving
// a block's data contents (transactions and uncles) together.
type Body struct {
@@ -369,6 +388,12 @@ func (b *Block) Size() common.StorageSize {
return common.StorageSize(c)
}
+// SanityCheck can be used to prevent that unbounded fields are
+// stuffed with junk data to add processing overhead
+func (b *Block) SanityCheck() error {
+ return b.header.SanityCheck()
+}
+
type writeCounter common.StorageSize
func (c *writeCounter) Write(b []byte) (int, error) {
diff --git a/eth/backend.go b/eth/backend.go
index 531d633fa45f..253d280e046a 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -51,6 +51,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/miner"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
"github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
"github.com/XinFinOrg/XDPoSChain/rpc"
@@ -69,7 +70,9 @@ type Ethereum struct {
chainConfig *params.ChainConfig
// Channel for shutting down the service
- shutdownChan chan bool // Channel for shutting down the ethereum
+ shutdownChan chan bool
+
+ server *p2p.Server
// Handlers
txPool *core.TxPool
@@ -280,8 +283,8 @@ func New(ctx *node.ServiceContext, config *ethconfig.Config, XDCXServ *XDCx.XDCX
return block, false, nil
}
- eth.protocolManager.fetcher.SetSignHook(signHook)
- eth.protocolManager.fetcher.SetAppendM2HeaderHook(appendM2HeaderHook)
+ eth.protocolManager.blockFetcher.SetSignHook(signHook)
+ eth.protocolManager.blockFetcher.SetAppendM2HeaderHook(appendM2HeaderHook)
/*
XDPoS1.0 Specific hooks
@@ -538,22 +541,29 @@ func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *Ethereum) Engine() consensus.Engine { return s.engine }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
-func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
+func (s *Ethereum) EthVersion() int { return int(ProtocolVersions[0]) }
func (s *Ethereum) NetVersion() uint64 { return s.networkId }
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
// Protocols implements node.Service, returning all the currently configured
// network protocols to start.
func (s *Ethereum) Protocols() []p2p.Protocol {
- if s.lesServer == nil {
- return s.protocolManager.SubProtocols
+ protos := make([]p2p.Protocol, len(ProtocolVersions))
+ for i, vsn := range ProtocolVersions {
+ protos[i] = s.protocolManager.makeProtocol(vsn)
+ protos[i].Attributes = []enr.Entry{s.currentEthEntry()}
+ }
+ if s.lesServer != nil {
+ protos = append(protos, s.lesServer.Protocols()...)
}
- return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
+ return protos
}
// Start implements node.Service, starting all internal goroutines needed by the
// Ethereum protocol implementation.
func (s *Ethereum) Start(srvr *p2p.Server) error {
+ s.startEthEntryUpdate(srvr.LocalNode())
+
// Start the bloom bits servicing goroutines
s.startBloomHandlers()
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index bbf5889d0e9f..ca1d29fbcd9c 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -674,9 +674,12 @@ func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisa
func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) }
func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) }
func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) }
-func TestCanonicalSynchronisation64Light(t *testing.T) {
- testCanonicalSynchronisation(t, 64, LightSync)
-}
+func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronisation(t, 64, LightSync) }
+func TestCanonicalSynchronisation100Full(t *testing.T) { testCanonicalSynchronisation(t, 100, FullSync) }
+func TestCanonicalSynchronisation100Fast(t *testing.T) { testCanonicalSynchronisation(t, 100, FastSync) }
+func TestCanonicalSynchronisation101Full(t *testing.T) { testCanonicalSynchronisation(t, 101, FullSync) }
+func TestCanonicalSynchronisation101Fast(t *testing.T) { testCanonicalSynchronisation(t, 101, FastSync) }
+func TestCanonicalSynchronisation101Light(t *testing.T) { testCanonicalSynchronisation(t, 101, LightSync) }
func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -704,6 +707,10 @@ func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) }
func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) }
func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
+func TestThrottling100Full(t *testing.T) { testThrottling(t, 100, FullSync) }
+func TestThrottling100Fast(t *testing.T) { testThrottling(t, 100, FastSync) }
+func TestThrottling101Full(t *testing.T) { testThrottling(t, 101, FullSync) }
+func TestThrottling101Fast(t *testing.T) { testThrottling(t, 101, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -791,6 +798,11 @@ func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) }
func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }
func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }
func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
+func TestForkedSync100Full(t *testing.T) { testForkedSync(t, 100, FullSync) }
+func TestForkedSync100Fast(t *testing.T) { testForkedSync(t, 100, FastSync) }
+func TestForkedSync101Full(t *testing.T) { testForkedSync(t, 101, FullSync) }
+func TestForkedSync101Fast(t *testing.T) { testForkedSync(t, 101, FastSync) }
+func TestForkedSync101Light(t *testing.T) { testForkedSync(t, 101, LightSync) }
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -826,6 +838,11 @@ func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastS
func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }
func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }
func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, LightSync) }
+func TestHeavyForkedSync100Full(t *testing.T) { testHeavyForkedSync(t, 100, FullSync) }
+func TestHeavyForkedSync100Fast(t *testing.T) { testHeavyForkedSync(t, 100, FastSync) }
+func TestHeavyForkedSync101Full(t *testing.T) { testHeavyForkedSync(t, 101, FullSync) }
+func TestHeavyForkedSync101Fast(t *testing.T) { testHeavyForkedSync(t, 101, FastSync) }
+func TestHeavyForkedSync101Light(t *testing.T) { testHeavyForkedSync(t, 101, LightSync) }
func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -862,6 +879,11 @@ func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, F
func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }
func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }
func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, LightSync) }
+func TestBoundedForkedSync100Full(t *testing.T) { testBoundedForkedSync(t, 100, FullSync) }
+func TestBoundedForkedSync100Fast(t *testing.T) { testBoundedForkedSync(t, 100, FastSync) }
+func TestBoundedForkedSync101Full(t *testing.T) { testBoundedForkedSync(t, 101, FullSync) }
+func TestBoundedForkedSync101Fast(t *testing.T) { testBoundedForkedSync(t, 101, FastSync) }
+func TestBoundedForkedSync101Light(t *testing.T) { testBoundedForkedSync(t, 101, LightSync) }
func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -891,12 +913,18 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
// Tests that chain forks are contained within a certain interval of the current
// chain head for short but heavy forks too. These are a bit special because they
// take different ancestor lookup paths.
-func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
-func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
-func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
-func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
-func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
-func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
+func TestBoundedHeavyForkedSync62(t *testing.T) { testBoundedHeavyForkedSync(t, 62, FullSync) }
+func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) }
+func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) }
+func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }
+func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }
+func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSync(t, 64, LightSync) }
+func TestBoundedHeavyForkedSync100Full(t *testing.T) { testBoundedHeavyForkedSync(t, 100, FullSync) }
+func TestBoundedHeavyForkedSync100Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 100, FastSync) }
+func TestBoundedHeavyForkedSync100Light(t *testing.T) { testBoundedHeavyForkedSync(t, 100, LightSync) }
+func TestBoundedHeavyForkedSync101Full(t *testing.T) { testBoundedHeavyForkedSync(t, 101, FullSync) }
+func TestBoundedHeavyForkedSync101Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 101, FastSync) }
+func TestBoundedHeavyForkedSync101Light(t *testing.T) { testBoundedHeavyForkedSync(t, 101, LightSync) }
func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -961,12 +989,18 @@ func TestInactiveDownloader63(t *testing.T) {
}
// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
-func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
-func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
-func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
-func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }
-func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
+func TestCancel62(t *testing.T) { testCancel(t, 62, FullSync) }
+func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) }
+func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) }
+func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }
+func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }
+func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
+func TestCancel100Full(t *testing.T) { testCancel(t, 100, FullSync) }
+func TestCancel100Fast(t *testing.T) { testCancel(t, 100, FastSync) }
+func TestCancel100Light(t *testing.T) { testCancel(t, 100, LightSync) }
+func TestCancel101Full(t *testing.T) { testCancel(t, 101, FullSync) }
+func TestCancel101Fast(t *testing.T) { testCancel(t, 101, FastSync) }
+func TestCancel101Light(t *testing.T) { testCancel(t, 101, LightSync) }
func testCancel(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1002,12 +1036,18 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
}
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
-func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
-func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
-func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
-func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
-func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }
-func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
+func TestMultiSynchronisation62(t *testing.T) { testMultiSynchronisation(t, 62, FullSync) }
+func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) }
+func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) }
+func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }
+func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }
+func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t, 64, LightSync) }
+func TestMultiSynchronisation100Full(t *testing.T) { testMultiSynchronisation(t, 100, FullSync) }
+func TestMultiSynchronisation100Fast(t *testing.T) { testMultiSynchronisation(t, 100, FastSync) }
+func TestMultiSynchronisation100Light(t *testing.T) { testMultiSynchronisation(t, 100, LightSync) }
+func TestMultiSynchronisation101Full(t *testing.T) { testMultiSynchronisation(t, 101, FullSync) }
+func TestMultiSynchronisation101Fast(t *testing.T) { testMultiSynchronisation(t, 101, FastSync) }
+func TestMultiSynchronisation101Light(t *testing.T) { testMultiSynchronisation(t, 101, LightSync) }
func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1032,12 +1072,18 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
// Tests that synchronisations behave well in multi-version protocol environments
// and not wreak havoc on other nodes in the network.
-func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
-func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
-func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
-func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
-func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }
-func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
+func TestMultiProtoSynchronisation62(t *testing.T) { testMultiProtoSync(t, 62, FullSync) }
+func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) }
+func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) }
+func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }
+func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }
+func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t, 64, LightSync) }
+func TestMultiProtoSynchronisation100Full(t *testing.T) { testMultiProtoSync(t, 100, FullSync) }
+func TestMultiProtoSynchronisation100Fast(t *testing.T) { testMultiProtoSync(t, 100, FastSync) }
+func TestMultiProtoSynchronisation100Light(t *testing.T) { testMultiProtoSync(t, 100, LightSync) }
+func TestMultiProtoSynchronisation101Full(t *testing.T) { testMultiProtoSync(t, 101, FullSync) }
+func TestMultiProtoSynchronisation101Fast(t *testing.T) { testMultiProtoSync(t, 101, FastSync) }
+func TestMultiProtoSynchronisation101Light(t *testing.T) { testMultiProtoSync(t, 101, LightSync) }
func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1053,6 +1099,8 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
+ tester.newPeer("peer 100", 100, hashes, headers, blocks, receipts)
+ tester.newPeer("peer 101", 101, hashes, headers, blocks, receipts)
// Synchronise with the requested peer and make sure all blocks were retrieved
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -1061,7 +1109,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
assertOwnChain(t, tester, targetBlocks+1)
// Check that no peers have been dropped off
- for _, version := range []int{62, 63, 64} {
+ for _, version := range []int{62, 63, 64, 100, 101} {
peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peerHashes[peer]; !ok {
t.Errorf("%s dropped", peer)
@@ -1071,12 +1119,18 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
// Tests that if a block is empty (e.g. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) }
-func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) }
-func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) }
-func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
-func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }
-func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
+func TestEmptyShortCircuit62(t *testing.T) { testEmptyShortCircuit(t, 62, FullSync) }
+func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) }
+func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) }
+func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }
+func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }
+func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, LightSync) }
+func TestEmptyShortCircuit100Full(t *testing.T) { testEmptyShortCircuit(t, 100, FullSync) }
+func TestEmptyShortCircuit100Fast(t *testing.T) { testEmptyShortCircuit(t, 100, FastSync) }
+func TestEmptyShortCircuit100Light(t *testing.T) { testEmptyShortCircuit(t, 100, LightSync) }
+func TestEmptyShortCircuit101Full(t *testing.T) { testEmptyShortCircuit(t, 101, FullSync) }
+func TestEmptyShortCircuit101Fast(t *testing.T) { testEmptyShortCircuit(t, 101, FastSync) }
+func TestEmptyShortCircuit101Light(t *testing.T) { testEmptyShortCircuit(t, 101, LightSync) }
func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1126,12 +1180,18 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
// Tests that headers are enqueued continuously, preventing malicious nodes from
// stalling the downloader by feeding gapped header chains.
-func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) }
-func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) }
-func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) }
-func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
-func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }
-func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
+func TestMissingHeaderAttack62(t *testing.T) { testMissingHeaderAttack(t, 62, FullSync) }
+func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) }
+func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) }
+func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }
+func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }
+func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 64, LightSync) }
+func TestMissingHeaderAttack100Full(t *testing.T) { testMissingHeaderAttack(t, 100, FullSync) }
+func TestMissingHeaderAttack100Fast(t *testing.T) { testMissingHeaderAttack(t, 100, FastSync) }
+func TestMissingHeaderAttack100Light(t *testing.T) { testMissingHeaderAttack(t, 100, LightSync) }
+func TestMissingHeaderAttack101Full(t *testing.T) { testMissingHeaderAttack(t, 101, FullSync) }
+func TestMissingHeaderAttack101Fast(t *testing.T) { testMissingHeaderAttack(t, 101, FastSync) }
+func TestMissingHeaderAttack101Light(t *testing.T) { testMissingHeaderAttack(t, 101, LightSync) }
func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1161,12 +1221,18 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Tests that if requested headers are shifted (i.e. first is missing), the queue
// detects the invalid numbering.
-func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) }
-func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) }
-func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) }
-func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
-func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }
-func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
+func TestShiftedHeaderAttack62(t *testing.T) { testShiftedHeaderAttack(t, 62, FullSync) }
+func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) }
+func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) }
+func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }
+func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }
+func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
+func TestShiftedHeaderAttack100Full(t *testing.T) { testShiftedHeaderAttack(t, 100, FullSync) }
+func TestShiftedHeaderAttack100Fast(t *testing.T) { testShiftedHeaderAttack(t, 100, FastSync) }
+func TestShiftedHeaderAttack100Light(t *testing.T) { testShiftedHeaderAttack(t, 100, LightSync) }
+func TestShiftedHeaderAttack101Full(t *testing.T) { testShiftedHeaderAttack(t, 101, FullSync) }
+func TestShiftedHeaderAttack101Fast(t *testing.T) { testShiftedHeaderAttack(t, 101, FastSync) }
+func TestShiftedHeaderAttack101Light(t *testing.T) { testShiftedHeaderAttack(t, 101, LightSync) }
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1198,9 +1264,13 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
// Tests that upon detecting an invalid header, the recent ones are rolled back
// for various failure scenarios. Afterwards a full sync is attempted to make
// sure no state was corrupted.
-func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) }
-func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
-func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
+func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) }
+func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }
+func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
+func TestInvalidHeaderRollback100Fast(t *testing.T) { testInvalidHeaderRollback(t, 100, FastSync) }
+func TestInvalidHeaderRollback100Light(t *testing.T) { testInvalidHeaderRollback(t, 100, LightSync) }
+func TestInvalidHeaderRollback101Fast(t *testing.T) { testInvalidHeaderRollback(t, 101, FastSync) }
+func TestInvalidHeaderRollback101Light(t *testing.T) { testInvalidHeaderRollback(t, 101, LightSync) }
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1287,12 +1357,18 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
// Tests that a peer advertising an high TD doesn't get to stall the downloader
// afterwards by not sending any useful hashes.
-func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
-func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
-func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
-func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
-func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }
-func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
+func TestHighTDStarvationAttack62(t *testing.T) { testHighTDStarvationAttack(t, 62, FullSync) }
+func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) }
+func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) }
+func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }
+func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }
+func TestHighTDStarvationAttack64Light(t *testing.T) { testHighTDStarvationAttack(t, 64, LightSync) }
+func TestHighTDStarvationAttack100Full(t *testing.T) { testHighTDStarvationAttack(t, 100, FullSync) }
+func TestHighTDStarvationAttack100Fast(t *testing.T) { testHighTDStarvationAttack(t, 100, FastSync) }
+func TestHighTDStarvationAttack100Light(t *testing.T) { testHighTDStarvationAttack(t, 100, LightSync) }
+func TestHighTDStarvationAttack101Full(t *testing.T) { testHighTDStarvationAttack(t, 101, FullSync) }
+func TestHighTDStarvationAttack101Fast(t *testing.T) { testHighTDStarvationAttack(t, 101, FastSync) }
+func TestHighTDStarvationAttack101Light(t *testing.T) { testHighTDStarvationAttack(t, 101, LightSync) }
func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1309,9 +1385,11 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
}
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
-func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
-func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
-func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
+func TestBlockHeaderAttackerDropping62(t *testing.T) { testBlockHeaderAttackerDropping(t, 62) }
+func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) }
+func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }
+func TestBlockHeaderAttackerDropping100(t *testing.T) { testBlockHeaderAttackerDropping(t, 100) }
+func TestBlockHeaderAttackerDropping101(t *testing.T) { testBlockHeaderAttackerDropping(t, 101) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
t.Parallel()
@@ -1367,12 +1445,18 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
// Tests that synchronisation progress (origin block number, current block number
// and highest block number) is tracked and updated correctly.
-func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
-func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
-func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
-func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
-func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
-func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
+func TestSyncProgress62(t *testing.T) { testSyncProgress(t, 62, FullSync) }
+func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) }
+func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) }
+func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }
+func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }
+func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync) }
+func TestSyncProgress100Full(t *testing.T) { testSyncProgress(t, 100, FullSync) }
+func TestSyncProgress100Fast(t *testing.T) { testSyncProgress(t, 100, FastSync) }
+func TestSyncProgress100Light(t *testing.T) { testSyncProgress(t, 100, LightSync) }
+func TestSyncProgress101Full(t *testing.T) { testSyncProgress(t, 101, FullSync) }
+func TestSyncProgress101Fast(t *testing.T) { testSyncProgress(t, 101, FastSync) }
+func TestSyncProgress101Light(t *testing.T) { testSyncProgress(t, 101, LightSync) }
func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1440,12 +1524,18 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that synchronisation progress (origin block number and highest block
// number) is tracked and updated correctly in case of a fork (or manual head
// revertal).
-func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) }
-func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) }
-func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) }
-func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
-func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }
-func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
+func TestForkedSyncProgress62(t *testing.T) { testForkedSyncProgress(t, 62, FullSync) }
+func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) }
+func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) }
+func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }
+func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }
+func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64, LightSync) }
+func TestForkedSyncProgress100Full(t *testing.T) { testForkedSyncProgress(t, 100, FullSync) }
+func TestForkedSyncProgress100Fast(t *testing.T) { testForkedSyncProgress(t, 100, FastSync) }
+func TestForkedSyncProgress100Light(t *testing.T) { testForkedSyncProgress(t, 100, LightSync) }
+func TestForkedSyncProgress101Full(t *testing.T) { testForkedSyncProgress(t, 101, FullSync) }
+func TestForkedSyncProgress101Fast(t *testing.T) { testForkedSyncProgress(t, 101, FastSync) }
+func TestForkedSyncProgress101Light(t *testing.T) { testForkedSyncProgress(t, 101, LightSync) }
func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1516,12 +1606,18 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that if synchronisation is aborted due to some failure, then the progress
// origin is not updated in the next sync cycle, as it should be considered the
// continuation of the previous sync and not a new instance.
-func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) }
-func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
-func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
-func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
-func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
-func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
+func TestFailedSyncProgress62(t *testing.T) { testFailedSyncProgress(t, 62, FullSync) }
+func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) }
+func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) }
+func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }
+func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }
+func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64, LightSync) }
+func TestFailedSyncProgress100Full(t *testing.T) { testFailedSyncProgress(t, 100, FullSync) }
+func TestFailedSyncProgress100Fast(t *testing.T) { testFailedSyncProgress(t, 100, FastSync) }
+func TestFailedSyncProgress100Light(t *testing.T) { testFailedSyncProgress(t, 100, LightSync) }
+func TestFailedSyncProgress101Full(t *testing.T) { testFailedSyncProgress(t, 101, FullSync) }
+func TestFailedSyncProgress101Fast(t *testing.T) { testFailedSyncProgress(t, 101, FastSync) }
+func TestFailedSyncProgress101Light(t *testing.T) { testFailedSyncProgress(t, 101, LightSync) }
func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1593,12 +1689,18 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
// Tests that if an attacker fakes a chain height, after the attack is detected,
// the progress height is successfully reduced at the next sync invocation.
-func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) }
-func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
-func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
-func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
-func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }
-func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
+func TestFakedSyncProgress62(t *testing.T) { testFakedSyncProgress(t, 62, FullSync) }
+func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) }
+func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) }
+func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }
+func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }
+func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, LightSync) }
+func TestFakedSyncProgress100Full(t *testing.T) { testFakedSyncProgress(t, 100, FullSync) }
+func TestFakedSyncProgress100Fast(t *testing.T) { testFakedSyncProgress(t, 100, FastSync) }
+func TestFakedSyncProgress100Light(t *testing.T) { testFakedSyncProgress(t, 100, LightSync) }
+func TestFakedSyncProgress101Full(t *testing.T) { testFakedSyncProgress(t, 101, FullSync) }
+func TestFakedSyncProgress101Fast(t *testing.T) { testFakedSyncProgress(t, 101, FastSync) }
+func TestFakedSyncProgress101Light(t *testing.T) { testFakedSyncProgress(t, 101, LightSync) }
func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
t.Parallel()
@@ -1686,6 +1788,12 @@ func TestDeliverHeadersHang(t *testing.T) {
{64, FullSync},
{64, FastSync},
{64, LightSync},
+ {100, FullSync},
+ {100, FastSync},
+ {100, LightSync},
+ {101, FullSync},
+ {101, FastSync},
+ {101, LightSync},
}
for _, tc := range testCases {
t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) {
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 2cc0e5e1e4a0..c160e9b2e6e7 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -477,7 +477,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.headerThroughput
}
- return ps.idlePeers(62, 101, idle, throughput)
+ return ps.idlePeers(62, 200, idle, throughput)
}
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -491,7 +491,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.blockThroughput
}
- return ps.idlePeers(62, 101, idle, throughput)
+ return ps.idlePeers(62, 200, idle, throughput)
}
// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@@ -505,7 +505,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.receiptThroughput
}
- return ps.idlePeers(63, 101, idle, throughput)
+ return ps.idlePeers(63, 200, idle, throughput)
}
// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@@ -519,7 +519,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.stateThroughput
}
- return ps.idlePeers(63, 101, idle, throughput)
+ return ps.idlePeers(63, 200, idle, throughput)
}
// idlePeers retrieves a flat list of all currently idle peers satisfying the
diff --git a/eth/enr_entry.go b/eth/enr_entry.go
new file mode 100644
index 000000000000..55dab0770287
--- /dev/null
+++ b/eth/enr_entry.go
@@ -0,0 +1,61 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package eth
+
+import (
+ "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/forkid"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+)
+
+// ethEntry is the "eth" ENR entry which advertises eth protocol
+// on the discovery network.
+type ethEntry struct {
+ ForkID forkid.ID // Fork identifier per EIP-2124
+
+ // Ignore additional fields (for forward compatibility).
+ Rest []rlp.RawValue `rlp:"tail"`
+}
+
+// ENRKey implements enr.Entry.
+func (e ethEntry) ENRKey() string {
+ return "eth"
+}
+
+func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
+ var newHead = make(chan core.ChainHeadEvent, 10)
+ sub := eth.blockchain.SubscribeChainHeadEvent(newHead)
+
+ go func() {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case <-newHead:
+ ln.Set(eth.currentEthEntry())
+ case <-sub.Err():
+ // Would be nice to sync with eth.Stop, but there is no
+ // good way to do that.
+ return
+ }
+ }
+ }()
+}
+
+func (eth *Ethereum) currentEthEntry() *ethEntry {
+ return &ethEntry{ForkID: forkid.NewID(eth.blockchain)}
+}
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/block_fetcher.go
similarity index 73%
rename from eth/fetcher/fetcher.go
rename to eth/fetcher/block_fetcher.go
index 7d1e15fd4dea..4c7985ffa4dc 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// Package fetcher contains the block announcement based synchronisation.
+// Package fetcher contains the announcement based blocks or transaction synchronisation.
package fetcher
import (
@@ -29,16 +29,40 @@ import (
"github.com/XinFinOrg/XDPoSChain/consensus"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/metrics"
)
const (
- arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
+ arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
- fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block
- maxUncleDist = 7 // Maximum allowed backward distance from the chain head
- maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
- hashLimit = 256 // Maximum number of unique blocks a peer may have announced
- blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
+ fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction
+)
+
+const (
+ maxUncleDist = 7 // Maximum allowed backward distance from the chain head
+ maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
+ hashLimit = 256 // Maximum number of unique blocks a peer may have announced
+ blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
+)
+
+var (
+ blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
+ blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
+ blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
+ blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
+
+ blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
+ blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
+ blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
+ blockBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)
+
+ headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
+ bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
+
+ headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
+ headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
+ bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
+ bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
)
var (
@@ -66,17 +90,20 @@ type blockBroadcasterFn func(block *types.Block, propagate bool)
// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64
+// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
+type chainInsertFn func(types.Blocks) (int, error)
+
// blockInsertFn is a callback type to insert a batch of blocks into the local chain.
-type blockInsertFn func(block *types.Block) error
+type blockInsertFn func(types.Block) (error)
type blockPrepareFn func(block *types.Block) error
// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)
-// announce is the hash notification of the availability of a new block in the
+// blockAnnounce is the hash notification of the availability of a new block in the
// network.
-type announce struct {
+type blockAnnounce struct {
hash common.Hash // Hash of the block being announced
number uint64 // Number of the block being announced (0 = unknown | old protocol)
header *types.Header // Header of the block partially reassembled (new protocol)
@@ -104,18 +131,18 @@ type bodyFilterTask struct {
time time.Time // Arrival time of the blocks' contents
}
-// inject represents a schedules import operation.
-type inject struct {
+// blockInject represents a schedules import operation.
+type blockInject struct {
origin string
block *types.Block
}
-// Fetcher is responsible for accumulating block announcements from various peers
+// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
-type Fetcher struct {
+type BlockFetcher struct {
// Various event channels
- notify chan *announce
- inject chan *inject
+ notify chan *blockAnnounce
+ inject chan *blockInject
blockFilter chan chan []*types.Block
headerFilter chan chan *headerFilterTask
@@ -125,16 +152,16 @@ type Fetcher struct {
quit chan struct{}
// Announce states
- announces map[string]int // Per peer announce counts to prevent memory exhaustion
- announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching
- fetching map[common.Hash]*announce // Announced blocks, currently fetching
- fetched map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
- completing map[common.Hash]*announce // Blocks with headers, currently body-completing
+ announces map[string]int // Per peer blockAnnounce counts to prevent memory exhaustion
+ announced map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
+ fetching map[common.Hash]*blockAnnounce // Announced blocks, currently fetching
+ fetched map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
+ completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing
// Block cache
- queue *prque.Prque // Queue containing the import operations (block number sorted)
- queues map[string]int // Per peer block counts to prevent memory exhaustion
- queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)
+ queue *prque.Prque // Queue containing the import operations (block number sorted)
+ queues map[string]int // Per peer block counts to prevent memory exhaustion
+ queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)
knowns *lru.ARCCache
// Callbacks
getBlock blockRetrievalFn // Retrieves a block from the local chain
@@ -142,38 +169,69 @@ type Fetcher struct {
handleProposedBlock proposeBlockHandlerFn // Consensus v2 specific: Hanle new proposed block
broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
chainHeight chainHeightFn // Retrieves the current chain's height
- insertBlock blockInsertFn // Injects a batch of blocks into the chain
- prepareBlock blockPrepareFn
- dropPeer peerDropFn // Drops a peer for misbehaving
+ // insertChain chainInsertFn // Injects a batch of blocks into the chain
+ insertBlock blockInsertFn // Injects a batch of blocks into the chain
+ prepareBlock blockPrepareFn
+ dropPeer peerDropFn // Drops a peer for misbehaving
// Testing hooks
- announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
+ announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
+ importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62)
+
signHook func(*types.Block) error
appendM2HeaderHook func(*types.Block) (*types.Block, bool, error)
}
-// New creates a block fetcher to retrieve blocks based on hash announcements.
-func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, handleProposedBlock proposeBlockHandlerFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertBlock blockInsertFn, prepareBlock blockPrepareFn, dropPeer peerDropFn) *Fetcher {
+// // New creates a block fetcher to retrieve blocks based on hash announcements.
+// func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, handleProposedBlock proposeBlockHandlerFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertBlock blockInsertFn, prepareBlock blockPrepareFn, dropPeer peerDropFn) *Fetcher {
+// knownBlocks, _ := lru.NewARC(blockLimit)
+// return &Fetcher{
+// notify: make(chan *announce),
+// inject: make(chan *inject),
+// blockFilter: make(chan chan []*types.Block),
+// headerFilter: make(chan chan *headerFilterTask),
+// bodyFilter: make(chan chan *bodyFilterTask),
+// done: make(chan common.Hash),
+// quit: make(chan struct{}),
+// announces: make(map[string]int),
+// announced: make(map[common.Hash][]*announce),
+// fetching: make(map[common.Hash]*announce),
+// fetched: make(map[common.Hash][]*announce),
+// completing: make(map[common.Hash]*announce),
+// queue: prque.New(nil),
+// queues: make(map[string]int),
+// queued: make(map[common.Hash]*inject),
+// knowns: knownBlocks,
+// getBlock: getBlock,
+// verifyHeader: verifyHeader,
+// handleProposedBlock: handleProposedBlock,
+// broadcastBlock: broadcastBlock,
+// chainHeight: chainHeight,
+// insertBlock: insertBlock,
+// prepareBlock: prepareBlock,
+// dropPeer: dropPeer,
+// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
+
+func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, handleProposedBlock proposeBlockHandlerFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertBlock blockInsertFn, prepareBlock blockPrepareFn, dropPeer peerDropFn) *BlockFetcher {
knownBlocks, _ := lru.NewARC(blockLimit)
- return &Fetcher{
- notify: make(chan *announce),
- inject: make(chan *inject),
- blockFilter: make(chan chan []*types.Block),
+ return &BlockFetcher{
+ notify: make(chan *blockAnnounce),
+ inject: make(chan *blockInject),
headerFilter: make(chan chan *headerFilterTask),
bodyFilter: make(chan chan *bodyFilterTask),
done: make(chan common.Hash),
quit: make(chan struct{}),
announces: make(map[string]int),
- announced: make(map[common.Hash][]*announce),
- fetching: make(map[common.Hash]*announce),
- fetched: make(map[common.Hash][]*announce),
- completing: make(map[common.Hash]*announce),
+ announced: make(map[common.Hash][]*blockAnnounce),
+ fetching: make(map[common.Hash]*blockAnnounce),
+ fetched: make(map[common.Hash][]*blockAnnounce),
+ completing: make(map[common.Hash]*blockAnnounce),
queue: prque.New(nil),
queues: make(map[string]int),
- queued: make(map[common.Hash]*inject),
+ queued: make(map[common.Hash]*blockInject),
knowns: knownBlocks,
getBlock: getBlock,
verifyHeader: verifyHeader,
@@ -181,28 +239,29 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, handlePropose
broadcastBlock: broadcastBlock,
chainHeight: chainHeight,
insertBlock: insertBlock,
- prepareBlock: prepareBlock,
- dropPeer: dropPeer,
+ // insertChain: insertChain,
+ prepareBlock: prepareBlock,
+ dropPeer: dropPeer,
}
}
// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
-func (f *Fetcher) Start() {
+func (f *BlockFetcher) Start() {
go f.loop()
}
// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
-func (f *Fetcher) Stop() {
+func (f *BlockFetcher) Stop() {
close(f.quit)
}
// Notify announces the fetcher of the potential availability of a new block in
// the network.
-func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
+func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
- block := &announce{
+ block := &blockAnnounce{
hash: hash,
number: number,
time: time,
@@ -218,9 +277,9 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
}
}
-// Enqueue tries to fill gaps the the fetcher's future import queue.
-func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
- op := &inject{
+// Enqueue tries to fill gaps the fetcher's future import queue.
+func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
+ op := &blockInject{
origin: peer,
block: block,
}
@@ -234,7 +293,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
-func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
+func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
// Send the filter channel to the fetcher
@@ -262,7 +321,7 @@ func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.
// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
-func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
+func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
// Send the filter channel to the fetcher
@@ -290,7 +349,7 @@ func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction,
// Loop is the main fetcher loop, checking and processing various notification
// events.
-func (f *Fetcher) loop() {
+func (f *BlockFetcher) loop() {
// Iterate the block fetching until a quit is requested
fetchTimer := time.NewTimer(0)
completeTimer := time.NewTimer(0)
@@ -305,48 +364,49 @@ func (f *Fetcher) loop() {
// Import any queued blocks that could potentially fit
height := f.chainHeight()
for !f.queue.Empty() {
- op := f.queue.PopItem().(*inject)
+ op := f.queue.PopItem().(*blockInject)
+ hash := op.block.Hash()
if f.queueChangeHook != nil {
- f.queueChangeHook(op.block.Hash(), false)
+ f.queueChangeHook(hash, false)
}
// If too high up the chain or phase, continue later
number := op.block.NumberU64()
if number > height+1 {
- f.queue.Push(op, -int64(op.block.NumberU64()))
+ f.queue.Push(op, -int64(number))
if f.queueChangeHook != nil {
- f.queueChangeHook(op.block.Hash(), true)
+ f.queueChangeHook(hash, true)
}
break
}
// Otherwise if fresh and still unknown, try and import
- hash := op.block.Hash()
if number+maxUncleDist < height || f.getBlock(hash) != nil {
f.forgetBlock(hash)
continue
}
f.insert(op.origin, op.block)
}
+
// Wait for an outside event to occur
select {
case <-f.quit:
- // Fetcher terminating, abort all operations
+ // BlockFetcher terminating, abort all operations
return
case notification := <-f.notify:
// A block was announced, make sure the peer isn't DOSing us
- propAnnounceInMeter.Mark(1)
+ blockAnnounceInMeter.Mark(1)
count := f.announces[notification.origin] + 1
if count > hashLimit {
log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
- propAnnounceDOSMeter.Mark(1)
+ blockAnnounceDOSMeter.Mark(1)
break
}
// If we have a valid block number, check that it's potentially useful
if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
- propAnnounceDropMeter.Mark(1)
+ blockAnnounceDropMeter.Mark(1)
break
}
}
@@ -368,7 +428,7 @@ func (f *Fetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
- propBroadcastInMeter.Mark(1)
+ blockBroadcastInMeter.Mark(1)
f.enqueue(op.origin, op.block)
case hash := <-f.done:
@@ -454,8 +514,8 @@ func (f *Fetcher) loop() {
headerFilterInMeter.Mark(int64(len(task.headers)))
// Split the batch of headers into unknown ones (to return to the caller),
- // knowns incomplete ones (requiring body retrievals) and completed blocks.
- unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
+ // known incomplete ones (requiring body retrievals) and completed blocks.
+ unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
for _, header := range task.headers {
hash := header.Hash()
@@ -491,7 +551,7 @@ func (f *Fetcher) loop() {
f.forgetHash(hash)
}
} else {
- // Fetcher doesn't know about it, add to the return list
+ // BlockFetcher doesn't know about it, add to the return list
unknown = append(unknown, header)
}
}
@@ -578,8 +638,8 @@ func (f *Fetcher) loop() {
}
}
-// rescheduleFetch resets the specified fetch timer to the next announce timeout.
-func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
+// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
+func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
// Short circuit if no blocks are announced
if len(f.announced) == 0 {
return
@@ -595,7 +655,7 @@ func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
}
// rescheduleComplete resets the specified completion timer to the next fetch timeout.
-func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
+func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
// Short circuit if no headers are fetched
if len(f.fetched) == 0 {
return
@@ -612,7 +672,7 @@ func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
-func (f *Fetcher) enqueue(peer string, block *types.Block) {
+func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
hash := block.Hash()
if f.knowns.Contains(hash) {
log.Trace("Discarded propagated block, known block", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
@@ -622,20 +682,20 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
count := f.queues[peer] + 1
if count > blockLimit {
log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
- propBroadcastDOSMeter.Mark(1)
+ blockBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
- propBroadcastDropMeter.Mark(1)
+ blockBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
}
// Schedule the block for future importing
if _, ok := f.queued[hash]; !ok {
- op := &inject{
+ op := &blockInject{
origin: peer,
block: block,
}
@@ -653,7 +713,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
-func (f *Fetcher) insert(peer string, block *types.Block) {
+func (f *BlockFetcher) insert(peer string, block *types.Block) {
hash := block.Hash()
// Run the import on a new thread
@@ -667,17 +727,18 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
return
}
- fastBroadCast := true
+ fastBroadCast := true //TODO: double check if we need fastBroadCast logic
again:
err := f.verifyHeader(block.Header())
// Quickly validate the header and propagate the block if it passes
switch err {
case nil:
// All ok, quickly propagate to our peers
- propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
+ blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
if fastBroadCast {
go f.broadcastBlock(block, true)
}
+
case consensus.ErrFutureBlock:
delay := time.Unix(block.Time().Int64(), 0).Sub(time.Now()) // nolint: gosimple
log.Info("Receive future block", "number", block.NumberU64(), "hash", block.Hash().Hex(), "delay", delay)
@@ -708,7 +769,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
}
block = newBlock
fastBroadCast = false
- goto again
+ goto again //TODO: doublecheck if goto again logic is required
default:
// Something went very wrong, drop the peer
log.Warn("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
@@ -716,7 +777,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
return
}
// Run the actual import and log any issues
- if err := f.insertBlock(block); err != nil {
+ if err := f.insertBlock(*block); err != nil {
log.Warn("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
return
}
@@ -732,20 +793,25 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
log.Warn("[insert] Unable to handle new proposed block", "err", err, "number", block.Number(), "hash", block.Hash())
}
// If import succeeded, broadcast the block
- propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
+ blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
if !fastBroadCast {
- go f.broadcastBlock(block, true)
+ go f.broadcastBlock(block, false)
+ }
+
+ // Invoke the testing hook if needed
+ if f.importedHook != nil {
+ f.importedHook(block)
}
}()
}
// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
-func (f *Fetcher) forgetHash(hash common.Hash) {
+func (f *BlockFetcher) forgetHash(hash common.Hash) {
// Remove all pending announces and decrement DOS counters
for _, announce := range f.announced[hash] {
f.announces[announce.origin]--
- if f.announces[announce.origin] == 0 {
+ if f.announces[announce.origin] <= 0 {
delete(f.announces, announce.origin)
}
}
@@ -756,7 +822,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
// Remove any pending fetches and decrement the DOS counters
if announce := f.fetching[hash]; announce != nil {
f.announces[announce.origin]--
- if f.announces[announce.origin] == 0 {
+ if f.announces[announce.origin] <= 0 {
delete(f.announces, announce.origin)
}
delete(f.fetching, hash)
@@ -765,7 +831,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
// Remove any pending completion requests and decrement the DOS counters
for _, announce := range f.fetched[hash] {
f.announces[announce.origin]--
- if f.announces[announce.origin] == 0 {
+ if f.announces[announce.origin] <= 0 {
delete(f.announces, announce.origin)
}
}
@@ -774,7 +840,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
// Remove any pending completions and decrement the DOS counters
if announce := f.completing[hash]; announce != nil {
f.announces[announce.origin]--
- if f.announces[announce.origin] == 0 {
+ if f.announces[announce.origin] <= 0 {
delete(f.announces, announce.origin)
}
delete(f.completing, hash)
@@ -783,7 +849,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
-func (f *Fetcher) forgetBlock(hash common.Hash) {
+func (f *BlockFetcher) forgetBlock(hash common.Hash) {
if insert := f.queued[hash]; insert != nil {
f.queues[insert.origin]--
if f.queues[insert.origin] == 0 {
@@ -794,11 +860,11 @@ func (f *Fetcher) forgetBlock(hash common.Hash) {
}
// Bind double validate hook before block imported into chain.
-func (f *Fetcher) SetSignHook(signHook func(*types.Block) error) {
+func (f *BlockFetcher) SetSignHook(signHook func(*types.Block) error) {
f.signHook = signHook
}
// Bind append m2 to block header hook when imported into chain.
-func (f *Fetcher) SetAppendM2HeaderHook(appendM2HeaderHook func(*types.Block) (*types.Block, bool, error)) {
+func (f *BlockFetcher) SetAppendM2HeaderHook(appendM2HeaderHook func(*types.Block) (*types.Block, bool, error)) {
f.appendM2HeaderHook = appendM2HeaderHook
}
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/block_fetcher_test.go
similarity index 84%
rename from eth/fetcher/fetcher_test.go
rename to eth/fetcher/block_fetcher_test.go
index 484d62d87243..dfb442870e02 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -77,7 +77,7 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
// fetcherTester is a test simulator for mocking out local block chain.
type fetcherTester struct {
- fetcher *Fetcher
+ fetcher *BlockFetcher
hashes []common.Hash // Hash chain belonging to the tester
blocks map[common.Hash]*types.Block // Blocks belonging to the tester
@@ -93,7 +93,7 @@ func newTester() *fetcherTester {
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
- tester.fetcher = New(tester.getBlock, tester.verifyHeader, tester.handleProposedBlock, tester.broadcastBlock, tester.chainHeight, tester.insertBlock, tester.prepareBlock, tester.dropPeer)
+ tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.handleProposedBlock, tester.broadcastBlock, tester.chainHeight, tester.insertBlock, tester.prepareBlock, tester.dropPeer)
tester.fetcher.Start()
return tester
@@ -150,7 +150,7 @@ func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {
}
// insertBlock injects a new blocks into the simulated chain.
-func (f *fetcherTester) insertBlock(block *types.Block) error {
+func (f *fetcherTester) insertBlock(block types.Block) error {
f.lock.Lock()
defer f.lock.Unlock()
@@ -164,7 +164,7 @@ func (f *fetcherTester) insertBlock(block *types.Block) error {
}
// Otherwise build our current chain
f.hashes = append(f.hashes, block.Hash())
- f.blocks[block.Hash()] = block
+ f.blocks[block.Hash()] = &block
return nil
}
@@ -311,9 +311,11 @@ func verifyProposeBlockHandlerCalled(t *testing.T, proposedBlockChan chan *types
// Tests that a fetcher accepts block announcements and initiates retrievals for
// them, successfully importing into the local chain.
-func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
-func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
-func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }
+func TestSequentialAnnouncements62(t *testing.T) { testSequentialAnnouncements(t, 62) }
+func TestSequentialAnnouncements63(t *testing.T) { testSequentialAnnouncements(t, 63) }
+func TestSequentialAnnouncements64(t *testing.T) { testSequentialAnnouncements(t, 64) }
+func TestSequentialAnnouncements100(t *testing.T) { testSequentialAnnouncements(t, 100) }
+func TestSequentialAnnouncements101(t *testing.T) { testSequentialAnnouncements(t, 101) }
func testSequentialAnnouncements(t *testing.T, protocol int) {
// Create a chain of blocks to import
@@ -346,9 +348,11 @@ func testSequentialAnnouncements(t *testing.T, protocol int) {
// Tests that if blocks are announced by multiple peers (or even the same buggy
// peer), they will only get downloaded at most once.
-func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
-func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
-func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }
+func TestConcurrentAnnouncements62(t *testing.T) { testConcurrentAnnouncements(t, 62) }
+func TestConcurrentAnnouncements63(t *testing.T) { testConcurrentAnnouncements(t, 63) }
+func TestConcurrentAnnouncements64(t *testing.T) { testConcurrentAnnouncements(t, 64) }
+func TestConcurrentAnnouncements100(t *testing.T) { testConcurrentAnnouncements(t, 100) }
+func TestConcurrentAnnouncements101(t *testing.T) { testConcurrentAnnouncements(t, 101) }
func testConcurrentAnnouncements(t *testing.T, protocol int) {
// Create a chain of blocks to import
@@ -394,9 +398,11 @@ func testConcurrentAnnouncements(t *testing.T, protocol int) {
// Tests that announcements arriving while a previous is being fetched still
// results in a valid import.
-func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
-func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
-func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }
+func TestOverlappingAnnouncements62(t *testing.T) { testOverlappingAnnouncements(t, 62) }
+func TestOverlappingAnnouncements63(t *testing.T) { testOverlappingAnnouncements(t, 63) }
+func TestOverlappingAnnouncements64(t *testing.T) { testOverlappingAnnouncements(t, 64) }
+func TestOverlappingAnnouncements100(t *testing.T) { testOverlappingAnnouncements(t, 100) }
+func TestOverlappingAnnouncements101(t *testing.T) { testOverlappingAnnouncements(t, 101) }
func testOverlappingAnnouncements(t *testing.T, protocol int) {
// Create a chain of blocks to import
@@ -431,9 +437,11 @@ func testOverlappingAnnouncements(t *testing.T, protocol int) {
}
// Tests that announces already being retrieved will not be duplicated.
-func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
-func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
-func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }
+func TestPendingDeduplication62(t *testing.T) { testPendingDeduplication(t, 62) }
+func TestPendingDeduplication63(t *testing.T) { testPendingDeduplication(t, 63) }
+func TestPendingDeduplication64(t *testing.T) { testPendingDeduplication(t, 64) }
+func TestPendingDeduplication100(t *testing.T) { testPendingDeduplication(t, 100) }
+func TestPendingDeduplication101(t *testing.T) { testPendingDeduplication(t, 101) }
func testPendingDeduplication(t *testing.T, protocol int) {
// Create a hash and corresponding block
@@ -474,9 +482,11 @@ func testPendingDeduplication(t *testing.T, protocol int) {
// Tests that announcements retrieved in a random order are cached and eventually
// imported when all the gaps are filled in.
-func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
-func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
-func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }
+func TestRandomArrivalImport62(t *testing.T) { testRandomArrivalImport(t, 62) }
+func TestRandomArrivalImport63(t *testing.T) { testRandomArrivalImport(t, 63) }
+func TestRandomArrivalImport64(t *testing.T) { testRandomArrivalImport(t, 64) }
+func TestRandomArrivalImport100(t *testing.T) { testRandomArrivalImport(t, 100) }
+func TestRandomArrivalImport101(t *testing.T) { testRandomArrivalImport(t, 101) }
func testRandomArrivalImport(t *testing.T, protocol int) {
// Create a chain of blocks to import, and choose one to delay
@@ -508,9 +518,11 @@ func testRandomArrivalImport(t *testing.T, protocol int) {
// Tests that direct block enqueues (due to block propagation vs. hash announce)
// are correctly schedule, filling and import queue gaps.
-func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
-func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
-func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }
+func TestQueueGapFill62(t *testing.T) { testQueueGapFill(t, 62) }
+func TestQueueGapFill63(t *testing.T) { testQueueGapFill(t, 63) }
+func TestQueueGapFill64(t *testing.T) { testQueueGapFill(t, 64) }
+func TestQueueGapFill100(t *testing.T) { testQueueGapFill(t, 100) }
+func TestQueueGapFill101(t *testing.T) { testQueueGapFill(t, 101) }
func testQueueGapFill(t *testing.T, protocol int) {
// Create a chain of blocks to import, and choose one to not announce at all
@@ -542,9 +554,11 @@ func testQueueGapFill(t *testing.T, protocol int) {
// Tests that blocks arriving from various sources (multiple propagations, hash
// announces, etc) do not get scheduled for import multiple times.
-func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
-func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
-func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }
+func TestImportDeduplication62(t *testing.T) { testImportDeduplication(t, 62) }
+func TestImportDeduplication63(t *testing.T) { testImportDeduplication(t, 63) }
+func TestImportDeduplication64(t *testing.T) { testImportDeduplication(t, 64) }
+func TestImportDeduplication100(t *testing.T) { testImportDeduplication(t, 100) }
+func TestImportDeduplication101(t *testing.T) { testImportDeduplication(t, 101) }
func testImportDeduplication(t *testing.T, protocol int) {
// Create two blocks to import (one for duplication, the other for stalling)
@@ -556,7 +570,7 @@ func testImportDeduplication(t *testing.T, protocol int) {
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0)
counter := uint32(0)
- tester.fetcher.insertBlock = func(block *types.Block) error {
+ tester.fetcher.insertBlock = func(block types.Block) error {
atomic.AddUint32(&counter, uint32(1))
return tester.insertBlock(block)
}
@@ -620,9 +634,11 @@ func TestDistantPropagationDiscarding(t *testing.T) {
// Tests that announcements with numbers much lower or higher than out current
// head get discarded to prevent wasting resources on useless blocks from faulty
// peers.
-func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
-func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
-func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }
+func TestDistantAnnouncementDiscarding62(t *testing.T) { testDistantAnnouncementDiscarding(t, 62) }
+func TestDistantAnnouncementDiscarding63(t *testing.T) { testDistantAnnouncementDiscarding(t, 63) }
+func TestDistantAnnouncementDiscarding64(t *testing.T) { testDistantAnnouncementDiscarding(t, 64) }
+func TestDistantAnnouncementDiscarding100(t *testing.T) { testDistantAnnouncementDiscarding(t, 100) }
+func TestDistantAnnouncementDiscarding101(t *testing.T) { testDistantAnnouncementDiscarding(t, 101) }
func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
// Create a long chain to import and define the discard boundaries
@@ -663,9 +679,11 @@ func testDistantAnnouncementDiscarding(t *testing.T, protocol int) {
// Tests that peers announcing blocks with invalid numbers (i.e. not matching
// the headers provided afterwards) get dropped as malicious.
-func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
-func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
-func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }
+func TestInvalidNumberAnnouncement62(t *testing.T) { testInvalidNumberAnnouncement(t, 62) }
+func TestInvalidNumberAnnouncement63(t *testing.T) { testInvalidNumberAnnouncement(t, 63) }
+func TestInvalidNumberAnnouncement64(t *testing.T) { testInvalidNumberAnnouncement(t, 64) }
+func TestInvalidNumberAnnouncement100(t *testing.T) { testInvalidNumberAnnouncement(t, 100) }
+func TestInvalidNumberAnnouncement101(t *testing.T) { testInvalidNumberAnnouncement(t, 101) }
func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
// Create a single block to import and check numbers against
@@ -711,9 +729,11 @@ func testInvalidNumberAnnouncement(t *testing.T, protocol int) {
// Tests that if a block is empty (i.e. header only), no body request should be
// made, and instead the header should be assembled into a whole block in itself.
-func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
-func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
-func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }
+func TestEmptyBlockShortCircuit62(t *testing.T) { testEmptyBlockShortCircuit(t, 62) }
+func TestEmptyBlockShortCircuit63(t *testing.T) { testEmptyBlockShortCircuit(t, 63) }
+func TestEmptyBlockShortCircuit64(t *testing.T) { testEmptyBlockShortCircuit(t, 64) }
+func TestEmptyBlockShortCircuit100(t *testing.T) { testEmptyBlockShortCircuit(t, 100) }
+func TestEmptyBlockShortCircuit101(t *testing.T) { testEmptyBlockShortCircuit(t, 101) }
func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
// Create a chain of blocks to import
@@ -755,9 +775,11 @@ func testEmptyBlockShortCircuit(t *testing.T, protocol int) {
// Tests that a peer is unable to use unbounded memory with sending infinite
// block announcements to a node, but that even in the face of such an attack,
// the fetcher remains operational.
-func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
-func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
-func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
+func TestHashMemoryExhaustionAttack62(t *testing.T) { testHashMemoryExhaustionAttack(t, 62) }
+func TestHashMemoryExhaustionAttack63(t *testing.T) { testHashMemoryExhaustionAttack(t, 63) }
+func TestHashMemoryExhaustionAttack64(t *testing.T) { testHashMemoryExhaustionAttack(t, 64) }
+func TestHashMemoryExhaustionAttack100(t *testing.T) { testHashMemoryExhaustionAttack(t, 100) }
+func TestHashMemoryExhaustionAttack101(t *testing.T) { testHashMemoryExhaustionAttack(t, 101) }
func testHashMemoryExhaustionAttack(t *testing.T, protocol int) {
// Create a tester with instrumented import hooks
diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go
deleted file mode 100644
index eb4745904ae8..000000000000
--- a/eth/fetcher/metrics.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Contains the metrics collected by the fetcher.
-
-package fetcher
-
-import (
- "github.com/XinFinOrg/XDPoSChain/metrics"
-)
-
-var (
- propAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/in", nil)
- propAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/prop/announces/out", nil)
- propAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/drop", nil)
- propAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/dos", nil)
-
- propBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/in", nil)
- propBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/prop/broadcasts/out", nil)
- propBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/drop", nil)
- propBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/dos", nil)
-
- headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/headers", nil)
- bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/bodies", nil)
-
- headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/in", nil)
- headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/out", nil)
- bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/in", nil)
- bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/out", nil)
-)
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
new file mode 100644
index 000000000000..208a88cf097d
--- /dev/null
+++ b/eth/fetcher/tx_fetcher.go
@@ -0,0 +1,894 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package fetcher
+
+import (
+ "bytes"
+ "fmt"
+ mrand "math/rand"
+ "sort"
+ "time"
+
+ mapset "github.com/deckarep/golang-set"
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/common/mclock"
+ "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/metrics"
+)
+
+const (
+ // maxTxAnnounces is the maximum number of unique transaction a peer
+ // can announce in a short time.
+ maxTxAnnounces = 4096
+
+ // maxTxRetrievals is the maximum transaction number can be fetched in one
+ // request. The rationale to pick 256 is:
+ // - In eth protocol, the softResponseLimit is 2MB. Nowadays according to
+ // Etherscan the average transaction size is around 200B, so in theory
+ // we can include lots of transaction in a single protocol packet.
+ // - However the maximum size of a single transaction is raised to 128KB,
+ // so pick a middle value here to ensure we can maximize the efficiency
+ // of the retrieval and response size overflow won't happen in most cases.
+ maxTxRetrievals = 256
+
+ // maxTxUnderpricedSetSize is the size of the underpriced transaction set that
+ // is used to track recent transactions that have been dropped so we don't
+ // re-request them.
+ maxTxUnderpricedSetSize = 32768
+
+ // txArriveTimeout is the time allowance before an announced transaction is
+ // explicitly requested.
+ txArriveTimeout = 500 * time.Millisecond
+
+ // txGatherSlack is the interval used to collate almost-expired announces
+ // with network fetches.
+ txGatherSlack = 100 * time.Millisecond
+)
+
+var (
+ // txFetchTimeout is the maximum allotted time to return an explicitly
+ // requested transaction.
+ txFetchTimeout = 5 * time.Second
+)
+
+var (
+ txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
+ txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
+ txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
+ txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
+
+ txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
+ txBroadcastKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
+ txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
+ txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)
+
+ txRequestOutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
+ txRequestFailMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
+ txRequestDoneMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
+ txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)
+
+ txReplyInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
+ txReplyKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
+ txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
+ txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)
+
+ txFetcherWaitingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
+ txFetcherWaitingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
+ txFetcherQueueingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
+ txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
+ txFetcherFetchingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
+ txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
+)
+
+// txAnnounce is the notification of the availability of a batch
+// of new transactions in the network.
+type txAnnounce struct {
+ origin string // Identifier of the peer originating the notification
+ hashes []common.Hash // Batch of transaction hashes being announced
+}
+
+// txRequest represents an in-flight transaction retrieval request destined to
+// a specific peer.
+type txRequest struct {
+ hashes []common.Hash // Transactions having been requested
+ stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
+ time mclock.AbsTime // Timestamp of the request
+}
+
+// txDelivery is the notification that a batch of transactions have been added
+// to the pool and should be untracked.
+type txDelivery struct {
+ origin string // Identifier of the peer originating the notification
+ hashes []common.Hash // Batch of transaction hashes having been delivered
+ direct bool // Whether this is a direct reply or a broadcast
+}
+
+// txDrop is the notification that a peer has disconnected.
+type txDrop struct {
+ peer string
+}
+
+// TxFetcher is responsible for retrieving new transaction based on announcements.
+//
+// The fetcher operates in 3 stages:
+// - Transactions that are newly discovered are moved into a wait list.
+// - After ~500ms passes, transactions from the wait list that have not been
+// broadcast to us in whole are moved into a queueing area.
+// - When a connected peer doesn't have in-flight retrieval requests, any
+// transaction queued up (and announced by the peer) are allocated to the
+// peer and moved into a fetching status until it's fulfilled or fails.
+//
+// The invariants of the fetcher are:
+// - Each tracked transaction (hash) must only be present in one of the
+// three stages. This ensures that the fetcher operates akin to a finite
+// state automata and there's no data leak.
+// - Each peer that announced transactions may be scheduled retrievals, but
+// only ever one concurrently. This ensures we can immediately know what is
+// missing from a reply and reschedule it.
+type TxFetcher struct {
+ notify chan *txAnnounce
+ cleanup chan *txDelivery
+ drop chan *txDrop
+ quit chan struct{}
+
+ underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)
+
+ // Stage 1: Waiting lists for newly discovered transactions that might be
+ // broadcast without needing explicit request/reply round trips.
+ waitlist map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
+ waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
+ waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)
+
+ // Stage 2: Queue of transactions that waiting to be allocated to some peer
+ // to be retrieved directly.
+ announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
+ announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
+
+ // Stage 3: Set of transactions currently being retrieved, some which may be
+ // fulfilled and some rescheduled. Note, this step shares 'announces' from the
+ // previous stage to avoid having to duplicate (need it for DoS checks).
+ fetching map[common.Hash]string // Transaction set currently being retrieved
+ requests map[string]*txRequest // In-flight transaction retrievals
+ alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
+
+ // Callbacks
+ hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
+ addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
+ fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
+
+ step chan struct{} // Notification channel when the fetcher loop iterates
+ clock mclock.Clock // Time wrapper to simulate in tests
+ rand *mrand.Rand // Randomizer to use in tests instead of map range loops (soft-random)
+}
+
+// NewTxFetcher creates a transaction fetcher to retrieve transaction
+// based on hash announcements.
+func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
+ return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
+}
+
+// NewTxFetcherForTests is a testing method to mock out the realtime clock with
+// a simulated version and the internal randomness with a deterministic one.
+func NewTxFetcherForTests(
+ hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
+ clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
+ return &TxFetcher{
+ notify: make(chan *txAnnounce),
+ cleanup: make(chan *txDelivery),
+ drop: make(chan *txDrop),
+ quit: make(chan struct{}),
+ waitlist: make(map[common.Hash]map[string]struct{}),
+ waittime: make(map[common.Hash]mclock.AbsTime),
+ waitslots: make(map[string]map[common.Hash]struct{}),
+ announces: make(map[string]map[common.Hash]struct{}),
+ announced: make(map[common.Hash]map[string]struct{}),
+ fetching: make(map[common.Hash]string),
+ requests: make(map[string]*txRequest),
+ alternates: make(map[common.Hash]map[string]struct{}),
+ underpriced: mapset.NewSet(),
+ hasTx: hasTx,
+ addTxs: addTxs,
+ fetchTxs: fetchTxs,
+ clock: clock,
+ rand: rand,
+ }
+}
+
+// Notify announces the fetcher of the potential availability of a new batch of
+// transactions in the network.
+func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
+ // Keep track of all the announced transactions
+ txAnnounceInMeter.Mark(int64(len(hashes)))
+
+ // Skip any transaction announcements that we already know of, or that we've
+ // previously marked as cheap and discarded. This check is of course racey,
+ // because multiple concurrent notifies will still manage to pass it, but it's
+ // still valuable to check here because it runs concurrent to the internal
+ // loop, so anything caught here is time saved internally.
+ var (
+ unknowns = make([]common.Hash, 0, len(hashes))
+ duplicate, underpriced int64
+ )
+ for _, hash := range hashes {
+ switch {
+ case f.hasTx(hash):
+ duplicate++
+
+ case f.underpriced.Contains(hash):
+ underpriced++
+
+ default:
+ unknowns = append(unknowns, hash)
+ }
+ }
+ txAnnounceKnownMeter.Mark(duplicate)
+ txAnnounceUnderpricedMeter.Mark(underpriced)
+
+ // If anything's left to announce, push it into the internal loop
+ if len(unknowns) == 0 {
+ return nil
+ }
+ announce := &txAnnounce{
+ origin: peer,
+ hashes: unknowns,
+ }
+ select {
+ case f.notify <- announce:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Enqueue imports a batch of received transaction into the transaction pool
+// and the fetcher. This method may be called by both transaction broadcasts and
+// direct request replies. The differentiation is important so the fetcher can
+// re-schedule missing transactions as soon as possible.
+func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
+ // Keep track of all the propagated transactions
+ if direct {
+ txReplyInMeter.Mark(int64(len(txs)))
+ } else {
+ txBroadcastInMeter.Mark(int64(len(txs)))
+ }
+ // Push all the transactions into the pool, tracking underpriced ones to avoid
+ // re-requesting them and dropping the peer in case of malicious transfers.
+ var (
+ added = make([]common.Hash, 0, len(txs))
+ duplicate int64
+ underpriced int64
+ otherreject int64
+ )
+ errs := f.addTxs(txs)
+ for i, err := range errs {
+ if err != nil {
+ // Track the transaction hash if the price is too low for us.
+ // Avoid re-request this transaction when we receive another
+ // announcement.
+ if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
+ for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
+ f.underpriced.Pop()
+ }
+ f.underpriced.Add(txs[i].Hash())
+ }
+ // Track a few interesting failure types
+ switch err {
+ case nil: // Noop, but need to handle to not count these
+
+ case core.ErrAlreadyKnown:
+ duplicate++
+
+ case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
+ underpriced++
+
+ default:
+ otherreject++
+ }
+ }
+ added = append(added, txs[i].Hash())
+ }
+ if direct {
+ txReplyKnownMeter.Mark(duplicate)
+ txReplyUnderpricedMeter.Mark(underpriced)
+ txReplyOtherRejectMeter.Mark(otherreject)
+ } else {
+ txBroadcastKnownMeter.Mark(duplicate)
+ txBroadcastUnderpricedMeter.Mark(underpriced)
+ txBroadcastOtherRejectMeter.Mark(otherreject)
+ }
+ select {
+ case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Drop should be called when a peer disconnects. It cleans up all the internal
+// data structures of the given node.
+func (f *TxFetcher) Drop(peer string) error {
+ select {
+ case f.drop <- &txDrop{peer: peer}:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Start boots up the announcement based synchroniser, accepting and processing
+// hash notifications and block fetches until termination requested.
+func (f *TxFetcher) Start() {
+ go f.loop()
+}
+
+// Stop terminates the announcement based synchroniser, canceling all pending
+// operations.
+func (f *TxFetcher) Stop() {
+ close(f.quit)
+}
+
+func (f *TxFetcher) loop() {
+ var (
+ waitTimer = new(mclock.Timer)
+ timeoutTimer = new(mclock.Timer)
+
+ waitTrigger = make(chan struct{}, 1)
+ timeoutTrigger = make(chan struct{}, 1)
+ )
+ for {
+ select {
+ case ann := <-f.notify:
+ // Drop part of the new announcements if there are too many accumulated.
+ // Note, we could but do not filter already known transactions here as
+ // the probability of something arriving between this call and the pre-
+ // filter outside is essentially zero.
+ used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
+ if used >= maxTxAnnounces {
+ // This can happen if a set of transactions are requested but not
+ // all fulfilled, so the remainder are rescheduled without the cap
+ // check. Should be fine as the limit is in the thousands and the
+ // request size in the hundreds.
+ txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
+ break
+ }
+ want := used + len(ann.hashes)
+ if want > maxTxAnnounces {
+ txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
+ ann.hashes = ann.hashes[:want-maxTxAnnounces]
+ }
+ // All is well, schedule the remainder of the transactions
+ idleWait := len(f.waittime) == 0
+ _, oldPeer := f.announces[ann.origin]
+
+ for _, hash := range ann.hashes {
+ // If the transaction is already downloading, add it to the list
+ // of possible alternates (in case the current retrieval fails) and
+ // also account it for the peer.
+ if f.alternates[hash] != nil {
+ f.alternates[hash][ann.origin] = struct{}{}
+
+ // Stage 2 and 3 share the set of origins per tx
+ if announces := f.announces[ann.origin]; announces != nil {
+ announces[hash] = struct{}{}
+ } else {
+ f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ continue
+ }
+ // If the transaction is not downloading, but is already queued
+ // from a different peer, track it for the new peer too.
+ if f.announced[hash] != nil {
+ f.announced[hash][ann.origin] = struct{}{}
+
+ // Stage 2 and 3 share the set of origins per tx
+ if announces := f.announces[ann.origin]; announces != nil {
+ announces[hash] = struct{}{}
+ } else {
+ f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ continue
+ }
+ // If the transaction is already known to the fetcher, but not
+ // yet downloading, add the peer as an alternate origin in the
+ // waiting list.
+ if f.waitlist[hash] != nil {
+ f.waitlist[hash][ann.origin] = struct{}{}
+
+ if waitslots := f.waitslots[ann.origin]; waitslots != nil {
+ waitslots[hash] = struct{}{}
+ } else {
+ f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ continue
+ }
+ // Transaction unknown to the fetcher, insert it into the waiting list
+ f.waitlist[hash] = map[string]struct{}{ann.origin: struct{}{}}
+ f.waittime[hash] = f.clock.Now()
+
+ if waitslots := f.waitslots[ann.origin]; waitslots != nil {
+ waitslots[hash] = struct{}{}
+ } else {
+ f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ }
+ // If a new item was added to the waitlist, schedule it into the fetcher
+ if idleWait && len(f.waittime) > 0 {
+ f.rescheduleWait(waitTimer, waitTrigger)
+ }
+ // If this peer is new and announced something already queued, maybe
+ // request transactions from them
+ if !oldPeer && len(f.announces[ann.origin]) > 0 {
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: struct{}{}})
+ }
+
+ case <-waitTrigger:
+ // At least one transaction's waiting time ran out, push all expired
+ // ones into the retrieval queues
+ actives := make(map[string]struct{})
+ for hash, instance := range f.waittime {
+ if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
+ // Transaction expired without propagation, schedule for retrieval
+ if f.announced[hash] != nil {
+ panic("announce tracker already contains waitlist item")
+ }
+ f.announced[hash] = f.waitlist[hash]
+ for peer := range f.waitlist[hash] {
+ if announces := f.announces[peer]; announces != nil {
+ announces[hash] = struct{}{}
+ } else {
+ f.announces[peer] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ delete(f.waitslots[peer], hash)
+ if len(f.waitslots[peer]) == 0 {
+ delete(f.waitslots, peer)
+ }
+ actives[peer] = struct{}{}
+ }
+ delete(f.waittime, hash)
+ delete(f.waitlist, hash)
+ }
+ }
+ // If transactions are still waiting for propagation, reschedule the wait timer
+ if len(f.waittime) > 0 {
+ f.rescheduleWait(waitTimer, waitTrigger)
+ }
+ // If any peers became active and are idle, request transactions from them
+ if len(actives) > 0 {
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
+ }
+
+ case <-timeoutTrigger:
+ // Clean up any expired retrievals and avoid re-requesting them from the
+ // same peer (either overloaded or malicious, useless in both cases). We
+ // could also penalize (Drop), but there's nothing to gain, and if could
+ // possibly further increase the load on it.
+ for peer, req := range f.requests {
+ if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
+ txRequestTimeoutMeter.Mark(int64(len(req.hashes)))
+
+ // Reschedule all the not-yet-delivered fetches to alternate peers
+ for _, hash := range req.hashes {
+ // Skip rescheduling hashes already delivered by someone else
+ if req.stolen != nil {
+ if _, ok := req.stolen[hash]; ok {
+ continue
+ }
+ }
+ // Move the delivery back from fetching to queued
+ if _, ok := f.announced[hash]; ok {
+ panic("announced tracker already contains alternate item")
+ }
+ if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
+ f.announced[hash] = f.alternates[hash]
+ }
+ delete(f.announced[hash], peer)
+ if len(f.announced[hash]) == 0 {
+ delete(f.announced, hash)
+ }
+ delete(f.announces[peer], hash)
+ delete(f.alternates, hash)
+ delete(f.fetching, hash)
+ }
+ if len(f.announces[peer]) == 0 {
+ delete(f.announces, peer)
+ }
+ // Keep track of the request as dangling, but never expire
+ f.requests[peer].hashes = nil
+ }
+ }
+ // Schedule a new transaction retrieval
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
+
+ // No idea if we sheduled something or not, trigger the timer if needed
+ // TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
+ f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
+
+ case delivery := <-f.cleanup:
+ // Independent if the delivery was direct or broadcast, remove all
+ // traces of the hash from internal trackers
+ for _, hash := range delivery.hashes {
+ if _, ok := f.waitlist[hash]; ok {
+ for peer, txset := range f.waitslots {
+ delete(txset, hash)
+ if len(txset) == 0 {
+ delete(f.waitslots, peer)
+ }
+ }
+ delete(f.waitlist, hash)
+ delete(f.waittime, hash)
+ } else {
+ for peer, txset := range f.announces {
+ delete(txset, hash)
+ if len(txset) == 0 {
+ delete(f.announces, peer)
+ }
+ }
+ delete(f.announced, hash)
+ delete(f.alternates, hash)
+
+ // If a transaction currently being fetched from a different
+ // origin was delivered (delivery stolen), mark it so the
+ // actual delivery won't double schedule it.
+ if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
+ stolen := f.requests[origin].stolen
+ if stolen == nil {
+ f.requests[origin].stolen = make(map[common.Hash]struct{})
+ stolen = f.requests[origin].stolen
+ }
+ stolen[hash] = struct{}{}
+ }
+ delete(f.fetching, hash)
+ }
+ }
+ // In case of a direct delivery, also reschedule anything missing
+ // from the original query
+ if delivery.direct {
+ // Mark the reqesting successful (independent of individual status)
+ txRequestDoneMeter.Mark(int64(len(delivery.hashes)))
+
+ // Make sure something was pending, nuke it
+ req := f.requests[delivery.origin]
+ if req == nil {
+ log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
+ break
+ }
+ delete(f.requests, delivery.origin)
+
+ // Anything not delivered should be re-scheduled (with or without
+ // this peer, depending on the response cutoff)
+ delivered := make(map[common.Hash]struct{})
+ for _, hash := range delivery.hashes {
+ delivered[hash] = struct{}{}
+ }
+ cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
+ for i, hash := range req.hashes {
+ if _, ok := delivered[hash]; ok {
+ cutoff = i
+ }
+ }
+ // Reschedule missing hashes from alternates, not-fulfilled from alt+self
+ for i, hash := range req.hashes {
+ // Skip rescheduling hashes already delivered by someone else
+ if req.stolen != nil {
+ if _, ok := req.stolen[hash]; ok {
+ continue
+ }
+ }
+ if _, ok := delivered[hash]; !ok {
+ if i < cutoff {
+ delete(f.alternates[hash], delivery.origin)
+ delete(f.announces[delivery.origin], hash)
+ if len(f.announces[delivery.origin]) == 0 {
+ delete(f.announces, delivery.origin)
+ }
+ }
+ if len(f.alternates[hash]) > 0 {
+ if _, ok := f.announced[hash]; ok {
+ panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
+ }
+ f.announced[hash] = f.alternates[hash]
+ }
+ }
+ delete(f.alternates, hash)
+ delete(f.fetching, hash)
+ }
+ // Something was delivered, try to rechedule requests
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
+ }
+
+ case drop := <-f.drop:
+ // A peer was dropped, remove all traces of it
+ if _, ok := f.waitslots[drop.peer]; ok {
+ for hash := range f.waitslots[drop.peer] {
+ delete(f.waitlist[hash], drop.peer)
+ if len(f.waitlist[hash]) == 0 {
+ delete(f.waitlist, hash)
+ delete(f.waittime, hash)
+ }
+ }
+ delete(f.waitslots, drop.peer)
+ if len(f.waitlist) > 0 {
+ f.rescheduleWait(waitTimer, waitTrigger)
+ }
+ }
+ // Clean up any active requests
+ var request *txRequest
+ if request = f.requests[drop.peer]; request != nil {
+ for _, hash := range request.hashes {
+ // Skip rescheduling hashes already delivered by someone else
+ if request.stolen != nil {
+ if _, ok := request.stolen[hash]; ok {
+ continue
+ }
+ }
+ // Undelivered hash, reschedule if there's an alternative origin available
+ delete(f.alternates[hash], drop.peer)
+ if len(f.alternates[hash]) == 0 {
+ delete(f.alternates, hash)
+ } else {
+ f.announced[hash] = f.alternates[hash]
+ delete(f.alternates, hash)
+ }
+ delete(f.fetching, hash)
+ }
+ delete(f.requests, drop.peer)
+ }
+ // Clean up general announcement tracking
+ if _, ok := f.announces[drop.peer]; ok {
+ for hash := range f.announces[drop.peer] {
+ delete(f.announced[hash], drop.peer)
+ if len(f.announced[hash]) == 0 {
+ delete(f.announced, hash)
+ }
+ }
+ delete(f.announces, drop.peer)
+ }
+ // If a request was cancelled, check if anything needs to be rescheduled
+ if request != nil {
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
+ f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
+ }
+
+ case <-f.quit:
+ return
+ }
+ // No idea what happened, but bump some sanity metrics
+ txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
+ txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
+ txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
+ txFetcherQueueingHashes.Update(int64(len(f.announced)))
+ txFetcherFetchingPeers.Update(int64(len(f.requests)))
+ txFetcherFetchingHashes.Update(int64(len(f.fetching)))
+
+ // Loop did something, ping the step notifier if needed (tests)
+ if f.step != nil {
+ f.step <- struct{}{}
+ }
+ }
+}
+
+// rescheduleWait iterates over all the transactions currently in the waitlist
+// and schedules the movement into the fetcher for the earliest.
+//
+// The method has a granularity of 'gatherSlack', since there's not much point in
+// spinning over all the transactions just to maybe find one that should trigger
+// a few ms earlier.
+func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
+ if *timer != nil {
+ (*timer).Stop()
+ }
+ now := f.clock.Now()
+
+ earliest := now
+ for _, instance := range f.waittime {
+ if earliest > instance {
+ earliest = instance
+ if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
+ break
+ }
+ }
+ }
+ *timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
+ trigger <- struct{}{}
+ })
+}
+
+// rescheduleTimeout iterates over all the transactions currently in flight and
+// schedules a cleanup run when the first would trigger.
+//
+// The method has a granularity of 'gatherSlack', since there's not much point in
+// spinning over all the transactions just to maybe find one that should trigger
+// a few ms earlier.
+//
+// This method is a bit "flaky" "by design". In theory the timeout timer only ever
+// should be rescheduled if some request is pending. In practice, a timeout will
+// cause the timer to be rescheduled every 5 secs (until the peer comes through or
+// disconnects). This is a limitation of the fetcher code because we don't trac
+// pending requests and timed out requests separatey. Without double tracking, if
+// we simply didn't reschedule the timer on all-timeout then the timer would never
+// be set again since len(request) > 0 => something's running.
+func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
+ if *timer != nil {
+ (*timer).Stop()
+ }
+ now := f.clock.Now()
+
+ earliest := now
+ for _, req := range f.requests {
+ // If this request already timed out, skip it altogether
+ if req.hashes == nil {
+ continue
+ }
+ if earliest > req.time {
+ earliest = req.time
+ if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
+ break
+ }
+ }
+ }
+ *timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
+ trigger <- struct{}{}
+ })
+}
+
+// scheduleFetches starts a batch of retrievals for all available idle peers.
+func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
+ // Gather the set of peers we want to retrieve from (default to all)
+ actives := whitelist
+ if actives == nil {
+ actives = make(map[string]struct{})
+ for peer := range f.announces {
+ actives[peer] = struct{}{}
+ }
+ }
+ if len(actives) == 0 {
+ return
+ }
+ // For each active peer, try to schedule some transaction fetches
+ idle := len(f.requests) == 0
+
+ f.forEachPeer(actives, func(peer string) {
+ if f.requests[peer] != nil {
+ return // continue in the for-each
+ }
+ if len(f.announces[peer]) == 0 {
+ return // continue in the for-each
+ }
+ hashes := make([]common.Hash, 0, maxTxRetrievals)
+ f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
+ if _, ok := f.fetching[hash]; !ok {
+ // Mark the hash as fetching and stash away possible alternates
+ f.fetching[hash] = peer
+
+ if _, ok := f.alternates[hash]; ok {
+ panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
+ }
+ f.alternates[hash] = f.announced[hash]
+ delete(f.announced, hash)
+
+ // Accumulate the hash and stop if the limit was reached
+ hashes = append(hashes, hash)
+ if len(hashes) >= maxTxRetrievals {
+ return false // break in the for-each
+ }
+ }
+ return true // continue in the for-each
+ })
+ // If any hashes were allocated, request them from the peer
+ if len(hashes) > 0 {
+ f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
+ txRequestOutMeter.Mark(int64(len(hashes)))
+
+ go func(peer string, hashes []common.Hash) {
+ // Try to fetch the transactions, but in case of a request
+ // failure (e.g. peer disconnected), reschedule the hashes.
+ if err := f.fetchTxs(peer, hashes); err != nil {
+ txRequestFailMeter.Mark(int64(len(hashes)))
+ f.Drop(peer)
+ }
+ }(peer, hashes)
+ }
+ })
+ // If a new request was fired, schedule a timeout timer
+ if idle && len(f.requests) > 0 {
+ f.rescheduleTimeout(timer, timeout)
+ }
+}
+
+// forEachPeer does a range loop over a map of peers in production, but during
+// testing it does a deterministic sorted random to allow reproducing issues.
+func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
+ // If we're running production, use whatever Go's map gives us
+ if f.rand == nil {
+ for peer := range peers {
+ do(peer)
+ }
+ return
+ }
+ // We're running the test suite, make iteration deterministic
+ list := make([]string, 0, len(peers))
+ for peer := range peers {
+ list = append(list, peer)
+ }
+ sort.Strings(list)
+ rotateStrings(list, f.rand.Intn(len(list)))
+ for _, peer := range list {
+ do(peer)
+ }
+}
+
+// forEachHash does a range loop over a map of hashes in production, but during
+// testing it does a deterministic sorted random to allow reproducing issues.
+func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
+ // If we're running production, use whatever Go's map gives us
+ if f.rand == nil {
+ for hash := range hashes {
+ if !do(hash) {
+ return
+ }
+ }
+ return
+ }
+ // We're running the test suite, make iteration deterministic
+ list := make([]common.Hash, 0, len(hashes))
+ for hash := range hashes {
+ list = append(list, hash)
+ }
+ sortHashes(list)
+ rotateHashes(list, f.rand.Intn(len(list)))
+ for _, hash := range list {
+ if !do(hash) {
+ return
+ }
+ }
+}
+
// rotateStrings rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration but keep it deterministic.
func rotateStrings(slice []string, n int) {
	if len(slice) == 0 {
		return
	}
	n %= len(slice)

	// Rebuild the slice as tail+head and copy it back in place
	rotated := make([]string, 0, len(slice))
	rotated = append(rotated, slice[n:]...)
	rotated = append(rotated, slice[:n]...)
	copy(slice, rotated)
}
+
+// sortHashes sorts a slice of hashes. This method is only used in tests in order
+// to simulate random map iteration but keep it deterministic.
+func sortHashes(slice []common.Hash) {
+ for i := 0; i < len(slice); i++ {
+ for j := i + 1; j < len(slice); j++ {
+ if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
+ slice[i], slice[j] = slice[j], slice[i]
+ }
+ }
+ }
+}
+
+// rotateHashes rotates the contents of a slice by n steps. This method is only
+// used in tests to simulate random map iteration but keep it deterministic.
+func rotateHashes(slice []common.Hash, n int) {
+ orig := make([]common.Hash, len(slice))
+ copy(orig, slice)
+
+ for i := 0; i < len(orig); i++ {
+ slice[i] = orig[(i+n)%len(orig)]
+ }
+}
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
new file mode 100644
index 000000000000..cf375b8356eb
--- /dev/null
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -0,0 +1,1528 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package fetcher
+
+import (
+ "errors"
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/common/mclock"
+ "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/types"
+)
+
var (
	// testTxs is a set of transactions to use during testing that have meaningful hashes.
	testTxs = []*types.Transaction{
		types.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
	}
	// testTxsHashes is the hashes of the test transactions above
	testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
)
+
// doTxNotify is a test step announcing a set of transaction hashes from a
// given peer to the fetcher under test.
type doTxNotify struct {
	peer   string
	hashes []common.Hash
}

// doTxEnqueue is a test step delivering a batch of transactions from a peer,
// either as a direct request reply (direct == true) or as a plain broadcast.
type doTxEnqueue struct {
	peer   string
	txs    []*types.Transaction
	direct bool
}

// doWait is a test step advancing the simulated clock by the given duration;
// if step is set, the runner also waits for the fetcher loop to signal that it
// processed an event.
type doWait struct {
	time time.Duration
	step bool
}

// doDrop is a test step simulating the disconnection of the named peer.
type doDrop string

// doFunc is a test step running an arbitrary callback (e.g. to unblock a
// stalled fetch from another goroutine).
type doFunc func()

// isWaiting is a test assertion on the expected contents of the fetcher's
// waitlist, keyed by the announcing peer.
type isWaiting map[string][]common.Hash

// isScheduled is a test assertion on the fetcher's scheduler state: which
// hashes are tracked per peer, which are actively being fetched, and which
// requests are dangling (timed out but not yet forgotten).
type isScheduled struct {
	tracking map[string][]common.Hash
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}

// isUnderpriced is a test assertion, presumably on the number of transactions
// tracked as underpriced — its usage is not visible in this chunk; confirm
// against the underpriced-handling tests further down the file.
type isUnderpriced int
+
// txFetcherTest represents a test scenario that can be executed by the test
// runner.
type txFetcherTest struct {
	init  func() *TxFetcher // Constructs the fetcher instance under test
	steps []interface{}     // Sequence of do* actions and is* assertions, executed in order
}
+
// Tests that transaction announcements are added to a waitlist, and none
// of them are scheduled for retrieval until the wait expires.
func TestTransactionFetcherWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			// Announce from a new peer to check that no overwrite happens
			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
				"B": {{0x03}, {0x04}},
			}),
			// Announce clashing hashes but unique new peer
			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
				"B": {{0x03}, {0x04}},
				"C": {{0x01}, {0x04}},
			}),
			// Announce existing and clashing hashes from existing peer
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}, {0x03}, {0x05}},
				"B": {{0x03}, {0x04}},
				"C": {{0x01}, {0x04}},
			}),
			// Nothing should be scheduled while everything is still waiting
			isScheduled{tracking: nil, fetching: nil},

			// Wait for the arrival timeout which should move all expired items
			// from the wait list to the scheduler
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
				},
			},
			// Queue up a non-fetchable transaction and then trigger it with a new
			// peer (weird case to test 1 line in the fetcher)
			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}},
			isWaiting(map[string][]common.Hash{
				"C": {{0x06}, {0x07}},
			}),
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
				},
			},
			// A fresh peer announcing already-queued but unfetched hashes should
			// start retrieving them immediately
			doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
					"D": {{0x06}, {0x07}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x06}, {0x07}},
				},
			},
		},
	})
}
+
// Tests that transaction announcements skip the waiting list if they are
// already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from the same peer, ensure the new ones end up
			// in stage one, and clashing ones don't get double tracked
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from a new peer, ensure new transactions end up
			// in stage one and clashing ones get tracked for the new peer
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
				"B": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
		},
	})
}
+
// Tests that only a single transaction request gets scheduled to a peer
// and subsequent announces block or get allotted to someone else.
func TestTransactionFetcherSingletonRequesting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a new set of transactions from the same peer and ensure
			// they do not start fetching since the peer is already busy
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// After the arrival timeout the new hashes are tracked, but still
			// not fetched since the singleton request is outstanding
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a duplicate set of transactions from a new peer and ensure
			// uniquely new ones start downloading, even if clashing.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
			isWaiting(map[string][]common.Hash{
				"B": {{0x05}, {0x06}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
					"B": {{0x02}, {0x03}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x03}},
				},
			},
		},
	})
}
+
// Tests that if a transaction retrieval fails, all the transactions get
// instantly schedule back to someone else or the announcements dropped
// if no alternate source is available.
func TestTransactionFetcherFailedRescheduling(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})

	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(origin string, hashes []common.Hash) error {
					// Block until the test explicitly releases this fetch,
					// then report a failure to trigger rescheduling
					<-proceed
					return errors.New("peer disconnected")
				},
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// While the original peer is stuck in the request, push in a second
			// data source.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Wait until the original request fails and check that transactions
			// are either rescheduled or dropped
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
			},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer B to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}
+
// Tests that if a transaction retrieval succeeds, all alternate origins
// are cleaned up.
func TestTransactionFetcherCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Accept every delivered transaction
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Request should be delivered
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}
+
// Tests that if a transaction retrieval succeeds, but the response is empty
// (no transactions available), then all are nuked instead of being rescheduled
// (yes, this was a bug).
func TestTransactionFetcherCleanupEmpty(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Accept every delivered transaction
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}
+
// Tests that non-returned transactions are either re-scheduled from a
// different peer, or self if they are after the cutoff point.
func TestTransactionFetcherMissingRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Accept every delivered transaction
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
			},
			// Deliver the middle transaction requested, the one before which
			// should be dropped and the one after re-requested.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
			},
		},
	})
}
+
// Tests that out of two transactions, if one is missing and the last is
// delivered, the peer gets properly cleaned out from the internal state.
func TestTransactionFetcherMissingCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Accept every delivered transaction
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0], testTxsHashes[1]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
			},
			// Deliver the last transaction requested; the undelivered one
			// before the cutoff should be dropped, fully cleaning the peer.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
			isScheduled{nil, nil, nil},
		},
	})
}
+
// Tests that transaction broadcasts properly clean up announcements.
func TestTransactionFetcherBroadcasts(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Accept every delivered transaction
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Set up three transactions to be in different states: waiting, queued and fetching
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},

			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Broadcast all the transactions and ensure everything gets cleaned
			// up, but the dangling request is left alone to avoid doing multiple
			// concurrent requests.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver the requested hashes
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}
+
// Tests that the waiting list timers properly reset and reschedule.
func TestTransactionFetcherWaitTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Announce one hash and advance only half the arrival timeout: the
			// item must still be waiting
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},
			doWait{time: txArriveTimeout / 2, step: false},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},

			// Announce a second hash, then advance the remaining half: only the
			// first (older) item should expire and get scheduled
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{nil, nil, nil},
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(map[string][]common.Hash{
				"A": {{0x02}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},

			// Advance the final half: the rescheduled timer must now expire the
			// second item too
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
		},
	})
}
+
+// Tests that if a transaction request is not replied to, it will time
+// out and be re-scheduled for someone else.
+func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Wait until the delivery times out, everything should be cleaned up
+ doWait{time: txFetchTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ // Ensure that followup announcements don't get scheduled
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[1]},
+ },
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ // If the dangling request arrives a bit later, do not choke
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[1]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[1]},
+ },
+ },
+ },
+ })
+}
+
+// Tests that the fetching timeout timers properly reset and reschedule.
+func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
+ doWait{time: txArriveTimeout, step: true},
+
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}},
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ "B": {{0x02}},
+ },
+ },
+ doWait{time: txFetchTimeout - txArriveTimeout, step: true},
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "B": {{0x02}},
+ },
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ doWait{time: txArriveTimeout, step: true},
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ "B": {},
+ },
+ },
+ },
+ })
+}
+
+// Tests that if thousands of transactions are announced, only a small
+// number of them will be requested at a time.
+func TestTransactionFetcherRateLimiting(t *testing.T) {
+	// Create a slew of transactions to announce them
+ var hashes []common.Hash
+ for i := 0; i < maxTxAnnounces; i++ {
+ hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
+ }
+
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Announce all the transactions, wait a bit and ensure only a small
+ // percentage gets requested
+ doTxNotify{peer: "A", hashes: hashes},
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashes,
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashes[1643 : 1643+maxTxRetrievals],
+ },
+ },
+ },
+ })
+}
+
+// Tests that the number of transactions a peer is allowed to announce and/or
+// request at the same time is hard capped.
+func TestTransactionFetcherDoSProtection(t *testing.T) {
+	// Create a slew of transactions to announce them
+ var hashesA []common.Hash
+ for i := 0; i < maxTxAnnounces+1; i++ {
+ hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
+ }
+ var hashesB []common.Hash
+ for i := 0; i < maxTxAnnounces+1; i++ {
+ hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
+ }
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Announce half of the transaction and wait for them to be scheduled
+ doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
+ doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
+ doWait{time: txArriveTimeout, step: true},
+
+ // Announce the second half and keep them in the wait list
+ doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
+ doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},
+
+ // Ensure the hashes are split half and half
+ isWaiting(map[string][]common.Hash{
+ "A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
+ "B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashesA[:maxTxAnnounces/2],
+ "B": hashesB[:maxTxAnnounces/2-1],
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashesA[1643 : 1643+maxTxRetrievals],
+ "B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
+ },
+ },
+ // Ensure that adding even one more hash results in dropping the hash
+ doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
+ doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},
+
+ isWaiting(map[string][]common.Hash{
+ "A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
+ "B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashesA[:maxTxAnnounces/2],
+ "B": hashesB[:maxTxAnnounces/2-1],
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashesA[1643 : 1643+maxTxRetrievals],
+ "B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
+ },
+ },
+ },
+ })
+}
+
+// Tests that underpriced transactions don't get rescheduled after being rejected.
+func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ errs := make([]error, len(txs))
+ for i := 0; i < len(errs); i++ {
+ if i%2 == 0 {
+ errs[i] = core.ErrUnderpriced
+ } else {
+ errs[i] = core.ErrReplaceUnderpriced
+ }
+ }
+ return errs
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Deliver a transaction through the fetcher, but reject as underpriced
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
+ isScheduled{nil, nil, nil},
+
+ // Try to announce the transaction again, ensure it's not scheduled back
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ }),
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that underpriced transactions don't get rescheduled after being rejected,
+// but at the same time there's a hard cap on the number of transactions that are
+// tracked.
+func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
+ // Temporarily disable fetch timeouts as they massively mess up the simulated clock
+ defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
+ txFetchTimeout = 24 * time.Hour
+
+ // Create a slew of transactions to max out the underpriced set
+ var txs []*types.Transaction
+ for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
+ txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
+ }
+ hashes := make([]common.Hash, len(txs))
+ for i, tx := range txs {
+ hashes[i] = tx.Hash()
+ }
+ // Generate a set of steps to announce and deliver the entire set of transactions
+ var steps []interface{}
+ for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
+ steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})
+ steps = append(steps, isWaiting(map[string][]common.Hash{
+ "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
+ }))
+ steps = append(steps, doWait{time: txArriveTimeout, step: true})
+ steps = append(steps, isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
+ },
+ })
+ steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
+ steps = append(steps, isWaiting(nil))
+ steps = append(steps, isScheduled{nil, nil, nil})
+ steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
+ }
+ testTransactionFetcher(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ errs := make([]error, len(txs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = core.ErrUnderpriced
+ }
+ return errs
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: append(steps, []interface{}{
+ // The preparation of the test has already been done in `steps`, add the last check
+ doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
+ isUnderpriced(maxTxUnderpricedSetSize),
+ }...),
+ })
+}
+
+// Tests that unexpected deliveries don't corrupt the internal state.
+func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Deliver something out of the blue
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+
+ // Set up a few hashes into various stages
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
+
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Deliver everything and more out of the blue
+ doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ },
+ })
+}
+
+// Tests that dropping a peer cleans out all internal data structures in all the
+// live or dangling stages.
+func TestTransactionFetcherDrop(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Set up a few hashes into various stages
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},
+
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x03}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ },
+ // Drop the peer and ensure everything's cleaned out
+ doDrop("A"),
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+
+ // Push the node into a dangling (timeout) state
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ doWait{time: txFetchTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ // Drop the peer and ensure everything's cleaned out
+ doDrop("A"),
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that dropping a peer instantly reschedules failed announcements to any
+// available peer.
+func TestTransactionFetcherDropRescheduling(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Set up a few hashes into various stages
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},
+
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}},
+ "B": {{0x01}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ },
+ // Drop the peer and ensure everything's cleaned out
+ doDrop("A"),
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "B": {{0x01}},
+ },
+ fetching: map[string][]common.Hash{
+ "B": {{0x01}},
+ },
+ },
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction timing out and clashing on readd with a concurrently
+// announced one.
+func TestTransactionFetcherFuzzCrash01(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
+
+ // Notify the dangling transaction once more and crash via a timeout
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction getting peer-dropped and clashing on readd with a
+// concurrently announced one.
+func TestTransactionFetcherFuzzCrash02(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
+
+ // Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doDrop("A"),
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction getting rescheduled via a partial delivery, clashing
+// with a concurrent notify.
+func TestTransactionFetcherFuzzCrash03(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
+ doWait{time: txFetchTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
+
+ // Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction getting rescheduled via a disconnect, clashing with
+// a concurrent notify.
+func TestTransactionFetcherFuzzCrash04(t *testing.T) {
+ // Create a channel to control when tx requests can fail
+ proceed := make(chan struct{})
+
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error {
+ <-proceed
+ return errors.New("peer disconnected")
+ },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
+
+ // Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doFunc(func() {
+ proceed <- struct{}{} // Allow peer A to return the failure
+ }),
+ doWait{time: 0, step: true},
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
+ t.Parallel()
+ testTransactionFetcher(t, tt)
+}
+
+func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
+	// Create a fetcher and hook into its simulated fields
+ clock := new(mclock.Simulated)
+ wait := make(chan struct{})
+
+ fetcher := tt.init()
+ fetcher.clock = clock
+ fetcher.step = wait
+ fetcher.rand = rand.New(rand.NewSource(0x3a29))
+
+ fetcher.Start()
+ defer fetcher.Stop()
+
+ // Crunch through all the test steps and execute them
+ for i, step := range tt.steps {
+ switch step := step.(type) {
+ case doTxNotify:
+ if err := fetcher.Notify(step.peer, step.hashes); err != nil {
+ t.Errorf("step %d: %v", i, err)
+ }
+ <-wait // Fetcher needs to process this, wait until it's done
+ select {
+ case <-wait:
+ panic("wtf")
+ case <-time.After(time.Millisecond):
+ }
+
+ case doTxEnqueue:
+ if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {
+ t.Errorf("step %d: %v", i, err)
+ }
+ <-wait // Fetcher needs to process this, wait until it's done
+
+ case doWait:
+ clock.Run(step.time)
+ if step.step {
+ <-wait // Fetcher supposed to do something, wait until it's done
+ }
+
+ case doDrop:
+ if err := fetcher.Drop(string(step)); err != nil {
+ t.Errorf("step %d: %v", i, err)
+ }
+ <-wait // Fetcher needs to process this, wait until it's done
+
+ case doFunc:
+ step()
+
+ case isWaiting:
+ // We need to check that the waiting list (stage 1) internals
+ // match with the expected set. Check the peer->hash mappings
+ // first.
+ for peer, hashes := range step {
+ waiting := fetcher.waitslots[peer]
+ if waiting == nil {
+ t.Errorf("step %d: peer %s missing from waitslots", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if _, ok := waiting[hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash)
+ }
+ }
+ for hash := range waiting {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash)
+ }
+ }
+ }
+ for peer := range fetcher.waitslots {
+ if _, ok := step[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in waitslots", i, peer)
+ }
+ }
+ // Peer->hash sets correct, check the hash->peer and timeout sets
+ for peer, hashes := range step {
+ for _, hash := range hashes {
+ if _, ok := fetcher.waitlist[hash][peer]; !ok {
+ t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer)
+ }
+ if _, ok := fetcher.waittime[hash]; !ok {
+ t.Errorf("step %d: hash %x missing from waittime", i, hash)
+ }
+ }
+ }
+ for hash, peers := range fetcher.waitlist {
+ if len(peers) == 0 {
+ t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
+ }
+ for peer := range peers {
+ if !containsHash(step[peer], hash) {
+ t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
+ }
+ }
+ }
+ for hash := range fetcher.waittime {
+ var found bool
+ for _, hashes := range step {
+ if containsHash(hashes, hash) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("step %d,: hash %x extra in waittime", i, hash)
+ }
+ }
+
+ case isScheduled:
+ // Check that all scheduled announces are accounted for and no
+ // extra ones are present.
+ for peer, hashes := range step.tracking {
+ scheduled := fetcher.announces[peer]
+ if scheduled == nil {
+ t.Errorf("step %d: peer %s missing from announces", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if _, ok := scheduled[hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash)
+ }
+ }
+ for hash := range scheduled {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash)
+ }
+ }
+ }
+ for peer := range fetcher.announces {
+ if _, ok := step.tracking[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in announces", i, peer)
+ }
+ }
+ // Check that all announces required to be fetching are in the
+ // appropriate sets
+ for peer, hashes := range step.fetching {
+ request := fetcher.requests[peer]
+ if request == nil {
+ t.Errorf("step %d: peer %s missing from requests", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if !containsHash(request.hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
+ }
+ }
+ for _, hash := range request.hashes {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
+ }
+ }
+ }
+ for peer := range fetcher.requests {
+ if _, ok := step.fetching[peer]; !ok {
+ if _, ok := step.dangling[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in requests", i, peer)
+ }
+ }
+ }
+ for peer, hashes := range step.fetching {
+ for _, hash := range hashes {
+ if _, ok := fetcher.fetching[hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash)
+ }
+ }
+ }
+ for hash := range fetcher.fetching {
+ var found bool
+ for _, req := range fetcher.requests {
+ if containsHash(req.hashes, hash) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("step %d: hash %x extra in fetching", i, hash)
+ }
+ }
+ for _, hashes := range step.fetching {
+ for _, hash := range hashes {
+ alternates := fetcher.alternates[hash]
+ if alternates == nil {
+ t.Errorf("step %d: hash %x missing from alternates", i, hash)
+ continue
+ }
+ for peer := range alternates {
+ if _, ok := fetcher.announces[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in alternates", i, peer)
+ continue
+ }
+ if _, ok := fetcher.announces[peer][hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x extra in alternates", i, hash, peer)
+ continue
+ }
+ }
+ for p := range fetcher.announced[hash] {
+ if _, ok := alternates[p]; !ok {
+ t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p)
+ continue
+ }
+ }
+ }
+ }
+ for peer, hashes := range step.dangling {
+ request := fetcher.requests[peer]
+ if request == nil {
+ t.Errorf("step %d: peer %s missing from requests", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if !containsHash(request.hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
+ }
+ }
+ for _, hash := range request.hashes {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
+ }
+ }
+ }
+ // Check that all transaction announces that are scheduled for
+ // retrieval but not actively being downloaded are tracked only
+ // in the stage 2 `announced` map.
+ var queued []common.Hash
+ for _, hashes := range step.tracking {
+ for _, hash := range hashes {
+ var found bool
+ for _, hs := range step.fetching {
+ if containsHash(hs, hash) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ queued = append(queued, hash)
+ }
+ }
+ }
+ for _, hash := range queued {
+ if _, ok := fetcher.announced[hash]; !ok {
+ t.Errorf("step %d: hash %x missing from announced", i, hash)
+ }
+ }
+ for hash := range fetcher.announced {
+ if !containsHash(queued, hash) {
+ t.Errorf("step %d: hash %x extra in announced", i, hash)
+ }
+ }
+
+ case isUnderpriced:
+ if fetcher.underpriced.Cardinality() != int(step) {
+ t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step)
+ }
+
+ default:
+ t.Fatalf("step %d: unknown step type %T", i, step)
+ }
+ // After every step, cross validate the internal uniqueness invariants
+ // between stage one and stage two.
+ for hash := range fetcher.waittime {
+ if _, ok := fetcher.announced[hash]; ok {
+ t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash)
+ }
+ }
+ }
+}
+
+// containsHash returns whether a hash is contained within a hash slice.
+func containsHash(slice []common.Hash, hash common.Hash) bool {
+ for _, have := range slice {
+ if have == hash {
+ return true
+ }
+ }
+ return false
+}
diff --git a/eth/handler.go b/eth/handler.go
index 76733fb8d863..1f56f901bd01 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -20,18 +20,18 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math"
"math/big"
"sync"
"sync/atomic"
"time"
- lru "github.com/hashicorp/golang-lru"
-
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/consensus"
"github.com/XinFinOrg/XDPoSChain/consensus/XDPoS"
"github.com/XinFinOrg/XDPoSChain/consensus/misc"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/forkid"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/eth/bft"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
@@ -40,9 +40,10 @@ import (
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
+ lru "github.com/hashicorp/golang-lru"
)
const (
@@ -52,26 +53,30 @@ const (
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
+
+	// minimum number of peers to broadcast entire blocks and transactions to.
+ minBroadcastPeers = 4
)
var (
daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
)
-// errIncompatibleConfig is returned if the requested protocols and configs are
-// not compatible (low protocol version restrictions and high requirements).
-var errIncompatibleConfig = errors.New("incompatible configuration")
-
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
type ProtocolManager struct {
networkId uint64
+ // networkID uint64
+ forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
+ checkpointNumber uint64 // Block number for the sync progress validator to cross reference
+ checkpointHash common.Hash // Block hash for the sync progress validator to cross reference
+
txpool txPool
orderpool orderPool
lendingpool lendingPool
@@ -79,12 +84,11 @@ type ProtocolManager struct {
chainconfig *params.ChainConfig
maxPeers int
- downloader *downloader.Downloader
- fetcher *fetcher.Fetcher
- peers *peerSet
- bft *bft.Bfter
-
- SubProtocols []p2p.Protocol
+ downloader *downloader.Downloader
+ blockFetcher *fetcher.BlockFetcher
+ txFetcher *fetcher.TxFetcher
+ peers *peerSet
+ bft *bft.Bfter
eventMux *event.TypeMux
txsCh chan core.NewTxsEvent
@@ -112,6 +116,9 @@ type ProtocolManager struct {
knownVotes *lru.Cache
knownSyncInfos *lru.Cache
knownTimeouts *lru.Cache
+
+ // Test fields or hooks
+ broadcastTxAnnouncesOnly bool // Testing field, disable transaction propagation
}
// NewProtocolManagerEx add order pool to protocol
@@ -138,11 +145,13 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
// Create the protocol manager with the base fields
manager := &ProtocolManager{
- networkId: networkID,
- eventMux: mux,
- txpool: txpool,
- blockchain: blockchain,
- chainconfig: config,
+ networkId: networkID,
+ forkFilter: forkid.NewFilter(blockchain),
+ eventMux: mux,
+ txpool: txpool,
+ blockchain: blockchain,
+ chainconfig: config,
+ // whitelist: whitelist,
peers: newPeerSet(),
newPeerCh: make(chan *peer),
noMorePeers: make(chan struct{}),
@@ -167,44 +176,53 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
if mode == downloader.FastSync {
manager.fastSync = uint32(1)
}
- // Initiate a sub-protocol for every implemented version we can handle
- manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
- for i, version := range ProtocolVersions {
- // Skip protocol version if incompatible with the mode of operation
- if mode == downloader.FastSync && version < eth63 {
- continue
- }
- // Compatible; initialise the sub-protocol
- version := version // Closure for the run
- manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
- Name: ProtocolName,
- Version: version,
- Length: ProtocolLengths[i],
- Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
- peer := manager.newPeer(int(version), p, rw)
- select {
- case manager.newPeerCh <- peer:
- manager.wg.Add(1)
- defer manager.wg.Done()
- return manager.handle(peer)
- case <-manager.quitSync:
- return p2p.DiscQuitting
- }
- },
- NodeInfo: func() interface{} {
- return manager.NodeInfo()
- },
- PeerInfo: func(id discover.NodeID) interface{} {
- if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
- return p.Info()
- }
- return nil
- },
- })
- }
- if len(manager.SubProtocols) == 0 {
- return nil, errIncompatibleConfig
- }
+ // // Initiate a sub-protocol for every implemented version we can handle
+ // manager.SubProtocols = make([]p2p.Protocol, 0, len(ProtocolVersions))
+ // for i, version := range ProtocolVersions {
+ // // Skip protocol version if incompatible with the mode of operation
+ // if mode == downloader.FastSync && version < eth63 {
+ // continue
+ // }
+ // // Compatible; initialise the sub-protocol
+ // version := version // Closure for the run
+ // manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
+ // Name: ProtocolName,
+ // Version: version,
+ // Length: ProtocolLengths[i],
+ // Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ // peer := manager.newPeer(int(version), p, rw)
+ // select {
+ // case manager.newPeerCh <- peer:
+ // manager.wg.Add(1)
+ // defer manager.wg.Done()
+ // return manager.handle(peer)
+ // case <-manager.quitSync:
+ // return p2p.DiscQuitting
+ // }
+ // },
+ // NodeInfo: func() interface{} {
+ // return manager.NodeInfo()
+ // },
+ // PeerInfo: func(id enode.ID) interface{} {
+ // if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
+ // return p.Info()
+ // }
+ // return nil
+ // },
+ // })
+ // }
+ // if len(manager.SubProtocols) == 0 {
+ // return nil, errIncompatibleConfig
+ // }
+
+ // // Construct the downloader (long sync) and its backing state bloom if fast
+ // // sync is requested. The downloader is responsible for deallocating the state
+ // // bloom when it's done.
+ // var stateBloom *trie.SyncBloom
+ // if atomic.LoadUint32(&manager.fastSync) == 1 {
+ // stateBloom = trie.NewSyncBloom(uint64(cacheLimit), chaindb)
+ // }
+ // manager.downloader = downloader.New(manager.checkpointNumber, chaindb, stateBloom, manager.eventMux, blockchain, nil, manager.removePeer)
var handleProposedBlock func(header *types.Header) error
if config.XDPoS != nil {
@@ -228,14 +246,32 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
return blockchain.CurrentBlock().NumberU64()
}
- inserter := func(block *types.Block) error {
- // If fast sync is running, deny importing weird blocks
+ inserter := func(block types.Block) (error) {
+ // If sync hasn't reached the checkpoint yet, deny importing weird blocks.
+ //
+ // Ideally we would also compare the head block's timestamp and similarly reject
+ // the propagated block if the head is too old. Unfortunately there is a corner
+ // case when starting new networks, where the genesis might be ancient (0 unix)
+ // which would prevent full nodes from accepting it.
+ if manager.blockchain.CurrentBlock().NumberU64() < manager.checkpointNumber {
+ log.Warn("Unsynced yet, discarded propagated block", "number", block.Number(), "hash", block.Hash())
+ return nil
+ }
+ // If fast sync is running, deny importing weird blocks. This is a problematic
+ // clause when starting up a new network, because fast-syncing miners might not
+ // accept each others' blocks until a restart. Unfortunately we haven't figured
+ // out a way yet where nodes can decide unilaterally whether the network is new
+ // or not. This should be fixed if we figure out a solution.
if atomic.LoadUint32(&manager.fastSync) == 1 {
- log.Warn("Discarded bad propagated block", "number", block.Number(), "hash", block.Hash())
+ log.Warn("Fast syncing, discarded propagated block", "number", block.Number(), "hash", block.Hash())
return nil
}
- atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
- return manager.blockchain.InsertBlock(block)
+ err := manager.blockchain.InsertBlock(&block)
+ // n, err := manager.blockchain.InsertChain(blocks) //TODO: only use InsertChain like go-eth
+ if err == nil {
+ atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
+ }
+ return err
}
prepare := func(block *types.Block) error {
@@ -247,7 +283,7 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
return manager.blockchain.PrepareBlock(block)
}
- manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, handleProposedBlock, manager.BroadcastBlock, heighter, inserter, prepare, manager.removePeer)
+ manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, handleProposedBlock, manager.BroadcastBlock, heighter, inserter, prepare, manager.removePeer)
//Define bft function
broadcasts := bft.BroadcastFns{
Vote: manager.BroadcastVote,
@@ -260,6 +296,15 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
manager.bft.SetConsensusFuns(engine)
}
+ fetchTx := func(peer string, hashes []common.Hash) error {
+ p := manager.peers.Peer(peer)
+ if p == nil {
+ return errors.New("unknown peer")
+ }
+ return p.RequestTxs(hashes)
+ }
+ manager.txFetcher = fetcher.NewTxFetcher(txpool.Has, txpool.AddRemotes, fetchTx)
+
return manager, nil
}
@@ -269,6 +314,40 @@ func (pm *ProtocolManager) addOrderPoolProtocol(orderpool orderPool) {
func (pm *ProtocolManager) addLendingPoolProtocol(lendingpool lendingPool) {
pm.lendingpool = lendingpool
}
+
+func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol {
+ length, ok := protocolLengths[version]
+ if !ok {
+ panic("makeProtocol for unknown version")
+ }
+
+ return p2p.Protocol{
+ Name: protocolName,
+ Version: version,
+ Length: length,
+ Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+ peer := pm.newPeer(int(version), p, rw, pm.txpool.Get)
+ select {
+ case pm.newPeerCh <- peer:
+ pm.wg.Add(1)
+ defer pm.wg.Done()
+ return pm.handle(peer)
+ case <-pm.quitSync:
+ return p2p.DiscQuitting
+ }
+ },
+ NodeInfo: func() interface{} {
+ return pm.NodeInfo()
+ },
+ PeerInfo: func(id enode.ID) interface{} {
+ if p := pm.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
+ return p.Info()
+ }
+ return nil
+ },
+ }
+}
+
func (pm *ProtocolManager) removePeer(id string) {
// Short circuit if the peer was already removed
peer := pm.peers.Peer(id)
@@ -279,6 +358,8 @@ func (pm *ProtocolManager) removePeer(id string) {
// Unregister the peer from the downloader and Ethereum peer set
pm.downloader.UnregisterPeer(id)
+ pm.txFetcher.Drop(id)
+
if err := pm.peers.Unregister(id); err != nil {
log.Debug("Peer removal failed", "peer", id, "err", err)
}
@@ -311,7 +392,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {
// start sync handlers
go pm.syncer()
- go pm.txsyncLoop()
+ go pm.txsyncLoop64() // TODO(karalabe): Legacy initial tx echange, drop with eth/64.
}
func (pm *ProtocolManager) Stop() {
@@ -345,8 +426,8 @@ func (pm *ProtocolManager) Stop() {
log.Info("Ethereum protocol stopped")
}
-func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
- return newPeer(pv, p, newMeteredMsgWriter(rw))
+func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
+ return newPeer(pv, p, rw, getPooledTx)
}
// handle is the callback invoked to manage the life cycle of an eth peer. When
@@ -366,13 +447,10 @@ func (pm *ProtocolManager) handle(p *peer) error {
number = head.Number.Uint64()
td = pm.blockchain.GetTd(hash, number)
)
- if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
+ if err := p.Handshake(pm.networkId, td, hash, genesis.Hash(), forkid.NewID(pm.blockchain), pm.forkFilter); err != nil {
p.Log().Debug("Ethereum handshake failed", "err", err)
return err
}
- if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
- rw.Init(p.version)
- }
// Register the peer locally
err := pm.peers.Register(p)
if err != nil && err != p2p.ErrAddPairPeer {
@@ -388,7 +466,6 @@ func (pm *ProtocolManager) handle(p *peer) error {
// Propagate existing transactions. new transactions appearing
// after this will be sent via broadcasts.
pm.syncTransactions(p)
-
// If we're DAO hard-fork aware, validate any remote peer with regard to the hard-fork
if daoBlock := pm.chainconfig.DAOForkBlock; daoBlock != nil {
// Request the peer's DAO fork header for extra-data validation
@@ -426,8 +503,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err != nil {
return err
}
- if msg.Size > ProtocolMaxMsgSize {
- return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
+ if msg.Size > protocolMaxMsgSize {
+ return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
}
defer msg.Discard()
@@ -560,7 +637,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return nil
}
// Irrelevant of the fork checks, send the header to the fetcher just in case
- headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
+ headers = pm.blockFetcher.FilterHeaders(p.id, headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
@@ -603,26 +680,26 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Deliver them all to the downloader for queuing
- trasactions := make([][]*types.Transaction, len(request))
+ transactions := make([][]*types.Transaction, len(request))
uncles := make([][]*types.Header, len(request))
for i, body := range request {
- trasactions[i] = body.Transactions
+ transactions[i] = body.Transactions
uncles[i] = body.Uncles
}
// Filter out any explicitly requested bodies, deliver the rest to the downloader
- filter := len(trasactions) > 0 || len(uncles) > 0
+ filter := len(transactions) > 0 || len(uncles) > 0
if filter {
- trasactions, uncles = pm.fetcher.FilterBodies(p.id, trasactions, uncles, time.Now())
+ transactions, uncles = pm.blockFetcher.FilterBodies(p.id, transactions, uncles, time.Now())
}
- if len(trasactions) > 0 || len(uncles) > 0 || !filter {
- err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
+ if len(transactions) > 0 || len(uncles) > 0 || !filter {
+ err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
if err != nil {
log.Debug("Failed to deliver bodies", "err", err)
}
}
- case p.version >= eth63 && msg.Code == GetNodeDataMsg:
+ case isEth63OrHigher(p.version) && msg.Code == GetNodeDataMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
@@ -649,7 +726,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
return p.SendNodeData(data)
- case p.version >= eth63 && msg.Code == NodeDataMsg:
+ case isEth63OrHigher(p.version) && msg.Code == NodeDataMsg:
// A batch of node state data arrived to one of our previous requests
var data [][]byte
if err := msg.Decode(&data); err != nil {
@@ -660,7 +737,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
log.Debug("Failed to deliver node state data", "err", err)
}
- case p.version >= eth63 && msg.Code == GetReceiptsMsg:
+ case isEth63OrHigher(p.version) && msg.Code == GetReceiptsMsg:
// Decode the retrieval message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
if _, err := msgStream.List(); err != nil {
@@ -696,7 +773,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
return p.SendReceiptsRLP(receipts)
- case p.version >= eth63 && msg.Code == ReceiptsMsg:
+ case isEth63OrHigher(p.version) && msg.Code == ReceiptsMsg:
// A batch of receipts arrived to one of our previous requests
var receipts [][]*types.Receipt
if err := msg.Decode(&receipts); err != nil {
@@ -724,7 +801,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}
for _, block := range unknown {
- pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
+ pm.blockFetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
}
case msg.Code == NewBlockMsg:
@@ -733,12 +810,23 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
+ if hash := types.CalcUncleHash(request.Block.Uncles()); hash != request.Block.UncleHash() {
+ log.Warn("Propagated block has invalid uncles", "have", hash, "exp", request.Block.UncleHash())
+ break // TODO(karalabe): return error eventually, but wait a few releases
+ }
+ if hash := types.DeriveSha(request.Block.Transactions()); hash != request.Block.TxHash() {
+ log.Warn("Propagated block has invalid body", "have", hash, "exp", request.Block.TxHash())
+ break // TODO(karalabe): return error eventually, but wait a few releases
+ }
+ if err := request.sanityCheck(); err != nil {
+ return err
+ }
request.Block.ReceivedAt = msg.ReceivedAt
request.Block.ReceivedFrom = p
// Mark the peer as owning the block and schedule it for import
p.MarkBlock(request.Block.Hash())
- pm.fetcher.Enqueue(p.id, request.Block)
+ pm.blockFetcher.Enqueue(p.id, request.Block)
// Assuming the block is importable by the peer, but possibly not yet done so,
// calculate the head hash and TD that the peer truly must have.
@@ -759,7 +847,59 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}
- case msg.Code == TxMsg:
+ case msg.Code == NewPooledTransactionHashesMsg && isEth65OrHigher(p.version):
+ // New transaction announcement arrived, make sure we have
+ // a valid and fresh chain to handle them
+ if atomic.LoadUint32(&pm.acceptTxs) == 0 {
+ break
+ }
+ var hashes []common.Hash
+ if err := msg.Decode(&hashes); err != nil {
+ return errResp(ErrDecode, "msg %v: %v", msg, err)
+ }
+ // Schedule all the unknown hashes for retrieval
+ for _, hash := range hashes {
+ p.MarkTransaction(hash)
+ }
+ pm.txFetcher.Notify(p.id, hashes)
+
+ case msg.Code == GetPooledTransactionsMsg && isEth65OrHigher(p.version):
+ // Decode the retrieval message
+ msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
+ if _, err := msgStream.List(); err != nil {
+ return err
+ }
+ // Gather transactions until the fetch or network limits is reached
+ var (
+ hash common.Hash
+ bytes int
+ hashes []common.Hash
+ txs []rlp.RawValue
+ )
+ for bytes < softResponseLimit {
+ // Retrieve the hash of the next block
+ if err := msgStream.Decode(&hash); err == rlp.EOL {
+ break
+ } else if err != nil {
+ return errResp(ErrDecode, "msg %v: %v", msg, err)
+ }
+ // Retrieve the requested transaction, skipping if unknown to us
+ tx := pm.txpool.Get(hash)
+ if tx == nil {
+ continue
+ }
+ // If known, encode and queue for response packet
+ if encoded, err := rlp.EncodeToBytes(tx); err != nil {
+ log.Error("Failed to encode transaction", "err", err)
+ } else {
+ hashes = append(hashes, hash)
+ txs = append(txs, encoded)
+ bytes += len(encoded)
+ }
+ }
+ return p.SendPooledTransactionsRLP(hashes, txs)
+
+ case msg.Code == TransactionMsg || (msg.Code == PooledTransactionsMsg && isEth65OrHigher(p.version)):
// Transactions arrived, make sure we have a valid and fresh chain to handle them
if atomic.LoadUint32(&pm.acceptTxs) == 0 {
break
@@ -785,7 +925,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}
- pm.txpool.AddRemotes(txs)
+ pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg)
case msg.Code == OrderTxMsg:
// Transactions arrived, make sure we have a valid and fresh chain to handle them
@@ -926,37 +1066,73 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
return
}
// Send the block to a subset of our peers
- for _, peer := range peers {
- peer.SendNewBlock(block, td)
+ transferLen := int(math.Sqrt(float64(len(peers))))
+ if transferLen < minBroadcastPeers {
+ transferLen = minBroadcastPeers
+ }
+ if transferLen > len(peers) {
+ transferLen = len(peers)
}
- log.Trace("Propagated block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
+ transfer := peers[:transferLen]
+ for _, peer := range transfer {
+ peer.AsyncSendNewBlock(block, td)
+ }
+ log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
return
}
// Otherwise if the block is indeed in out own chain, announce it
if pm.blockchain.HasBlock(hash, block.NumberU64()) {
for _, peer := range peers {
- peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
+ peer.AsyncSendNewBlockHash(block)
}
log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
}
}
-// BroadcastTxs will propagate a batch of transactions to all peers which are not known to
+// BroadcastTransactions will propagate a batch of transactions to all peers which are not known to
// already have the given transaction.
-func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
- var txset = make(map[*peer]types.Transactions)
-
+func (pm *ProtocolManager) BroadcastTransactions(txs types.Transactions, propagate bool) {
+ var (
+ txset = make(map[*peer][]common.Hash)
+ annos = make(map[*peer][]common.Hash)
+ )
// Broadcast transactions to a batch of peers not knowing about it
+ if propagate {
+ for _, tx := range txs {
+ peers := pm.peers.PeersWithoutTx(tx.Hash())
+
+ // Send the block to a subset of our peers
+ transferLen := int(math.Sqrt(float64(len(peers))))
+ if transferLen < minBroadcastPeers {
+ transferLen = minBroadcastPeers
+ }
+ if transferLen > len(peers) {
+ transferLen = len(peers)
+ }
+ transfer := peers[:transferLen]
+ for _, peer := range transfer {
+ txset[peer] = append(txset[peer], tx.Hash())
+ }
+ log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
+ }
+ for peer, hashes := range txset {
+ peer.AsyncSendTransactions(hashes)
+ }
+ return
+ }
+ // Otherwise only broadcast the announcement to peers
for _, tx := range txs {
peers := pm.peers.PeersWithoutTx(tx.Hash())
for _, peer := range peers {
- txset[peer] = append(txset[peer], tx)
+ annos[peer] = append(annos[peer], tx.Hash())
}
- log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
}
- // FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
- for peer, txs := range txset {
- peer.SendTransactions(txs)
+ for peer, hashes := range annos {
+ if peer.version >= eth65 { //implement
+ peer.AsyncSendPooledTransactionHashes(hashes)
+ } else {
+ peer.AsyncSendTransactions(hashes)
+ }
}
}
@@ -1052,7 +1228,13 @@ func (pm *ProtocolManager) txBroadcastLoop() {
for {
select {
case event := <-pm.txsCh:
- pm.BroadcastTxs(event.Txs)
+ // For testing purpose only, disable propagation
+ if pm.broadcastTxAnnouncesOnly {
+ pm.BroadcastTransactions(event.Txs, false)
+ continue
+ }
+ pm.BroadcastTransactions(event.Txs, true) // First propagate transactions to peers
+ pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest
// Err() channel will be closed when unsubscribing.
case <-pm.txsSub.Err():
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 651a33a28c49..b8d0d9456723 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -17,6 +17,7 @@
package eth
import (
+ "fmt"
"math"
"math/big"
"math/rand"
@@ -38,38 +39,11 @@ import (
"github.com/XinFinOrg/XDPoSChain/params"
)
-// Tests that protocol versions and modes of operations are matched up properly.
-func TestProtocolCompatibility(t *testing.T) {
- // Define the compatibility chart
- tests := []struct {
- version uint
- mode downloader.SyncMode
- compatible bool
- }{
- {61, downloader.FullSync, true}, {62, downloader.FullSync, true}, {63, downloader.FullSync, true},
- {61, downloader.FastSync, false}, {62, downloader.FastSync, false}, {63, downloader.FastSync, true},
- }
- // Make sure anything we screw up is restored
- backup := ProtocolVersions
- defer func() { ProtocolVersions = backup }()
-
- // Try all available compatibility configs and check for errors
- for i, tt := range tests {
- ProtocolVersions = []uint{tt.version}
-
- pm, _, err := newTestProtocolManager(tt.mode, 0, nil, nil)
- if pm != nil {
- defer pm.Stop()
- }
- if (err == nil && !tt.compatible) || (err != nil && tt.compatible) {
- t.Errorf("test %d: compatibility mismatch: have error %v, want compatibility %v", i, err, tt.compatible)
- }
- }
-}
-
// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) }
-func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
+func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) }
+func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }
+func TestGetBlockHeaders100(t *testing.T) { testGetBlockHeaders(t, 100) }
+func TestGetBlockHeaders101(t *testing.T) { testGetBlockHeaders(t, 101) }
func testGetBlockHeaders(t *testing.T, protocol int) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxHashFetch+15, nil, nil)
@@ -227,8 +201,10 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) }
-func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
+func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) }
+func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }
+func TestGetBlockBodies100(t *testing.T) { testGetBlockBodies(t, 100) }
+func TestGetBlockBodies101(t *testing.T) { testGetBlockBodies(t, 101) }
func testGetBlockBodies(t *testing.T, protocol int) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxBlockFetch+15, nil, nil)
@@ -299,7 +275,10 @@ func testGetBlockBodies(t *testing.T, protocol int) {
}
// Tests that the node state database can be retrieved based on hashes.
-func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
+func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) }
+func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }
+func TestGetNodeData100(t *testing.T) { testGetNodeData(t, 100) }
+func TestGetNodeData101(t *testing.T) { testGetNodeData(t, 101) }
func testGetNodeData(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
@@ -393,7 +372,10 @@ func testGetNodeData(t *testing.T, protocol int) {
}
// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
+func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) }
+func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) }
+func TestGetReceipt100(t *testing.T) { testGetReceipt(t, 100) }
+func TestGetReceipt101(t *testing.T) { testGetReceipt(t, 101) }
func testGetReceipt(t *testing.T, protocol int) {
// Define three accounts to simulate transactions with
@@ -451,75 +433,245 @@ func testGetReceipt(t *testing.T, protocol int) {
}
}
-// Tests that post eth protocol handshake, DAO fork-enabled clients also execute
-// a DAO "challenge" verifying each others' DAO fork headers to ensure they're on
-// compatible chains.
-func TestDAOChallengeNoVsNo(t *testing.T) { testDAOChallenge(t, false, false, false) }
-func TestDAOChallengeNoVsPro(t *testing.T) { testDAOChallenge(t, false, true, false) }
-func TestDAOChallengeProVsNo(t *testing.T) { testDAOChallenge(t, true, false, false) }
-func TestDAOChallengeProVsPro(t *testing.T) { testDAOChallenge(t, true, true, false) }
-func TestDAOChallengeNoVsTimeout(t *testing.T) { testDAOChallenge(t, false, false, true) }
-func TestDAOChallengeProVsTimeout(t *testing.T) { testDAOChallenge(t, true, true, true) }
-
-func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool) {
- // Reduce the DAO handshake challenge timeout
- if timeout {
- defer func(old time.Duration) { daoChallengeTimeout = old }(daoChallengeTimeout)
- daoChallengeTimeout = 500 * time.Millisecond
+// // Tests that post eth protocol handshake, DAO fork-enabled clients also execute
+// // a DAO "challenge" verifying each others' DAO fork headers to ensure they're on
+// // compatible chains.
+// func TestDAOChallengeNoVsNo(t *testing.T) { testDAOChallenge(t, false, false, false) }
+// func TestDAOChallengeNoVsPro(t *testing.T) { testDAOChallenge(t, false, true, false) }
+// func TestDAOChallengeProVsNo(t *testing.T) { testDAOChallenge(t, true, false, false) }
+// func TestDAOChallengeProVsPro(t *testing.T) { testDAOChallenge(t, true, true, false) }
+// func TestDAOChallengeNoVsTimeout(t *testing.T) { testDAOChallenge(t, false, false, true) }
+// func TestDAOChallengeProVsTimeout(t *testing.T) { testDAOChallenge(t, true, true, true) }
+
+// func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool) {
+// // Reduce the DAO handshake challenge timeout
+// if timeout {
+// defer func(old time.Duration) { daoChallengeTimeout = old }(daoChallengeTimeout)
+// daoChallengeTimeout = 500 * time.Millisecond
+// }
+// // Create a DAO aware protocol manager
+// var (
+// evmux = new(event.TypeMux)
+// pow = ethash.NewFaker()
+// db = rawdb.NewMemoryDatabase()
+// config = &params.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
+// gspec = &core.Genesis{Config: config}
+// genesis = gspec.MustCommit(db)
+// blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{})
+// )
+// (&core.Genesis{Config: config}).MustCommit(db) // Commit genesis block
+// // If checkpointing is enabled, create and inject a fake CHT and the corresponding
+// // challenge response.
+// var response *types.Header
+// var cht *params.TrustedCheckpoint
+// if checkpoint {
+// index := uint64(rand.Intn(500))
+// number := (index+1)*params.CHTFrequency - 1
+// response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")}
+
+// cht = &params.TrustedCheckpoint{
+// SectionIndex: index,
+// SectionHead: response.Hash(),
+// }
+// }
+// // Create a checkpoint aware protocol manager
+// blockchain, err := core.NewBlockChain(db, nil, config, ethash.NewFaker(), vm.Config{}, nil)
+// if err != nil {
+// t.Fatalf("failed to create new blockchain: %v", err)
+// }
+// // pm, err := NewProtocolManager(config, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+// pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, ethash.NewFaker(), blockchain, db, 1, nil)
+// if err != nil {
+// t.Fatalf("failed to start test protocol manager: %v", err)
+// }
+// pm.Start(1000)
+// defer pm.Stop()
+
+// // Connect a new peer and check that we receive the DAO challenge
+// peer, _ := newTestPeer("peer", eth63, pm, true)
+// defer peer.close()
+
+// challenge := &getBlockHeadersData{
+// Origin: hashOrNumber{Number: config.DAOForkBlock.Uint64()},
+// Amount: 1,
+// Skip: 0,
+// Reverse: false,
+// }
+// if err := p2p.ExpectMsg(peer.app, GetBlockHeadersMsg, challenge); err != nil {
+// t.Fatalf("challenge mismatch: %v", err)
+// }
+// // Create a block to reply to the challenge if no timeout is simulated
+// if !timeout {
+// blocks, _ := core.GenerateChain(&params.ChainConfig{}, genesis, ethash.NewFaker(), db, 1, func(i int, block *core.BlockGen) {
+// if remoteForked {
+// block.SetExtra(params.DAOForkBlockExtra)
+// }
+// })
+// if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{blocks[0].Header()}); err != nil {
+// t.Fatalf("failed to answer challenge: %v", err)
+// }
+// time.Sleep(100 * time.Millisecond) // Sleep to avoid the verification racing with the drops
+// } else {
+// // Otherwise wait until the test timeout passes
+// time.Sleep(daoChallengeTimeout + 500*time.Millisecond)
+// }
+// // Verify that depending on fork side, the remote peer is maintained or dropped
+// if localForked == remoteForked && !timeout {
+// if peers := pm.peers.Len(); peers != 1 {
+// t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
+// }
+// } else {
+// if peers := pm.peers.Len(); peers != 0 {
+// t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
+// }
+// }
+// }
+
+func TestBroadcastBlock(t *testing.T) {
+ var tests = []struct {
+ totalPeers int
+ broadcastExpected int
+ }{
+ {1, 1},
+ {2, 2},
+ {3, 3},
+ {4, 4},
+ {5, 4},
+ {9, 4},
+ {12, 4},
+ {16, 4},
+ {26, 5},
+ {100, 10},
+ }
+ for _, test := range tests {
+ testBroadcastBlock(t, test.totalPeers, test.broadcastExpected)
}
- // Create a DAO aware protocol manager
+}
+
+func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
var (
- evmux = new(event.TypeMux)
- pow = ethash.NewFaker()
- db = rawdb.NewMemoryDatabase()
- config = &params.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
- gspec = &core.Genesis{Config: config}
- genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, nil, config, pow, vm.Config{})
+ evmux = new(event.TypeMux)
+ pow = ethash.NewFaker()
+ db = rawdb.NewMemoryDatabase()
+ config = &params.ChainConfig{}
+ gspec = &core.Genesis{Config: config}
+ genesis = gspec.MustCommit(db)
)
- pm, err := NewProtocolManager(config, downloader.FullSync, ethconfig.Defaults.NetworkId, evmux, new(testTxPool), pow, blockchain, db)
+ blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{})
+ if err != nil {
+ t.Fatalf("failed to create new blockchain: %v", err)
+ }
+ pm, err := NewProtocolManager(config, downloader.FullSync, ethconfig.Defaults.NetworkId, evmux, &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, pow, blockchain, db)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
pm.Start(1000)
defer pm.Stop()
-
- // Connect a new peer and check that we receive the DAO challenge
- peer, _ := newTestPeer("peer", eth63, pm, true)
- defer peer.close()
-
- challenge := &getBlockHeadersData{
- Origin: hashOrNumber{Number: config.DAOForkBlock.Uint64()},
- Amount: 1,
- Skip: 0,
- Reverse: false,
+ var peers []*testPeer
+ for i := 0; i < totalPeers; i++ {
+ peer, _ := newTestPeer(fmt.Sprintf("peer %d", i), eth63, pm, true)
+ defer peer.close()
+ peers = append(peers, peer)
}
- if err := p2p.ExpectMsg(peer.app, GetBlockHeadersMsg, challenge); err != nil {
- t.Fatalf("challenge mismatch: %v", err)
+ chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {})
+ pm.BroadcastBlock(chain[0], true /*propagate*/)
+
+ errCh := make(chan error, totalPeers)
+ doneCh := make(chan struct{}, totalPeers)
+ for _, peer := range peers {
+ go func(p *testPeer) {
+ if err := p2p.ExpectMsg(p.app, NewBlockMsg, &newBlockData{Block: chain[0], TD: big.NewInt(131136)}); err != nil {
+ errCh <- err
+ } else {
+ doneCh <- struct{}{}
+ }
+ }(peer)
}
- // Create a block to reply to the challenge if no timeout is simulated
- if !timeout {
- blocks, _ := core.GenerateChain(&params.ChainConfig{}, genesis, ethash.NewFaker(), db, 1, func(i int, block *core.BlockGen) {
- if remoteForked {
- block.SetExtra(params.DAOForkBlockExtra)
+ timeout := time.After(2 * time.Second)
+ var receivedCount int
+outer:
+ for {
+ select {
+ case err = <-errCh:
+ break outer
+ case <-doneCh:
+ receivedCount++
+ if receivedCount == totalPeers {
+ break outer
}
- })
- if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{blocks[0].Header()}); err != nil {
- t.Fatalf("failed to answer challenge: %v", err)
+ case <-timeout:
+ break outer
}
- time.Sleep(100 * time.Millisecond) // Sleep to avoid the verification racing with the drops
- } else {
- // Otherwise wait until the test timeout passes
- time.Sleep(daoChallengeTimeout + 500*time.Millisecond)
}
- // Verify that depending on fork side, the remote peer is maintained or dropped
- if localForked == remoteForked && !timeout {
- if peers := pm.peers.Len(); peers != 1 {
- t.Fatalf("peer count mismatch: have %d, want %d", peers, 1)
+ for _, peer := range peers {
+ peer.app.Close()
+ }
+ if err != nil {
+ t.Errorf("error matching block by peer: %v", err)
+ }
+ if receivedCount != broadcastExpected {
+ t.Errorf("block broadcast to %d peers, expected %d", receivedCount, broadcastExpected)
+ }
+}
+
+// Tests that a propagated malformed block (uncles or transactions don't match
+// with the hashes in the header) gets discarded and not broadcast forward.
+func TestBroadcastMalformedBlock(t *testing.T) {
+ // Create a live node to test propagation with
+ var (
+ engine = ethash.NewFaker()
+ db = rawdb.NewMemoryDatabase()
+ config = &params.ChainConfig{}
+ gspec = &core.Genesis{Config: config}
+ genesis = gspec.MustCommit(db)
+ )
+ blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{})
+ if err != nil {
+ t.Fatalf("failed to create new blockchain: %v", err)
+ }
+ pm, err := NewProtocolManager(config, downloader.FullSync, ethconfig.Defaults.NetworkId, new(event.TypeMux), new(testTxPool), engine, blockchain, db)
+ if err != nil {
+ t.Fatalf("failed to start test protocol manager: %v", err)
+ }
+ pm.Start(2)
+ defer pm.Stop()
+
+ // Create two peers, one to send the malformed block with and one to check
+ // propagation
+ source, _ := newTestPeer("source", eth63, pm, true)
+ defer source.close()
+
+ sink, _ := newTestPeer("sink", eth63, pm, true)
+ defer sink.close()
+
+ // Create various combinations of malformed blocks
+ chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {})
+
+ malformedUncles := chain[0].Header()
+ malformedUncles.UncleHash[0]++
+ malformedTransactions := chain[0].Header()
+ malformedTransactions.TxHash[0]++
+ malformedEverything := chain[0].Header()
+ malformedEverything.UncleHash[0]++
+ malformedEverything.TxHash[0]++
+
+ // Keep listening to broadcasts and notify if any arrives
+ notify := make(chan struct{})
+ go func() {
+ if _, err := sink.app.ReadMsg(); err == nil {
+ notify <- struct{}{}
+ }
+ }()
+ // Try to broadcast all malformations and ensure they all get discarded
+ for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
+ block := types.NewBlockWithHeader(header).WithBody(chain[0].Transactions(), chain[0].Uncles())
+ if err := p2p.Send(source.app, NewBlockMsg, []interface{}{block, big.NewInt(131136)}); err != nil {
+ t.Fatalf("failed to broadcast block: %v", err)
}
- } else {
- if peers := pm.peers.Len(); peers != 0 {
- t.Fatalf("peer count mismatch: have %d, want %d", peers, 0)
+ select {
+ case <-notify:
+ t.Fatalf("malformed block forwarded")
+ case <-time.After(100 * time.Millisecond):
}
}
}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index 4963093c1aa0..9fbb460543ab 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -22,6 +22,7 @@ package eth
import (
"crypto/ecdsa"
"crypto/rand"
+ "fmt"
"math/big"
"sort"
"sync"
@@ -30,6 +31,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/consensus/ethash"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/forkid"
"github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/core/vm"
@@ -39,7 +41,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/params"
)
@@ -68,7 +70,8 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
panic(err)
}
- pm, err := NewProtocolManager(gspec.Config, mode, ethconfig.Defaults.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db)
+ // pm, err := NewProtocolManager(gspec.Config, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db)
+ pm, err := NewProtocolManager(gspec.Config, mode, ethconfig.Defaults.NetworkId, evmux, &testTxPool{added: newtx, pool: make(map[common.Hash]*types.Transaction)}, engine, blockchain, db)
if err != nil {
return nil, nil, err
}
@@ -91,22 +94,43 @@ func newTestProtocolManagerMust(t *testing.T, mode downloader.SyncMode, blocks i
// testTxPool is a fake, helper transaction pool for testing purposes
type testTxPool struct {
txFeed event.Feed
- pool []*types.Transaction // Collection of all transactions
- added chan<- []*types.Transaction // Notification channel for new transactions
+ pool map[common.Hash]*types.Transaction // Hash map of collected transactions
+ added chan<- []*types.Transaction // Notification channel for new transactions
lock sync.RWMutex // Protects the transaction pool
}
+// Has returns an indicator whether txpool has a transaction
+// cached with the given hash.
+func (p *testTxPool) Has(hash common.Hash) bool {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ return p.pool[hash] != nil
+}
+
+// Get retrieves the transaction from local txpool with given
+// tx hash.
+func (p *testTxPool) Get(hash common.Hash) *types.Transaction {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ return p.pool[hash]
+}
+
// AddRemotes appends a batch of transactions to the pool, and notifies any
// listeners if the addition channel is non nil
func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {
p.lock.Lock()
defer p.lock.Unlock()
- p.pool = append(p.pool, txs...)
+ for _, tx := range txs {
+ p.pool[tx.Hash()] = tx
+ }
if p.added != nil {
p.added <- txs
}
+ p.txFeed.Send(core.NewTxsEvent{Txs: txs})
return make([]error, len(txs))
}
@@ -150,10 +174,10 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
- var id discover.NodeID
+ var id enode.ID
rand.Read(id[:])
- peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net)
+ peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net, pm.txpool.Get)
// Start the peer on a new thread
errc := make(chan error, 1)
@@ -173,22 +197,38 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
head = pm.blockchain.CurrentHeader()
td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
)
- tp.handshake(nil, td, head.Hash(), genesis.Hash())
+ tp.handshake(nil, td, head.Hash(), genesis.Hash(), forkid.NewID(pm.blockchain), forkid.NewFilter(pm.blockchain))
}
return tp, errc
}
// handshake simulates a trivial handshake that expects the same state from the
// remote side as we are simulating locally.
-func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) {
- msg := &statusData{
- ProtocolVersion: uint32(p.version),
- NetworkId: ethconfig.Defaults.NetworkId,
- TD: td,
- CurrentBlock: head,
- GenesisBlock: genesis,
+func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) {
+ var msg interface{}
+ switch {
+ case isEth63(p.version):
+ msg = &statusData63{
+ ProtocolVersion: uint32(p.version),
+ NetworkId: ethconfig.Defaults.NetworkId,
+ TD: td,
+ CurrentBlock: head,
+ GenesisBlock: genesis,
+ }
+ case isEth64OrHigher(p.version):
+ msg = &statusData{
+ ProtocolVersion: uint32(p.version),
+ NetworkID: ethconfig.Defaults.NetworkId,
+ TD: td,
+ Head: head,
+ Genesis: genesis,
+ ForkID: forkID,
+ }
+ default:
+ panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
}
if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil {
+ fmt.Println("p2p expect msg err", err)
t.Fatalf("status recv: %v", err)
}
if err := p2p.Send(p.app, StatusMsg, msg); err != nil {
diff --git a/eth/metrics.go b/eth/metrics.go
deleted file mode 100644
index 2f3bd6bfb839..000000000000
--- a/eth/metrics.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eth
-
-import (
- "github.com/XinFinOrg/XDPoSChain/metrics"
- "github.com/XinFinOrg/XDPoSChain/p2p"
-)
-
-var (
- propTxnInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/txns/in/packets", nil)
- propTxnInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/txns/in/traffic", nil)
- propTxnOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/txns/out/packets", nil)
- propTxnOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/txns/out/traffic", nil)
- propHashInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/hashes/in/packets", nil)
- propHashInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/hashes/in/traffic", nil)
- propHashOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/hashes/out/packets", nil)
- propHashOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/hashes/out/traffic", nil)
- propBlockInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/blocks/in/packets", nil)
- propBlockInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/blocks/in/traffic", nil)
- propBlockOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/blocks/out/packets", nil)
- propBlockOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/blocks/out/traffic", nil)
- reqHeaderInPacketsMeter = metrics.NewRegisteredMeter("eth/req/headers/in/packets", nil)
- reqHeaderInTrafficMeter = metrics.NewRegisteredMeter("eth/req/headers/in/traffic", nil)
- reqHeaderOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/headers/out/packets", nil)
- reqHeaderOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/headers/out/traffic", nil)
- reqBodyInPacketsMeter = metrics.NewRegisteredMeter("eth/req/bodies/in/packets", nil)
- reqBodyInTrafficMeter = metrics.NewRegisteredMeter("eth/req/bodies/in/traffic", nil)
- reqBodyOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/bodies/out/packets", nil)
- reqBodyOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/bodies/out/traffic", nil)
- reqStateInPacketsMeter = metrics.NewRegisteredMeter("eth/req/states/in/packets", nil)
- reqStateInTrafficMeter = metrics.NewRegisteredMeter("eth/req/states/in/traffic", nil)
- reqStateOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/states/out/packets", nil)
- reqStateOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/states/out/traffic", nil)
- reqReceiptInPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/in/packets", nil)
- reqReceiptInTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/in/traffic", nil)
- reqReceiptOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/packets", nil)
- reqReceiptOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/traffic", nil)
- miscInPacketsMeter = metrics.NewRegisteredMeter("eth/misc/in/packets", nil)
- miscInTrafficMeter = metrics.NewRegisteredMeter("eth/misc/in/traffic", nil)
- miscOutPacketsMeter = metrics.NewRegisteredMeter("eth/misc/out/packets", nil)
- miscOutTrafficMeter = metrics.NewRegisteredMeter("eth/misc/out/traffic", nil)
-)
-
-// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
-// accumulating the above defined metrics based on the data stream contents.
-type meteredMsgReadWriter struct {
- p2p.MsgReadWriter // Wrapped message stream to meter
- version int // Protocol version to select correct meters
-}
-
-// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
-// metrics system is disabled, this function returns the original object.
-func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
- if !metrics.Enabled {
- return rw
- }
- return &meteredMsgReadWriter{MsgReadWriter: rw}
-}
-
-// Init sets the protocol version used by the stream to know which meters to
-// increment in case of overlapping message ids between protocol versions.
-func (rw *meteredMsgReadWriter) Init(version int) {
- rw.version = version
-}
-
-func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
- // Read the message and short circuit in case of an error
- msg, err := rw.MsgReadWriter.ReadMsg()
- if err != nil {
- return msg, err
- }
- // Account for the data traffic
- packets, traffic := miscInPacketsMeter, miscInTrafficMeter
- switch {
- case msg.Code == BlockHeadersMsg:
- packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
- case msg.Code == BlockBodiesMsg:
- packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
-
- case rw.version >= eth63 && msg.Code == NodeDataMsg:
- packets, traffic = reqStateInPacketsMeter, reqStateInTrafficMeter
- case rw.version >= eth63 && msg.Code == ReceiptsMsg:
- packets, traffic = reqReceiptInPacketsMeter, reqReceiptInTrafficMeter
-
- case msg.Code == NewBlockHashesMsg:
- packets, traffic = propHashInPacketsMeter, propHashInTrafficMeter
- case msg.Code == NewBlockMsg:
- packets, traffic = propBlockInPacketsMeter, propBlockInTrafficMeter
- case msg.Code == TxMsg:
- packets, traffic = propTxnInPacketsMeter, propTxnInTrafficMeter
- }
- packets.Mark(1)
- traffic.Mark(int64(msg.Size))
-
- return msg, err
-}
-
-func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
- // Account for the data traffic
- packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
- switch {
- case msg.Code == BlockHeadersMsg:
- packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter
- case msg.Code == BlockBodiesMsg:
- packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter
-
- case rw.version >= eth63 && msg.Code == NodeDataMsg:
- packets, traffic = reqStateOutPacketsMeter, reqStateOutTrafficMeter
- case rw.version >= eth63 && msg.Code == ReceiptsMsg:
- packets, traffic = reqReceiptOutPacketsMeter, reqReceiptOutTrafficMeter
-
- case msg.Code == NewBlockHashesMsg:
- packets, traffic = propHashOutPacketsMeter, propHashOutTrafficMeter
- case msg.Code == NewBlockMsg:
- packets, traffic = propBlockOutPacketsMeter, propBlockOutTrafficMeter
- case msg.Code == TxMsg:
- packets, traffic = propTxnOutPacketsMeter, propTxnOutTrafficMeter
- }
- packets.Mark(1)
- traffic.Mark(int64(msg.Size))
-
- // Send the packet to the p2p layer
- return rw.MsgReadWriter.WriteMsg(msg)
-}
diff --git a/eth/peer.go b/eth/peer.go
index aa846a797e96..713e7ca6466d 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/core/forkid"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/p2p"
"github.com/XinFinOrg/XDPoSChain/rlp"
@@ -44,9 +45,38 @@ const (
maxKnownVote = 1024 // Maximum transactions hashes to keep in the known list (prevent DOS)
maxKnownTimeout = 1024 // Maximum transactions hashes to keep in the known list (prevent DOS)
maxKnownSyncInfo = 1024 // Maximum transactions hashes to keep in the known list (prevent DOS)
- handshakeTimeout = 5 * time.Second
+ // maxQueuedTxs is the maximum number of transactions to queue up before dropping
+ // older broadcasts.
+ maxQueuedTxs = 4096
+ // maxQueuedTxAnns is the maximum number of transaction announcements to queue up
+ // before dropping older announcements.
+ maxQueuedTxAnns = 4096
+ // maxQueuedBlocks is the maximum number of block propagations to queue up before
+ // dropping broadcasts. There's not much point in queueing stale blocks, so a few
+ // that might cover uncles should be enough.
+ maxQueuedBlocks = 4
+ // maxQueuedBlockAnns is the maximum number of block announcements to queue up before
+ // dropping broadcasts. Similarly to block propagations, there's no point to queue
+ // above some healthy uncle limit, so use that.
+ maxQueuedBlockAnns = 4
+
+ handshakeTimeout = 5 * time.Second
)
+// max is a helper function which returns the larger of the two given integers.
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// propEvent is a block propagation, waiting for its turn in the broadcast queue.
+type propEvent struct {
+ block *types.Block
+ td *big.Int
+}
+
// PeerInfo represents a short summary of the Ethereum sub-protocol metadata known
// about a connected peer.
type PeerInfo struct {
@@ -69,36 +99,199 @@ type peer struct {
td *big.Int
lock sync.RWMutex
- knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
- knownBlocks mapset.Set // Set of block hashes known to be known by this peer
-
+ knownBlocks mapset.Set // Set of block hashes known to be known by this peer
+ knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
knownOrderTxs mapset.Set // Set of order transaction hashes known to be known by this peer
knownLendingTxs mapset.Set // Set of lending transaction hashes known to be known by this peer
+ knownVote mapset.Set // Set of BFT Vote known to be known by this peer
+ knownTimeout mapset.Set // Set of BFT timeout known to be known by this peer
+ knownSyncInfo mapset.Set // Set of BFT Sync Info known to be known by this peer
- knownVote mapset.Set // Set of BFT Vote known to be known by this peer
- knownTimeout mapset.Set // Set of BFT timeout known to be known by this peer
- knownSyncInfo mapset.Set // Set of BFT Sync Info known to be known by this peer`
-}
+ queuedBlocks chan *propEvent // Queue of blocks to broadcast to the peer
+ queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
-func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
- id := p.ID()
+ txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
+ txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
+ getPooledTx func(common.Hash) *types.Transaction // Callback used to retrieve transaction from txpool
+ term chan struct{} // Termination channel to stop the broadcaster
+}
+
+func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
return &peer{
Peer: p,
rw: rw,
version: version,
- id: fmt.Sprintf("%x", id[:8]),
+ id: fmt.Sprintf("%x", p.ID().Bytes()[:8]),
knownTxs: mapset.NewSet(),
knownBlocks: mapset.NewSet(),
knownOrderTxs: mapset.NewSet(),
knownLendingTxs: mapset.NewSet(),
+ knownVote: mapset.NewSet(),
+ knownTimeout: mapset.NewSet(),
+ knownSyncInfo: mapset.NewSet(),
+ queuedBlocks: make(chan *propEvent, maxQueuedBlocks),
+ queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
+ txBroadcast: make(chan []common.Hash),
+ txAnnounce: make(chan []common.Hash),
+ getPooledTx: getPooledTx,
+ term: make(chan struct{}),
+ }
+}
+
+// broadcastBlocks is a write loop that multiplexes blocks and block accouncements
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) broadcastBlocks() {
+ for {
+ select {
+ case prop := <-p.queuedBlocks:
+ if err := p.SendNewBlock(prop.block, prop.td); err != nil {
+ return
+ }
+ p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
+
+ case block := <-p.queuedBlockAnns:
+ if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
+ return
+ }
+ p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash())
+
+ case <-p.term:
+ return
+ }
+ }
+}
+
+// broadcastTransactions is a write loop that schedules transaction broadcasts
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) broadcastTransactions() {
+ var (
+ queue []common.Hash // Queue of hashes to broadcast as full transactions
+ done chan struct{} // Non-nil if background broadcaster is running
+ fail = make(chan error) // Channel used to receive network error
+ )
+ for {
+ // If there's no in-flight broadcast running, check if a new one is needed
+ if done == nil && len(queue) > 0 {
+ // Pile transaction until we reach our allowed network limit
+ var (
+ hashes []common.Hash
+ txs []*types.Transaction
+ size common.StorageSize
+ )
+ for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
+ if tx := p.getPooledTx(queue[i]); tx != nil {
+ txs = append(txs, tx)
+ size += tx.Size()
+ }
+ hashes = append(hashes, queue[i])
+ }
+ queue = queue[:copy(queue, queue[len(hashes):])]
+
+ // If there's anything available to transfer, fire up an async writer
+ if len(txs) > 0 {
+ done = make(chan struct{})
+ go func() {
+ if err := p.sendTransactions(txs); err != nil {
+ fail <- err
+ return
+ }
+ close(done)
+ p.Log().Trace("Sent transactions", "count", len(txs))
+ }()
+ }
+ }
+ // Transfer goroutine may or may not have been started, listen for events
+ select {
+ case hashes := <-p.txBroadcast:
+ // New batch of transactions to be broadcast, queue them (with cap)
+ queue = append(queue, hashes...)
+ if len(queue) > maxQueuedTxs {
+ // Fancy copy and resize to ensure buffer doesn't grow indefinitely
+ queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
+ }
+
+ case <-done:
+ done = nil
+
+ case <-fail:
+ return
- knownVote: mapset.NewSet(),
- knownTimeout: mapset.NewSet(),
- knownSyncInfo: mapset.NewSet(),
+ case <-p.term:
+ return
+ }
+ }
+}
+
+// announceTransactions is a write loop that schedules transaction broadcasts
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) announceTransactions() {
+ var (
+ queue []common.Hash // Queue of hashes to announce as transaction stubs
+ done chan struct{} // Non-nil if background announcer is running
+ fail = make(chan error) // Channel used to receive network error
+ )
+ for {
+ // If there's no in-flight announce running, check if a new one is needed
+ if done == nil && len(queue) > 0 {
+ // Pile transaction hashes until we reach our allowed network limit
+ var (
+ hashes []common.Hash
+ pending []common.Hash
+ size common.StorageSize
+ )
+ for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
+ if p.getPooledTx(queue[i]) != nil {
+ pending = append(pending, queue[i])
+ size += common.HashLength
+ }
+ hashes = append(hashes, queue[i])
+ }
+ queue = queue[:copy(queue, queue[len(hashes):])]
+
+ // If there's anything available to transfer, fire up an async writer
+ if len(pending) > 0 {
+ done = make(chan struct{})
+ go func() {
+ if err := p.sendPooledTransactionHashes(pending); err != nil {
+ fail <- err
+ return
+ }
+ close(done)
+ p.Log().Trace("Sent transaction announcements", "count", len(pending))
+ }()
+ }
+ }
+ // Transfer goroutine may or may not have been started, listen for events
+ select {
+ case hashes := <-p.txAnnounce:
+ // New batch of transactions to be broadcast, queue them (with cap)
+ queue = append(queue, hashes...)
+ if len(queue) > maxQueuedTxAnns {
+ // Fancy copy and resize to ensure buffer doesn't grow indefinitely
+ queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])]
+ }
+
+ case <-done:
+ done = nil
+
+ case <-fail:
+ return
+
+ case <-p.term:
+ return
+ }
}
}
+// close signals the broadcast goroutine to terminate.
+func (p *peer) close() {
+ close(p.term)
+}
+
// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *PeerInfo {
hash, td := p.Head()
@@ -199,16 +392,41 @@ func (p *peer) MarkSyncInfo(hash common.Hash) {
p.knownSyncInfo.Add(hash)
}
-// SendTransactions sends transactions to the peer and includes the hashes
+// SendTransactions64 sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
-func (p *peer) SendTransactions(txs types.Transactions) error {
- for p.knownTxs.Cardinality() >= maxKnownTxs {
+//
+// This method is legacy support for initial transaction exchange in eth/64 and
+// prior. For eth/65 and higher use SendPooledTransactionHashes.
+func (p *peer) SendTransactions64(txs types.Transactions) error {
+ return p.sendTransactions(txs)
+}
+
+// // SendTransactions sends transactions to the peer and includes the hashes
+// // in its transaction hash set for future reference.
+// func (p *peer) SendTransactions(txs types.Transactions) error {
+// for p.knownTxs.Cardinality() >= maxKnownTxs {
+// p.knownTxs.Pop()
+// }
+// for _, tx := range txs {
+// p.knownTxs.Add(tx.Hash())
+// return p2p.Send(p.rw, TxMsg, txs)
+// }
+
+// sendTransactions sends transactions to the peer and includes the hashes
+// in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction sender. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+func (p *peer) sendTransactions(txs types.Transactions) error {
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
p.knownTxs.Pop()
}
for _, tx := range txs {
p.knownTxs.Add(tx.Hash())
}
- return p2p.Send(p.rw, TxMsg, txs)
+ return p2p.Send(p.rw, TransactionMsg, txs)
}
// SendTransactions sends transactions to the peer and includes the hashes
@@ -224,6 +442,24 @@ func (p *peer) SendOrderTransactions(txs types.OrderTransactions) error {
return p2p.Send(p.rw, OrderTxMsg, txs)
}
+// AsyncSendTransactions queues a list of transactions (by hash) to eventually
+// propagate to a remote peer. The number of pending sends are capped (new ones
+// will force old sends to be dropped)
+func (p *peer) AsyncSendTransactions(hashes []common.Hash) {
+ select {
+ case p.txBroadcast <- hashes:
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ case <-p.term:
+ p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
+ }
+}
+
// SendTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
func (p *peer) SendLendingTransactions(txs types.LendingTransactions) error {
@@ -237,13 +473,64 @@ func (p *peer) SendLendingTransactions(txs types.LendingTransactions) error {
return p2p.Send(p.rw, LendingTxMsg, txs)
}
+// sendPooledTransactionHashes sends transaction hashes to the peer and includes
+// them in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction announcer. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+func (p *peer) sendPooledTransactionHashes(hashes []common.Hash) error {
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ return p2p.Send(p.rw, NewPooledTransactionHashesMsg, hashes)
+}
+
+// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually
+// announce to a remote peer. The number of pending sends are capped (new ones
+// will force old sends to be dropped)
+func (p *peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
+ select {
+ case p.txAnnounce <- hashes:
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ case <-p.term:
+ p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
+ }
+}
+
+// SendPooledTransactionsRLP sends requested transactions to the peer and adds the
+// hashes in its transaction hash set for future reference.
+//
+// Note, the method assumes the hashes are correct and correspond to the list of
+// transactions being sent.
+func (p *peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ return p2p.Send(p.rw, PooledTransactionsMsg, txs)
+}
+
// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
- for p.knownBlocks.Cardinality() >= maxKnownBlocks {
+ // Mark all the block hashes as known, but ensure we don't overflow our limits
+ for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
p.knownBlocks.Pop()
}
-
for _, hash := range hashes {
p.knownBlocks.Add(hash)
}
@@ -255,6 +542,16 @@ func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error
return p2p.Send(p.rw, NewBlockHashesMsg, request)
}
+// // SendNewBlock propagates an entire block to a remote peer.
+// func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
+// // Mark all the block hash as known, but ensure we don't overflow our limits
+// for p.knownBlocks.Cardinality() >= maxKnownBlocks {
+// p.knownBlocks.Pop()
+// }
+// p.knownBlocks.Add(block.Hash())
+// return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
+// }
+
// SendNewBlock propagates an entire block to a remote peer.
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
@@ -269,6 +566,37 @@ func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
}
}
+// AsyncSendNewBlockHash queues the availability of a block for propagation to a
+// remote peer. If the peer's broadcast queue is full, the event is silently
+// dropped.
+func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
+ select {
+ case p.queuedBlockAnns <- block:
+ // Mark all the block hash as known, but ensure we don't overflow our limits
+ for p.knownBlocks.Cardinality() >= maxKnownBlocks {
+ p.knownBlocks.Pop()
+ }
+ p.knownBlocks.Add(block.Hash())
+ default:
+ p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
+ }
+}
+
+// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If
+// the peer's broadcast queue is full, the event is silently dropped.
+func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
+ select {
+ case p.queuedBlocks <- &propEvent{block: block, td: td}:
+ // Mark all the block hash as known, but ensure we don't overflow our limits
+ for p.knownBlocks.Cardinality() >= maxKnownBlocks {
+ p.knownBlocks.Pop()
+ }
+ p.knownBlocks.Add(block.Hash())
+ default:
+ p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
+ }
+}
+
// SendBlockHeaders sends a batch of block headers to the remote peer.
func (p *peer) SendBlockHeaders(headers []*types.Header) error {
if p.pairRw != nil {
@@ -437,24 +765,54 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error {
}
}
+// RequestTxs fetches a batch of transactions from a remote node.
+func (p *peer) RequestTxs(hashes []common.Hash) error {
+ p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
+ return p2p.Send(p.rw, GetPooledTransactionsMsg, hashes)
+}
+
// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
-func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash) error {
+func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
// Send out own handshake in a new thread
errc := make(chan error, 2)
- var status statusData // safe to read after two values have been received from errc
+ var (
+ status63 statusData63 // safe to read after two values have been received from errc
+ status statusData // safe to read after two values have been received from errc
+ )
go func() {
- errc <- p2p.Send(p.rw, StatusMsg, &statusData{
- ProtocolVersion: uint32(p.version),
- NetworkId: network,
- TD: td,
- CurrentBlock: head,
- GenesisBlock: genesis,
- })
+ switch {
+ case isEth63(p.version):
+ errc <- p2p.Send(p.rw, StatusMsg, &statusData63{
+ ProtocolVersion: uint32(p.version),
+ NetworkId: network,
+ TD: td,
+ CurrentBlock: head,
+ GenesisBlock: genesis,
+ })
+ case isEth64OrHigher(p.version):
+ errc <- p2p.Send(p.rw, StatusMsg, &statusData{
+ ProtocolVersion: uint32(p.version),
+ NetworkID: network,
+ TD: td,
+ Head: head,
+ Genesis: genesis,
+ ForkID: forkID,
+ })
+ default:
+ panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
+ }
}()
go func() {
- errc <- p.readStatus(network, &status, genesis)
+ switch {
+ case isEth63(p.version):
+ errc <- p.readStatusLegacy(network, &status63, genesis)
+ case isEth64OrHigher(p.version):
+ errc <- p.readStatus(network, &status, genesis, forkFilter)
+ default:
+ panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
+ }
}()
timeout := time.NewTimer(handshakeTimeout)
defer timeout.Stop()
@@ -468,11 +826,18 @@ func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
return p2p.DiscReadTimeout
}
}
- p.td, p.head = status.TD, status.CurrentBlock
+ switch {
+ case isEth63(p.version):
+ p.td, p.head = status63.TD, status63.CurrentBlock
+ case isEth64OrHigher(p.version):
+ p.td, p.head = status.TD, status.Head
+ default:
+ panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
+ }
return nil
}
-func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash) (err error) {
+func (p *peer) readStatusLegacy(network uint64, status *statusData63, genesis common.Hash) error {
msg, err := p.rw.ReadMsg()
if err != nil {
return err
@@ -480,18 +845,18 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has
if msg.Code != StatusMsg {
return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
- if msg.Size > ProtocolMaxMsgSize {
- return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
+ if msg.Size > protocolMaxMsgSize {
+ return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
}
// Decode the handshake and make sure everything matches
if err := msg.Decode(&status); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
if status.GenesisBlock != genesis {
- return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
+ return errResp(ErrGenesisMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8])
}
if status.NetworkId != network {
- return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network)
+ return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkId, network)
}
if int(status.ProtocolVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
@@ -499,6 +864,36 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has
return nil
}
+func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash, forkFilter forkid.Filter) error {
+ msg, err := p.rw.ReadMsg()
+ if err != nil {
+ return err
+ }
+ if msg.Code != StatusMsg {
+ return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
+ }
+ if msg.Size > protocolMaxMsgSize {
+ return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize)
+ }
+ // Decode the handshake and make sure everything matches
+ if err := msg.Decode(&status); err != nil {
+ return errResp(ErrDecode, "msg %v: %v", msg, err)
+ }
+ if status.NetworkID != network {
+ return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkID, network)
+ }
+ if int(status.ProtocolVersion) != p.version {
+ return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
+ }
+ if status.Genesis != genesis {
+ return errResp(ErrGenesisMismatch, "%x (!= %x)", status.Genesis, genesis)
+ }
+ if err := forkFilter(status.ForkID); err != nil {
+ return errResp(ErrForkIDRejected, "%v", err)
+ }
+ return nil
+}
+
// String implements fmt.Stringer.
func (p *peer) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id,
@@ -540,6 +935,11 @@ func (ps *peerSet) Register(p *peer) error {
return p2p.ErrAddPairPeer
}
ps.peers[p.id] = p
+
+ go p.broadcastBlocks()
+ go p.broadcastTransactions()
+ go p.announceTransactions()
+
return nil
}
diff --git a/eth/protocol.go b/eth/protocol.go
index eb7297a28c10..b530f7237882 100644
--- a/eth/protocol.go
+++ b/eth/protocol.go
@@ -23,6 +23,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/forkid"
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/rlp"
@@ -30,28 +31,74 @@ import (
// Constants to match up protocol versions and messages
const (
- eth62 = 62
- eth63 = 63
- xdpos2 = 100
+ eth63 = 63
+ eth64 = 64
+ eth65 = 65
+ xdpos2 = 100 //xdpos2.1 = eth62+eth63
+ xdpos22 = 101 //xdpos2.2 = eth65
)
-// Official short name of the protocol used during capability negotiation.
-var ProtocolName = "eth"
+// XDC needs the below functions because direct number equality doesn't work (eg. version >= 63)
+// we should try to match protocols 1 to 1 from now on, bump xdpos along with any new eth (eg. eth66 = xdpos23 only)
+// try to follow the exact comparison from go-ethereum as much as possible (eg. version >= 63 <> isEth63OrHigher(version))
+
+func isEth63(version int) bool {
+ switch {
+ case version == 63:
+ return true
+ case version == 100:
+ return true
+ default:
+ return false
+ }
+}
+func isEth64(version int) bool {
+ switch {
+ case version == 64:
+ return true
+ default:
+ return false
+ }
+}
+func isEth65(version int) bool {
+ switch {
+ case version == 65:
+ return true
+ case version == 101:
+ return true
+ default:
+ return false
+ }
+}
+
+func isEth63OrHigher(version int) bool {
+ return isEth63(version) || isEth64(version) || isEth65(version)
+}
+
+func isEth64OrHigher(version int) bool {
+ return isEth64(version) || isEth65(version)
+}
+
+func isEth65OrHigher(version int) bool {
+ return isEth65(version)
+}
+
+// protocolName is the official short name of the protocol used during capability negotiation.
+const protocolName = "eth"
-// Supported versions of the eth protocol (first is primary).
-var ProtocolVersions = []uint{xdpos2, eth63, eth62}
+// ProtocolVersions are the supported versions of the eth protocol (first is primary).
+var ProtocolVersions = []uint{xdpos22, xdpos2, eth65, eth64, eth63}
-// Number of implemented message corresponding to different protocol versions.
-var ProtocolLengths = []uint64{227, 17, 8}
+// protocolLengths are the number of implemented message corresponding to different protocol versions.
+var protocolLengths = map[uint]uint64{xdpos22: 227, xdpos2: 227, eth65: 17, eth64: 17, eth63: 17}
-const ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
+const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
// eth protocol message codes
const (
- // Protocol messages belonging to eth/62
StatusMsg = 0x00
NewBlockHashesMsg = 0x01
- TxMsg = 0x02
+ TransactionMsg = 0x02
GetBlockHeadersMsg = 0x03
BlockHeadersMsg = 0x04
GetBlockBodiesMsg = 0x05
@@ -65,6 +112,14 @@ const (
GetReceiptsMsg = 0x0f
ReceiptsMsg = 0x10
+ // New protocol message codes introduced in eth65
+ //
+ // Previously these message ids were used by some legacy and unsupported
+ // eth protocols, reown them here.
+ NewPooledTransactionHashesMsg = 0x28 // originally 0x08 but clashes with OrderTxMsg
+ GetPooledTransactionsMsg = 0x29 // originally 0x09 but clashes with LendingTxMsg
+ PooledTransactionsMsg = 0x0a
+
// Protocol messages belonging to xdpos2/100
VoteMsg = 0xe0
TimeoutMsg = 0xe1
@@ -78,11 +133,11 @@ const (
ErrDecode
ErrInvalidMsgCode
ErrProtocolVersionMismatch
- ErrNetworkIdMismatch
- ErrGenesisBlockMismatch
+ ErrNetworkIDMismatch
+ ErrGenesisMismatch
+ ErrForkIDRejected
ErrNoStatusMsg
ErrExtraStatusMsg
- ErrSuspendedPeer
)
func (e errCode) String() string {
@@ -95,14 +150,22 @@ var errorToString = map[int]string{
ErrDecode: "Invalid message",
ErrInvalidMsgCode: "Invalid message code",
ErrProtocolVersionMismatch: "Protocol version mismatch",
- ErrNetworkIdMismatch: "NetworkId mismatch",
- ErrGenesisBlockMismatch: "Genesis block mismatch",
+ ErrNetworkIDMismatch: "Network ID mismatch",
+ ErrGenesisMismatch: "Genesis mismatch",
+ ErrForkIDRejected: "Fork ID rejected",
ErrNoStatusMsg: "No status message",
ErrExtraStatusMsg: "Extra status message",
- ErrSuspendedPeer: "Suspended peer",
}
type txPool interface {
+ // Has returns an indicator whether txpool has a transaction
+ // cached with the given hash.
+ Has(hash common.Hash) bool
+
+ // Get retrieves the transaction from local txpool with given
+ // tx hash.
+ Get(hash common.Hash) *types.Transaction
+
// AddRemotes should add the given transactions to the pool.
AddRemotes([]*types.Transaction) []error
@@ -141,8 +204,8 @@ type lendingPool interface {
SubscribeTxPreEvent(chan<- core.LendingTxPreEvent) event.Subscription
}
-// statusData is the network packet for the status message.
-type statusData struct {
+// statusData63 is the network packet for the status message for eth/63.
+type statusData63 struct {
ProtocolVersion uint32
NetworkId uint64
TD *big.Int
@@ -150,6 +213,16 @@ type statusData struct {
GenesisBlock common.Hash
}
+// statusData is the network packet for the status message for eth/64 and later.
+type statusData struct {
+ ProtocolVersion uint32
+ NetworkID uint64
+ TD *big.Int
+ Head common.Hash
+ Genesis common.Hash
+ ForkID forkid.ID
+}
+
// newBlockHashesData is the network packet for the block announcements.
type newBlockHashesData []struct {
Hash common.Hash // Hash of one particular block being announced
@@ -206,6 +279,19 @@ type newBlockData struct {
TD *big.Int
}
+// sanityCheck verifies that the values are reasonable, as a DoS protection
+func (request *newBlockData) sanityCheck() error {
+ if err := request.Block.SanityCheck(); err != nil {
+ return err
+ }
+ // TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
+ // larger, it will still fit within 100 bits.
+ if tdlen := request.TD.BitLen(); tdlen > 100 {
+ return fmt.Errorf("too large block TD: bitlen %d", tdlen)
+ }
+ return nil
+}
+
// blockBody represents the data content of a single block.
type blockBody struct {
Transactions []*types.Transaction // Transactions contained within a block
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 8c5283cd8b18..0bfa26bb26ec 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -18,16 +18,26 @@ package eth
import (
"fmt"
+ "math/big"
"sync"
+ "sync/atomic"
"testing"
"time"
"github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/consensus/ethash"
+ "github.com/XinFinOrg/XDPoSChain/core"
+ "github.com/XinFinOrg/XDPoSChain/core/forkid"
+ "github.com/XinFinOrg/XDPoSChain/core/rawdb"
"github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/core/vm"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
"github.com/XinFinOrg/XDPoSChain/eth/ethconfig"
+ "github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/p2p"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -38,10 +48,7 @@ func init() {
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
// Tests that handshake failures are detected and reported correctly.
-func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) }
-func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) }
-
-func testStatusMsgErrors(t *testing.T, protocol int) {
+func TestStatusMsgErrors63(t *testing.T) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
var (
genesis = pm.blockchain.Genesis()
@@ -56,25 +63,24 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
wantError error
}{
{
- code: TxMsg, data: []interface{}{},
+ code: TransactionMsg, data: []interface{}{},
wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
},
{
- code: StatusMsg, data: statusData{10, ethconfig.Defaults.NetworkId, td, head.Hash(), genesis.Hash()},
- wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol),
+ code: StatusMsg, data: statusData63{10, ethconfig.Defaults.NetworkId, td, head.Hash(), genesis.Hash()},
+ wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 63),
},
{
- code: StatusMsg, data: statusData{uint32(protocol), 999, td, head.Hash(), genesis.Hash()},
- wantError: errResp(ErrNetworkIdMismatch, "999 (!= 88)"),
+ code: StatusMsg, data: statusData63{63, 999, td, head.Hash(), genesis.Hash()},
+ wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", ethconfig.Defaults.NetworkId),
},
{
- code: StatusMsg, data: statusData{uint32(protocol), ethconfig.Defaults.NetworkId, td, head.Hash(), common.Hash{3}},
- wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]),
+ code: StatusMsg, data: statusData63{63, ethconfig.Defaults.NetworkId, td, head.Hash(), common.Hash{3}},
+ wantError: errResp(ErrGenesisMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]),
},
}
-
for i, test := range tests {
- p, errc := newTestPeer("peer", protocol, pm, false)
+ p, errc := newTestPeer("peer", 63, pm, false)
// The send call might hang until reset because
// the protocol might not read the payload.
go p2p.Send(p.app, test.code, test.data)
@@ -93,9 +99,164 @@ func testStatusMsgErrors(t *testing.T, protocol int) {
}
}
+func TestStatusMsgErrors64(t *testing.T) {
+ pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
+ var (
+ genesis = pm.blockchain.Genesis()
+ head = pm.blockchain.CurrentHeader()
+ td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64())
+ forkID = forkid.NewID(pm.blockchain)
+ )
+ defer pm.Stop()
+
+ tests := []struct {
+ code uint64
+ data interface{}
+ wantError error
+ }{
+ {
+ code: TransactionMsg, data: []interface{}{},
+ wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
+ },
+ {
+ code: StatusMsg, data: statusData{10, ethconfig.Defaults.NetworkId, td, head.Hash(), genesis.Hash(), forkID},
+ wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 64),
+ },
+ {
+ code: StatusMsg, data: statusData{64, 999, td, head.Hash(), genesis.Hash(), forkID},
+ wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", ethconfig.Defaults.NetworkId),
+ },
+ {
+ code: StatusMsg, data: statusData{64, ethconfig.Defaults.NetworkId, td, head.Hash(), common.Hash{3}, forkID},
+ wantError: errResp(ErrGenesisMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis.Hash()),
+ },
+ {
+ code: StatusMsg, data: statusData{64, ethconfig.Defaults.NetworkId, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},
+ wantError: errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()),
+ },
+ }
+ for i, test := range tests {
+ p, errc := newTestPeer("peer", 64, pm, false)
+ // The send call might hang until reset because
+ // the protocol might not read the payload.
+ go p2p.Send(p.app, test.code, test.data)
+
+ select {
+ case err := <-errc:
+ if err == nil {
+ t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError)
+ } else if err.Error() != test.wantError.Error() {
+ t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError)
+ }
+ case <-time.After(2 * time.Second):
+ t.Errorf("protocol did not shut down within 2 seconds")
+ }
+ p.close()
+ }
+}
+
+func TestForkIDSplit(t *testing.T) {
+ var (
+ engine = ethash.NewFaker()
+
+ configNoFork = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1)}
+ configProFork = ¶ms.ChainConfig{
+ HomesteadBlock: big.NewInt(1),
+ EIP150Block: big.NewInt(2),
+ EIP155Block: big.NewInt(2),
+ EIP158Block: big.NewInt(2),
+ ByzantiumBlock: big.NewInt(3),
+ }
+ dbNoFork = rawdb.NewMemoryDatabase()
+ dbProFork = rawdb.NewMemoryDatabase()
+
+ gspecNoFork = &core.Genesis{Config: configNoFork}
+ gspecProFork = &core.Genesis{Config: configProFork}
+
+ genesisNoFork = gspecNoFork.MustCommit(dbNoFork)
+ genesisProFork = gspecProFork.MustCommit(dbProFork)
+
+ chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{})
+ chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{})
+
+ blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
+ blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)
+
+ ethNoFork, _ = NewProtocolManager(configNoFork, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainNoFork, dbNoFork)
+ ethProFork, _ = NewProtocolManager(configProFork, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainProFork, dbProFork)
+ )
+ ethNoFork.Start(1000)
+ ethProFork.Start(1000)
+
+ // Both nodes should allow the other to connect (same genesis, next fork is the same)
+ p2pNoFork, p2pProFork := p2p.MsgPipe()
+ peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+ peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
+
+ errc := make(chan error, 2)
+ go func() { errc <- ethNoFork.handle(peerProFork) }()
+ go func() { errc <- ethProFork.handle(peerNoFork) }()
+
+ select {
+ case err := <-errc:
+ t.Fatalf("frontier nofork <-> profork failed: %v", err)
+ case <-time.After(250 * time.Millisecond):
+ p2pNoFork.Close()
+ p2pProFork.Close()
+ }
+ // Progress into Homestead. Fork's match, so we don't care what the future holds
+ chainNoFork.InsertChain(blocksNoFork[:1])
+ chainProFork.InsertChain(blocksProFork[:1])
+
+ p2pNoFork, p2pProFork = p2p.MsgPipe()
+ peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+ peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
+
+ errc = make(chan error, 2)
+ go func() { errc <- ethNoFork.handle(peerProFork) }()
+ go func() { errc <- ethProFork.handle(peerNoFork) }()
+
+ select {
+ case err := <-errc:
+ t.Fatalf("homestead nofork <-> profork failed: %v", err)
+ case <-time.After(250 * time.Millisecond):
+ p2pNoFork.Close()
+ p2pProFork.Close()
+ }
+ // Progress into Spurious. Forks mismatch, signalling differing chains, reject
+ chainNoFork.InsertChain(blocksNoFork[1:2])
+ chainProFork.InsertChain(blocksProFork[1:2])
+
+ p2pNoFork, p2pProFork = p2p.MsgPipe()
+ peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+ peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
+
+ errc = make(chan error, 2)
+ go func() { errc <- ethNoFork.handle(peerProFork) }()
+ go func() { errc <- ethProFork.handle(peerNoFork) }()
+
+ var successes int
+ for i := 0; i < 2; i++ {
+ select {
+ case err := <-errc:
+ if err == nil {
+ successes++
+ if successes == 2 { // Only one side disconnects
+ t.Fatalf("fork ID rejection didn't happen")
+ }
+ }
+ case <-time.After(250 * time.Millisecond):
+ t.Fatalf("split peers not rejected")
+ }
+ }
+}
+
// This test checks that received transactions are added to the local pool.
-func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) }
-func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
+func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
+func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
+func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }
+func TestRecvTransactions100(t *testing.T) { testRecvTransactions(t, 100) }
+func TestRecvTransactions101(t *testing.T) { testRecvTransactions(t, 101) }
func testRecvTransactions(t *testing.T, protocol int) {
txAdded := make(chan []*types.Transaction)
@@ -106,7 +267,7 @@ func testRecvTransactions(t *testing.T, protocol int) {
defer p.close()
tx := newTestTransaction(testAccount, 0, 0)
- if err := p2p.Send(p.app, TxMsg, []interface{}{tx}); err != nil {
+ if err := p2p.Send(p.app, TransactionMsg, []interface{}{tx}); err != nil {
t.Fatalf("send error: %v", err)
}
select {
@@ -122,20 +283,26 @@ func testRecvTransactions(t *testing.T, protocol int) {
}
// This test checks that pending transactions are sent.
-func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) }
-func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
+func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
+func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
+func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }
+func TestSendTransactions100(t *testing.T) { testSendTransactions(t, 100) }
+func TestSendTransactions101(t *testing.T) { testSendTransactions(t, 101) }
func testSendTransactions(t *testing.T, protocol int) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
defer pm.Stop()
- // Fill the pool with big transactions.
+ // Fill the pool with big transactions (use a subscription to wait until all
+ // the transactions are announced to avoid spurious events causing extra
+ // broadcasts).
const txsize = txsyncPackSize / 10
alltxs := make([]*types.Transaction, 100)
for nonce := range alltxs {
alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
}
pm.txpool.AddRemotes(alltxs)
+ time.Sleep(100 * time.Millisecond) // Wait until new tx event gets out of the system (lame)
// Connect several peers. They should all receive the pending transactions.
var wg sync.WaitGroup
@@ -147,18 +314,50 @@ func testSendTransactions(t *testing.T, protocol int) {
seen[tx.Hash()] = false
}
for n := 0; n < len(alltxs) && !t.Failed(); {
- var txs []*types.Transaction
- msg, err := p.app.ReadMsg()
- if err != nil {
- t.Errorf("%v: read error: %v", p.Peer, err)
- } else if msg.Code != TxMsg {
- t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code)
- }
- if err := msg.Decode(&txs); err != nil {
- t.Errorf("%v: %v", p.Peer, err)
+ var forAllHashes func(callback func(hash common.Hash))
+ switch {
+ case isEth63(protocol):
+ fallthrough
+ case isEth64(protocol):
+ msg, err := p.app.ReadMsg()
+ if err != nil {
+ t.Errorf("%v: read error: %v", p.Peer, err)
+ continue
+ } else if msg.Code != TransactionMsg {
+ t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code)
+ continue
+ }
+ var txs []*types.Transaction
+ if err := msg.Decode(&txs); err != nil {
+ t.Errorf("%v: %v", p.Peer, err)
+ continue
+ }
+ forAllHashes = func(callback func(hash common.Hash)) {
+ for _, tx := range txs {
+ callback(tx.Hash())
+ }
+ }
+ case isEth65(protocol):
+ msg, err := p.app.ReadMsg()
+ if err != nil {
+ t.Errorf("%v: read error: %v", p.Peer, err)
+ continue
+ } else if msg.Code != NewPooledTransactionHashesMsg {
+ t.Errorf("%v: got code %d, want NewPooledTransactionHashesMsg", p.Peer, msg.Code)
+ continue
+ }
+ var hashes []common.Hash
+ if err := msg.Decode(&hashes); err != nil {
+ t.Errorf("%v: %v", p.Peer, err)
+ continue
+ }
+ forAllHashes = func(callback func(hash common.Hash)) {
+ for _, h := range hashes {
+ callback(h)
+ }
+ }
}
- for _, tx := range txs {
- hash := tx.Hash()
+ forAllHashes(func(hash common.Hash) {
seentx, want := seen[hash]
if seentx {
t.Errorf("%v: got tx more than once: %x", p.Peer, hash)
@@ -168,7 +367,7 @@ func testSendTransactions(t *testing.T, protocol int) {
}
seen[hash] = true
n++
- }
+ })
}
}
for i := 0; i < 3; i++ {
@@ -178,6 +377,52 @@ func testSendTransactions(t *testing.T, protocol int) {
}
wg.Wait()
}
+func TestTransactionPropagation(t *testing.T) { testSyncTransaction(t, true) }
+func TestTransactionAnnouncement(t *testing.T) { testSyncTransaction(t, false) }
+
+func testSyncTransaction(t *testing.T, propagtion bool) {
+ // Create a protocol manager for transaction fetcher and sender
+ pmFetcher, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
+ defer pmFetcher.Stop()
+ pmSender, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil)
+ pmSender.broadcastTxAnnouncesOnly = !propagtion
+ defer pmSender.Stop()
+
+ // Sync up the two peers
+ io1, io2 := p2p.MsgPipe()
+
+ go pmSender.handle(pmSender.newPeer(65, p2p.NewPeer(enode.ID{}, "sender", nil), io2, pmSender.txpool.Get))
+ go pmFetcher.handle(pmFetcher.newPeer(65, p2p.NewPeer(enode.ID{}, "fetcher", nil), io1, pmFetcher.txpool.Get))
+
+ time.Sleep(250 * time.Millisecond)
+ pmFetcher.synchronise(pmFetcher.peers.BestPeer())
+ atomic.StoreUint32(&pmFetcher.acceptTxs, 1)
+
+ newTxs := make(chan core.NewTxsEvent, 1024)
+ sub := pmFetcher.txpool.SubscribeNewTxsEvent(newTxs)
+ defer sub.Unsubscribe()
+
+ // Fill the pool with new transactions
+ alltxs := make([]*types.Transaction, 1024)
+ for nonce := range alltxs {
+ alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), 0)
+ }
+ pmSender.txpool.AddRemotes(alltxs)
+
+ var got int
+loop:
+ for {
+ select {
+ case ev := <-newTxs:
+ got += len(ev.Txs)
+ if got == 1024 {
+ break loop
+ }
+ case <-time.NewTimer(time.Second).C:
+ t.Fatal("Failed to retrieve all transaction")
+ }
+ }
+}
// Tests that the custom union field encoder and decoder works correctly.
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
diff --git a/eth/sync.go b/eth/sync.go
index cbe6d421c8bb..fa4a9224a31d 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -25,7 +25,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/core/types"
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
const (
@@ -44,6 +44,12 @@ type txsync struct {
// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
+ // Assemble the set of transaction to broadcast or announce to the remote
+ // peer. Fun fact, this is quite an expensive operation as it needs to sort
+ // the transactions if the sorting is not cached yet. However, with a random
+ // order, insertions could overflow the non-executable queues and get dropped.
+ //
+ // TODO(karalabe): Figure out if we could get away with random order somehow
var txs types.Transactions
pending, _ := pm.txpool.Pending()
for _, batch := range pending {
@@ -52,26 +58,40 @@ func (pm *ProtocolManager) syncTransactions(p *peer) {
if len(txs) == 0 {
return
}
+ // The eth/65 protocol introduces proper transaction announcements, so instead
+ // of dripping transactions across multiple peers, just send the entire list as
+ // an announcement and let the remote side decide what they need (likely nothing).
+ if isEth65OrHigher(p.version) {
+ hashes := make([]common.Hash, len(txs))
+ for i, tx := range txs {
+ hashes[i] = tx.Hash()
+ }
+ p.AsyncSendPooledTransactionHashes(hashes)
+ return
+ }
+ // Out of luck, peer is running legacy protocols, drop the txs over
select {
- case pm.txsyncCh <- &txsync{p, txs}:
+ case pm.txsyncCh <- &txsync{p: p, txs: txs}:
case <-pm.quitSync:
}
}
-// txsyncLoop takes care of the initial transaction sync for each new
+// txsyncLoop64 takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
-func (pm *ProtocolManager) txsyncLoop() {
+func (pm *ProtocolManager) txsyncLoop64() {
var (
- pending = make(map[discover.NodeID]*txsync)
+ pending = make(map[enode.ID]*txsync)
sending = false // whether a send is active
pack = new(txsync) // the pack that is being sent
done = make(chan error, 1) // result of the send
)
-
// send starts a sending a pack of transactions from the sync.
send := func(s *txsync) {
+ if isEth65OrHigher(s.p.version) {
+ panic("initial transaction syncer running on eth/65+")
+ }
// Fill pack with transactions up to the target size.
size := common.StorageSize(0)
pack.p = s.p
@@ -88,7 +108,7 @@ func (pm *ProtocolManager) txsyncLoop() {
// Send the pack in the background.
s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
sending = true
- go func() { done <- pack.p.SendTransactions(pack.txs) }()
+ go func() { done <- pack.p.SendTransactions64(pack.txs) }()
}
// pick chooses the next pending sync.
@@ -133,9 +153,11 @@ func (pm *ProtocolManager) txsyncLoop() {
// downloading hashes and blocks as well as handling the announcement handler.
func (pm *ProtocolManager) syncer() {
// Start and ensure cleanup of sync mechanisms
- pm.fetcher.Start()
+ pm.blockFetcher.Start()
+ pm.txFetcher.Start()
pm.bft.Start()
- defer pm.fetcher.Stop()
+ defer pm.blockFetcher.Stop()
+ defer pm.txFetcher.Stop()
defer pm.bft.Stop()
defer pm.downloader.Terminate()
diff --git a/eth/sync_test.go b/eth/sync_test.go
index cd5b85e94155..20a51723d39a 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -23,12 +23,18 @@ import (
"github.com/XinFinOrg/XDPoSChain/eth/downloader"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
+func TestFastSyncDisabling63(t *testing.T) { testFastSyncDisabling(t, 63) }
+func TestFastSyncDisabling64(t *testing.T) { testFastSyncDisabling(t, 64) }
+func TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, 65) }
+func TestFastSyncDisabling100(t *testing.T) { testFastSyncDisabling(t, 100) }
+func TestFastSyncDisabling101(t *testing.T) { testFastSyncDisabling(t, 101) }
+
// Tests that fast sync gets disabled as soon as a real block is successfully
// imported into the blockchain.
-func TestFastSyncDisabling(t *testing.T) {
+func testFastSyncDisabling(t *testing.T, protocol int) {
// Create a pristine protocol manager, check that fast sync is left enabled
pmEmpty, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
@@ -42,8 +48,8 @@ func TestFastSyncDisabling(t *testing.T) {
// Sync up the two peers
io1, io2 := p2p.MsgPipe()
- go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(discover.NodeID{}, "empty", nil), io2))
- go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(discover.NodeID{}, "full", nil), io1))
+ go pmFull.handle(pmFull.newPeer(protocol, p2p.NewPeer(enode.ID{}, "empty", nil), io2, pmFull.txpool.Get))
+ go pmEmpty.handle(pmEmpty.newPeer(protocol, p2p.NewPeer(enode.ID{}, "full", nil), io1, pmEmpty.txpool.Get))
time.Sleep(250 * time.Millisecond)
pmEmpty.synchronise(pmEmpty.peers.BestPeer())
diff --git a/fuzzbuzz.yaml b/fuzzbuzz.yaml
new file mode 100644
index 000000000000..2a4f0c296fe7
--- /dev/null
+++ b/fuzzbuzz.yaml
@@ -0,0 +1,44 @@
+# keystore rlp trie txfetcher whisperv6
+
+base: ubuntu:16.04
+targets:
+ - name: rlp
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/rlp/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/rlp
+ checkout: github.com/ethereum/go-ethereum/
+ - name: keystore
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/keystore/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/keystore
+ checkout: github.com/ethereum/go-ethereum/
+ - name: trie
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/trie/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/trie
+ checkout: github.com/ethereum/go-ethereum/
+ - name: txfetcher
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/txfetcher/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/txfetcher
+ checkout: github.com/ethereum/go-ethereum/
+ - name: whisperv6
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/whisperv6/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/whisperv6
+ checkout: github.com/ethereum/go-ethereum/
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index dbb3f41df4ad..57917263e2f2 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -186,6 +186,16 @@ web3._extend({
call: 'admin_removePeer',
params: 1
}),
+ new web3._extend.Method({
+ name: 'addTrustedPeer',
+ call: 'admin_addTrustedPeer',
+ params: 1
+ }),
+ new web3._extend.Method({
+ name: 'removeTrustedPeer',
+ call: 'admin_removeTrustedPeer',
+ params: 1
+ }),
new web3._extend.Method({
name: 'exportChain',
call: 'admin_exportChain',
diff --git a/les/backend.go b/les/backend.go
index 13bc90a3b07e..304dfc7bd331 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -112,7 +112,6 @@ func New(ctx *node.ServiceContext, config *ethconfig.Config) (*LightEthereum, er
}
leth.relay = NewLesTxRelay(peers, leth.reqDist)
- leth.serverPool = newServerPool(chainDb, quitSync, &leth.wg)
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
leth.odr = NewLesOdr(chainDb, leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer, leth.retriever)
if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
diff --git a/les/handler.go b/les/handler.go
index 6a4ba688ea3b..d7c3e84302be 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"math/big"
- "net"
"sync"
"time"
@@ -40,7 +39,6 @@ import (
"github.com/XinFinOrg/XDPoSChain/light"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/p2p/discv5"
"github.com/XinFinOrg/XDPoSChain/params"
"github.com/XinFinOrg/XDPoSChain/rlp"
@@ -167,8 +165,7 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protoco
var entry *poolEntry
peer := manager.newPeer(int(version), networkId, p, rw)
if manager.serverPool != nil {
- addr := p.RemoteAddr().(*net.TCPAddr)
- entry = manager.serverPool.connect(peer, addr.IP, uint16(addr.Port))
+ entry = manager.serverPool.connect(peer, peer.Node())
}
peer.poolEntry = entry
select {
@@ -190,12 +187,6 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protoco
NodeInfo: func() interface{} {
return manager.NodeInfo()
},
- PeerInfo: func(id discover.NodeID) interface{} {
- if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
- return p.Info()
- }
- return nil
- },
})
}
if len(manager.SubProtocols) == 0 {
@@ -394,9 +385,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
-
+ if err := req.sanityCheck(); err != nil {
+ return err
+ }
if p.requestAnnounceType == announceTypeSigned {
- if err := req.checkSignature(p.pubKey); err != nil {
+ if err := req.checkSignature(p.ID()); err != nil {
p.Log().Trace("Invalid announcement signature", "err", err)
return err
}
diff --git a/les/helper_test.go b/les/helper_test.go
index 19e054626a0d..62a291db7279 100644
--- a/les/helper_test.go
+++ b/les/helper_test.go
@@ -37,7 +37,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/les/flowcontrol"
"github.com/XinFinOrg/XDPoSChain/light"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/params"
)
@@ -223,7 +223,7 @@ func newTestPeer(t *testing.T, name string, version int, pm *ProtocolManager, sh
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
- var id discover.NodeID
+ var id enode.ID
rand.Read(id[:])
peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
@@ -260,7 +260,7 @@ func newTestPeerPair(name string, version int, pm, pm2 *ProtocolManager) (*peer,
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
- var id discover.NodeID
+ var id enode.ID
rand.Read(id[:])
peer := pm.newPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
diff --git a/les/peer.go b/les/peer.go
index cbc7b9957020..3162517cd2bf 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -18,7 +18,6 @@
package les
import (
- "crypto/ecdsa"
"encoding/binary"
"errors"
"fmt"
@@ -51,7 +50,6 @@ const (
type peer struct {
*p2p.Peer
- pubKey *ecdsa.PublicKey
rw p2p.MsgReadWriter
@@ -80,11 +78,9 @@ type peer struct {
func newPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
- pubKey, _ := id.Pubkey()
return &peer{
Peer: p,
- pubKey: pubKey,
rw: rw,
version: version,
network: network,
diff --git a/les/protocol.go b/les/protocol.go
index 273ccfcce974..c3ef953051fc 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -18,9 +18,7 @@
package les
import (
- "bytes"
"crypto/ecdsa"
- "crypto/elliptic"
"errors"
"fmt"
"io"
@@ -29,7 +27,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/core"
"github.com/XinFinOrg/XDPoSChain/crypto"
- "github.com/XinFinOrg/XDPoSChain/crypto/secp256k1"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -139,6 +137,14 @@ type announceData struct {
Update keyValueList
}
+// sanityCheck verifies that the values are reasonable, as a DoS protection
+func (a *announceData) sanityCheck() error {
+ if tdlen := a.Td.BitLen(); tdlen > 100 {
+ return fmt.Errorf("too large block TD: bitlen %d", tdlen)
+ }
+ return nil
+}
+
// sign adds a signature to the block announcement by the given privKey
func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})
@@ -147,22 +153,20 @@ func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
}
// checkSignature verifies if the block announcement has a valid signature by the given pubKey
-func (a *announceData) checkSignature(pubKey *ecdsa.PublicKey) error {
+func (a *announceData) checkSignature(id enode.ID) error {
var sig []byte
if err := a.Update.decode().get("sign", &sig); err != nil {
return err
}
rlp, _ := rlp.EncodeToBytes(announceBlock{a.Hash, a.Number, a.Td})
- recPubkey, err := secp256k1.RecoverPubkey(crypto.Keccak256(rlp), sig)
+ recPubkey, err := crypto.SigToPub(crypto.Keccak256(rlp), sig)
if err != nil {
return err
}
- pbytes := elliptic.Marshal(pubKey.Curve, pubKey.X, pubKey.Y)
- if bytes.Equal(pbytes, recPubkey) {
+ if id == enode.PubkeyToIDV4(recPubkey) {
return nil
- } else {
- return errors.New("Wrong signature")
}
+ return errors.New("wrong signature")
}
type blockInfo struct {
diff --git a/les/serverpool.go b/les/serverpool.go
index 0b17f4b63872..4dc64d987940 100644
--- a/les/serverpool.go
+++ b/les/serverpool.go
@@ -14,10 +14,10 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-// Package les implements the Light Ethereum Subprotocol.
package les
import (
+ "crypto/ecdsa"
"fmt"
"io"
"math"
@@ -28,11 +28,12 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/common/mclock"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/ethdb"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/p2p/discv5"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -73,7 +74,6 @@ const (
// and a short term value which is adjusted exponentially with a factor of
// pstatRecentAdjust with each dial/connection and also returned exponentially
// to the average with the time constant pstatReturnToMeanTC
- pstatRecentAdjust = 0.1
pstatReturnToMeanTC = time.Hour
// node address selection weight is dropped by a factor of exp(-addrFailDropLn) after
// each unsuccessful connection (restored after a successful one)
@@ -83,14 +83,31 @@ const (
responseScoreTC = time.Millisecond * 100
delayScoreTC = time.Second * 5
timeoutPow = 10
- // peerSelectMinWeight is added to calculated weights at request peer selection
- // to give poorly performing peers a little chance of coming back
- peerSelectMinWeight = 0.005
// initStatsWeight is used to initialize previously unknown peers with good
// statistics to give a chance to prove themselves
initStatsWeight = 1
)
+// connReq represents a request for peer connection.
+type connReq struct {
+ p *peer
+ node *enode.Node
+ result chan *poolEntry
+}
+
+// disconnReq represents a request for peer disconnection.
+type disconnReq struct {
+ entry *poolEntry
+ stopped bool
+ done chan struct{}
+}
+
+// registerReq represents a request for peer registration.
+type registerReq struct {
+ entry *poolEntry
+ done chan struct{}
+}
+
// serverPool implements a pool for storing and selecting newly discovered and already
// known light server nodes. It received discovered nodes, stores statistics about
// known nodes and takes care of always having enough good quality servers connected.
@@ -98,18 +115,16 @@ type serverPool struct {
db ethdb.Database
dbKey []byte
server *p2p.Server
- quit chan struct{}
- wg *sync.WaitGroup
connWg sync.WaitGroup
topic discv5.Topic
discSetPeriod chan time.Duration
- discNodes chan *discv5.Node
+ discNodes chan *enode.Node
discLookups chan bool
- entries map[discover.NodeID]*poolEntry
- lock sync.Mutex
+ trustedNodes map[enode.ID]*enode.Node
+ entries map[enode.ID]*poolEntry
timeout, enableRetry chan *poolEntry
adjustStats chan poolStatAdjust
@@ -117,22 +132,32 @@ type serverPool struct {
knownSelect, newSelect *weightedRandomSelect
knownSelected, newSelected int
fastDiscover bool
+ connCh chan *connReq
+ disconnCh chan *disconnReq
+ registerCh chan *registerReq
+
+ closeCh chan struct{}
+ wg sync.WaitGroup
}
// newServerPool creates a new serverPool instance
-func newServerPool(db ethdb.Database, quit chan struct{}, wg *sync.WaitGroup) *serverPool {
+func newServerPool(db ethdb.Database, ulcServers []string) *serverPool {
pool := &serverPool{
db: db,
- quit: quit,
- wg: wg,
- entries: make(map[discover.NodeID]*poolEntry),
+ entries: make(map[enode.ID]*poolEntry),
timeout: make(chan *poolEntry, 1),
adjustStats: make(chan poolStatAdjust, 100),
enableRetry: make(chan *poolEntry, 1),
+ connCh: make(chan *connReq),
+ disconnCh: make(chan *disconnReq),
+ registerCh: make(chan *registerReq),
+ closeCh: make(chan struct{}),
knownSelect: newWeightedRandomSelect(),
newSelect: newWeightedRandomSelect(),
fastDiscover: true,
+ trustedNodes: parseTrustedNodes(ulcServers),
}
+
pool.knownQueue = newPoolEntryQueue(maxKnownEntries, pool.removeEntry)
pool.newQueue = newPoolEntryQueue(maxNewEntries, pool.removeEntry)
return pool
@@ -142,18 +167,52 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
pool.server = server
pool.topic = topic
pool.dbKey = append([]byte("serverPool/"), []byte(topic)...)
- pool.wg.Add(1)
pool.loadNodes()
+ pool.connectToTrustedNodes()
if pool.server.DiscV5 != nil {
pool.discSetPeriod = make(chan time.Duration, 1)
- pool.discNodes = make(chan *discv5.Node, 100)
+ pool.discNodes = make(chan *enode.Node, 100)
pool.discLookups = make(chan bool, 100)
- go pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, pool.discNodes, pool.discLookups)
+ go pool.discoverNodes()
}
-
- go pool.eventLoop()
pool.checkDial()
+ pool.wg.Add(1)
+ go pool.eventLoop()
+
+ // Inject the bootstrap nodes as initial dial candidates.
+ pool.wg.Add(1)
+ go func() {
+ defer pool.wg.Done()
+ for _, n := range server.BootstrapNodes {
+ select {
+ case pool.discNodes <- n:
+ case <-pool.closeCh:
+ return
+ }
+ }
+ }()
+}
+
+func (pool *serverPool) stop() {
+ close(pool.closeCh)
+ pool.wg.Wait()
+}
+
+// discoverNodes wraps SearchTopic, converting result nodes to enode.Node.
+func (pool *serverPool) discoverNodes() {
+ ch := make(chan *discv5.Node)
+ go func() {
+ pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, ch, pool.discLookups)
+ close(ch)
+ }()
+ for n := range ch {
+ pubkey, err := decodePubkey64(n.ID[:])
+ if err != nil {
+ continue
+ }
+ pool.discNodes <- enode.NewV4(pubkey, n.IP, int(n.TCP), int(n.UDP))
+ }
}
// connect should be called upon any incoming connection. If the connection has been
@@ -161,84 +220,45 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
// Otherwise, the connection should be rejected.
// Note that whenever a connection has been accepted and a pool entry has been returned,
// disconnect should also always be called.
-func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
- pool.lock.Lock()
- defer pool.lock.Unlock()
- entry := pool.entries[p.ID()]
- if entry == nil {
- entry = pool.findOrNewNode(p.ID(), ip, port)
- }
- p.Log().Debug("Connecting to new peer", "state", entry.state)
- if entry.state == psConnected || entry.state == psRegistered {
+func (pool *serverPool) connect(p *peer, node *enode.Node) *poolEntry {
+ log.Debug("Connect new entry", "enode", p.id)
+ req := &connReq{p: p, node: node, result: make(chan *poolEntry, 1)}
+ select {
+ case pool.connCh <- req:
+ case <-pool.closeCh:
return nil
}
- pool.connWg.Add(1)
- entry.peer = p
- entry.state = psConnected
- addr := &poolEntryAddress{
- ip: ip,
- port: port,
- lastSeen: mclock.Now(),
- }
- entry.lastConnected = addr
- entry.addr = make(map[string]*poolEntryAddress)
- entry.addr[addr.strKey()] = addr
- entry.addrSelect = *newWeightedRandomSelect()
- entry.addrSelect.update(addr)
- return entry
+ return <-req.result
}
// registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) {
- log.Debug("Registered new entry", "enode", entry.id)
- pool.lock.Lock()
- defer pool.lock.Unlock()
-
- entry.state = psRegistered
- entry.regTime = mclock.Now()
- if !entry.known {
- pool.newQueue.remove(entry)
- entry.known = true
+ log.Debug("Registered new entry", "enode", entry.node.ID())
+ req := &registerReq{entry: entry, done: make(chan struct{})}
+ select {
+ case pool.registerCh <- req:
+ case <-pool.closeCh:
+ return
}
- pool.knownQueue.setLatest(entry)
- entry.shortRetry = shortRetryCnt
+ <-req.done
}
// disconnect should be called when ending a connection. Service quality statistics
// can be updated optionally (not updated if no registration happened, in this case
// only connection statistics are updated, just like in case of timeout)
func (pool *serverPool) disconnect(entry *poolEntry) {
- log.Debug("Disconnected old entry", "enode", entry.id)
- pool.lock.Lock()
- defer pool.lock.Unlock()
-
- if entry.state == psRegistered {
- connTime := mclock.Now() - entry.regTime
- connAdjust := float64(connTime) / float64(targetConnTime)
- if connAdjust > 1 {
- connAdjust = 1
- }
- stopped := false
- select {
- case <-pool.quit:
- stopped = true
- default:
- }
- if stopped {
- entry.connectStats.add(1, connAdjust)
- } else {
- entry.connectStats.add(connAdjust, 1)
- }
+ stopped := false
+ select {
+ case <-pool.closeCh:
+ stopped = true
+ default:
}
+ log.Debug("Disconnected old entry", "enode", entry.node.ID())
+ req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})}
- entry.state = psNotConnected
- if entry.knownSelected {
- pool.knownSelected--
- } else {
- pool.newSelected--
- }
- pool.setRetryDial(entry)
- pool.connWg.Done()
+ // Block until disconnection request is served.
+ pool.disconnCh <- req
+ <-req.done
}
const (
@@ -276,30 +296,57 @@ func (pool *serverPool) adjustResponseTime(entry *poolEntry, time time.Duration,
// eventLoop handles pool events and mutex locking for all internal functions
func (pool *serverPool) eventLoop() {
+ defer pool.wg.Done()
lookupCnt := 0
var convTime mclock.AbsTime
if pool.discSetPeriod != nil {
pool.discSetPeriod <- time.Millisecond * 100
}
+
+ // disconnect updates service quality statistics depending on the connection time
+ // and disconnection initiator.
+ disconnect := func(req *disconnReq, stopped bool) {
+ // Handle peer disconnection requests.
+ entry := req.entry
+ if entry.state == psRegistered {
+ connAdjust := float64(mclock.Now()-entry.regTime) / float64(targetConnTime)
+ if connAdjust > 1 {
+ connAdjust = 1
+ }
+ if stopped {
+ // disconnect requested by ourselves.
+ entry.connectStats.add(1, connAdjust)
+ } else {
+ // disconnect requested by server side.
+ entry.connectStats.add(connAdjust, 1)
+ }
+ }
+ entry.state = psNotConnected
+
+ if entry.knownSelected {
+ pool.knownSelected--
+ } else {
+ pool.newSelected--
+ }
+ pool.setRetryDial(entry)
+ pool.connWg.Done()
+ close(req.done)
+ }
+
for {
select {
case entry := <-pool.timeout:
- pool.lock.Lock()
if !entry.removed {
pool.checkDialTimeout(entry)
}
- pool.lock.Unlock()
case entry := <-pool.enableRetry:
- pool.lock.Lock()
if !entry.removed {
entry.delayedRetry = false
pool.updateCheckDial(entry)
}
- pool.lock.Unlock()
case adj := <-pool.adjustStats:
- pool.lock.Lock()
switch adj.adjustType {
case pseBlockDelay:
adj.entry.delayStats.add(float64(adj.time), 1)
@@ -309,13 +356,12 @@ func (pool *serverPool) eventLoop() {
case pseResponseTimeout:
adj.entry.timeoutStats.add(1, 1)
}
- pool.lock.Unlock()
case node := <-pool.discNodes:
- pool.lock.Lock()
- entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP)
- pool.updateCheckDial(entry)
- pool.lock.Unlock()
+ if pool.trustedNodes[node.ID()] == nil {
+ entry := pool.findOrNewNode(node)
+ pool.updateCheckDial(entry)
+ }
case conv := <-pool.discLookups:
if conv {
@@ -331,31 +377,92 @@ func (pool *serverPool) eventLoop() {
}
}
- case <-pool.quit:
+ case req := <-pool.connCh:
+ if pool.trustedNodes[req.p.ID()] != nil {
+ // ignore trusted nodes
+ req.result <- &poolEntry{trusted: true}
+ } else {
+ // Handle peer connection requests.
+ entry := pool.entries[req.p.ID()]
+ if entry == nil {
+ entry = pool.findOrNewNode(req.node)
+ }
+ if entry.state == psConnected || entry.state == psRegistered {
+ req.result <- nil
+ continue
+ }
+ pool.connWg.Add(1)
+ entry.peer = req.p
+ entry.state = psConnected
+ addr := &poolEntryAddress{
+ ip: req.node.IP(),
+ port: uint16(req.node.TCP()),
+ lastSeen: mclock.Now(),
+ }
+ entry.lastConnected = addr
+ entry.addr = make(map[string]*poolEntryAddress)
+ entry.addr[addr.strKey()] = addr
+ entry.addrSelect = *newWeightedRandomSelect()
+ entry.addrSelect.update(addr)
+ req.result <- entry
+ }
+
+ case req := <-pool.registerCh:
+ if req.entry.trusted {
+ continue
+ }
+ // Handle peer registration requests.
+ entry := req.entry
+ entry.state = psRegistered
+ entry.regTime = mclock.Now()
+ if !entry.known {
+ pool.newQueue.remove(entry)
+ entry.known = true
+ }
+ pool.knownQueue.setLatest(entry)
+ entry.shortRetry = shortRetryCnt
+ close(req.done)
+
+ case req := <-pool.disconnCh:
+ if req.entry.trusted {
+ continue
+ }
+ // Handle peer disconnection requests.
+ disconnect(req, req.stopped)
+
+ case <-pool.closeCh:
if pool.discSetPeriod != nil {
close(pool.discSetPeriod)
}
- pool.connWg.Wait()
+
+ // Spawn a goroutine to close the disconnCh after all connections are disconnected.
+ go func() {
+ pool.connWg.Wait()
+ close(pool.disconnCh)
+ }()
+
+ // Handle all remaining disconnection requests before exit.
+ for req := range pool.disconnCh {
+ disconnect(req, true)
+ }
pool.saveNodes()
- pool.wg.Done()
return
-
}
}
}
-func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16) *poolEntry {
+func (pool *serverPool) findOrNewNode(node *enode.Node) *poolEntry {
now := mclock.Now()
- entry := pool.entries[id]
+ entry := pool.entries[node.ID()]
if entry == nil {
- log.Debug("Discovered new entry", "id", id)
+ log.Debug("Discovered new entry", "id", node.ID())
entry = &poolEntry{
- id: id,
+ node: node,
addr: make(map[string]*poolEntryAddress),
addrSelect: *newWeightedRandomSelect(),
shortRetry: shortRetryCnt,
}
- pool.entries[id] = entry
+ pool.entries[node.ID()] = entry
// initialize previously unknown peers with good statistics to give a chance to prove themselves
entry.connectStats.add(1, initStatsWeight)
entry.delayStats.add(0, initStatsWeight)
@@ -363,10 +470,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
entry.timeoutStats.add(0, initStatsWeight)
}
entry.lastDiscovered = now
- addr := &poolEntryAddress{
- ip: ip,
- port: port,
- }
+ addr := &poolEntryAddress{ip: node.IP(), port: uint16(node.TCP())}
if a, ok := entry.addr[addr.strKey()]; ok {
addr = a
} else {
@@ -393,15 +497,46 @@ func (pool *serverPool) loadNodes() {
return
}
for _, e := range list {
- log.Debug("Loaded server stats", "id", e.id, "fails", e.lastConnected.fails,
+ log.Debug("Loaded server stats", "id", e.node.ID(), "fails", e.lastConnected.fails,
"conn", fmt.Sprintf("%v/%v", e.connectStats.avg, e.connectStats.weight),
"delay", fmt.Sprintf("%v/%v", time.Duration(e.delayStats.avg), e.delayStats.weight),
"response", fmt.Sprintf("%v/%v", time.Duration(e.responseStats.avg), e.responseStats.weight),
"timeout", fmt.Sprintf("%v/%v", e.timeoutStats.avg, e.timeoutStats.weight))
- pool.entries[e.id] = e
- pool.knownQueue.setLatest(e)
- pool.knownSelect.update((*knownEntry)(e))
+ pool.entries[e.node.ID()] = e
+ if pool.trustedNodes[e.node.ID()] == nil {
+ pool.knownQueue.setLatest(e)
+ pool.knownSelect.update((*knownEntry)(e))
+ }
+ }
+}
+
+// connectToTrustedNodes adds trusted server nodes as static trusted peers.
+//
+// Note: trusted nodes are not handled by the server pool logic, they are not
+// added to either the known or new selection pools. They are connected/reconnected
+// by p2p.Server whenever possible.
+func (pool *serverPool) connectToTrustedNodes() {
+ // connect to trusted nodes
+ for _, node := range pool.trustedNodes {
+ pool.server.AddTrustedPeer(node)
+ pool.server.AddPeer(node)
+ log.Debug("Added trusted node", "id", node.ID().String())
+ }
+}
+
+// parseTrustedNodes returns valid and parsed enodes
+func parseTrustedNodes(trustedNodes []string) map[enode.ID]*enode.Node {
+ nodes := make(map[enode.ID]*enode.Node)
+
+ for _, node := range trustedNodes {
+ node, err := enode.ParseV4(node)
+ if err != nil {
+ log.Warn("Trusted node URL invalid", "enode", node, "err", err)
+ continue
+ }
+ nodes[node.ID()] = node
}
+ return nodes
}
// saveNodes saves known nodes and their statistics into the database. Nodes are
@@ -424,7 +559,7 @@ func (pool *serverPool) removeEntry(entry *poolEntry) {
pool.newSelect.remove((*discoveredEntry)(entry))
pool.knownSelect.remove((*knownEntry)(entry))
entry.removed = true
- delete(pool.entries, entry.id)
+ delete(pool.entries, entry.node.ID())
}
// setRetryDial starts the timer which will enable dialing a certain node again
@@ -438,10 +573,10 @@ func (pool *serverPool) setRetryDial(entry *poolEntry) {
entry.delayedRetry = true
go func() {
select {
- case <-pool.quit:
+ case <-pool.closeCh:
case <-time.After(delay):
select {
- case <-pool.quit:
+ case <-pool.closeCh:
case pool.enableRetry <- entry:
}
}
@@ -502,15 +637,15 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
pool.newSelected++
}
addr := entry.addrSelect.choose().(*poolEntryAddress)
- log.Debug("Dialing new peer", "lesaddr", entry.id.String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
+ log.Debug("Dialing new peer", "lesaddr", entry.node.ID().String()+"@"+addr.strKey(), "set", len(entry.addr), "known", knownSelected)
entry.dialed = addr
go func() {
- pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
+ pool.server.AddPeer(entry.node)
select {
- case <-pool.quit:
+ case <-pool.closeCh:
case <-time.After(dialTimeout):
select {
- case <-pool.quit:
+ case <-pool.closeCh:
case pool.timeout <- entry:
}
}
@@ -523,7 +658,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
if entry.state != psDialed {
return
}
- log.Debug("Dial timeout", "lesaddr", entry.id.String()+"@"+entry.dialed.strKey())
+ log.Debug("Dial timeout", "lesaddr", entry.node.ID().String()+"@"+entry.dialed.strKey())
entry.state = psNotConnected
if entry.knownSelected {
pool.knownSelected--
@@ -545,41 +680,58 @@ const (
// poolEntry represents a server node and stores its current state and statistics.
type poolEntry struct {
peer *peer
- id discover.NodeID
+ pubkey [64]byte // secp256k1 key of the node
addr map[string]*poolEntryAddress
+ node *enode.Node
lastConnected, dialed *poolEntryAddress
addrSelect weightedRandomSelect
- lastDiscovered mclock.AbsTime
- known, knownSelected bool
- connectStats, delayStats poolStats
- responseStats, timeoutStats poolStats
- state int
- regTime mclock.AbsTime
- queueIdx int
- removed bool
+ lastDiscovered mclock.AbsTime
+ known, knownSelected, trusted bool
+ connectStats, delayStats poolStats
+ responseStats, timeoutStats poolStats
+ state int
+ regTime mclock.AbsTime
+ queueIdx int
+ removed bool
delayedRetry bool
shortRetry int
}
+// poolEntryEnc is the RLP encoding of poolEntry.
+type poolEntryEnc struct {
+ Pubkey []byte
+ IP net.IP
+ Port uint16
+ Fails uint
+ CStat, DStat, RStat, TStat poolStats
+}
+
func (e *poolEntry) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, []interface{}{e.id, e.lastConnected.ip, e.lastConnected.port, e.lastConnected.fails, &e.connectStats, &e.delayStats, &e.responseStats, &e.timeoutStats})
+ return rlp.Encode(w, &poolEntryEnc{
+ Pubkey: encodePubkey64(e.node.Pubkey()),
+ IP: e.lastConnected.ip,
+ Port: e.lastConnected.port,
+ Fails: e.lastConnected.fails,
+ CStat: e.connectStats,
+ DStat: e.delayStats,
+ RStat: e.responseStats,
+ TStat: e.timeoutStats,
+ })
}
func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
- var entry struct {
- ID discover.NodeID
- IP net.IP
- Port uint16
- Fails uint
- CStat, DStat, RStat, TStat poolStats
- }
+ var entry poolEntryEnc
if err := s.Decode(&entry); err != nil {
return err
}
+ pubkey, err := decodePubkey64(entry.Pubkey)
+ if err != nil {
+ return err
+ }
addr := &poolEntryAddress{ip: entry.IP, port: entry.Port, fails: entry.Fails, lastSeen: mclock.Now()}
- e.id = entry.ID
+ e.node = enode.NewV4(pubkey, entry.IP, int(entry.Port), int(entry.Port))
e.addr = make(map[string]*poolEntryAddress)
e.addr[addr.strKey()] = addr
e.addrSelect = *newWeightedRandomSelect()
@@ -594,6 +746,14 @@ func (e *poolEntry) DecodeRLP(s *rlp.Stream) error {
return nil
}
+func encodePubkey64(pub *ecdsa.PublicKey) []byte {
+ return crypto.FromECDSAPub(pub)[1:]
+}
+
+func decodePubkey64(b []byte) (*ecdsa.PublicKey, error) {
+ return crypto.UnmarshalPubkey(append([]byte{0x04}, b...))
+}
+
// discoveredEntry implements wrsItem
type discoveredEntry poolEntry
@@ -605,9 +765,8 @@ func (e *discoveredEntry) Weight() int64 {
t := time.Duration(mclock.Now() - e.lastDiscovered)
if t <= discoverExpireStart {
return 1000000000
- } else {
- return int64(1000000000 * math.Exp(-float64(t-discoverExpireStart)/float64(discoverExpireConst)))
}
+ return int64(1000000000 * math.Exp(-float64(t-discoverExpireStart)/float64(discoverExpireConst)))
}
// knownEntry implements wrsItem
diff --git a/metrics/doc.go b/metrics/doc.go
new file mode 100644
index 000000000000..13f429c1689d
--- /dev/null
+++ b/metrics/doc.go
@@ -0,0 +1,4 @@
+package metrics
+
+const epsilon = 0.0000000000000001
+const epsilonPercentile = .00000000001
diff --git a/metrics/ewma_test.go b/metrics/ewma_test.go
index 0430fbd24725..5b244191616e 100644
--- a/metrics/ewma_test.go
+++ b/metrics/ewma_test.go
@@ -1,6 +1,9 @@
package metrics
-import "testing"
+import (
+ "math"
+ "testing"
+)
func BenchmarkEWMA(b *testing.B) {
a := NewEWMA1()
@@ -15,67 +18,67 @@ func TestEWMA1(t *testing.T) {
a := NewEWMA1()
a.Update(3)
a.Tick()
- if rate := a.Rate(); 0.6 != rate {
+ if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.22072766470286553 != rate {
+ if rate := a.Rate(); math.Abs(0.22072766470286553-rate) > epsilon {
t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.08120116994196772 != rate {
+ if rate := a.Rate(); math.Abs(0.08120116994196772-rate) > epsilon {
t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.029872241020718428 != rate {
+ if rate := a.Rate(); math.Abs(0.029872241020718428-rate) > epsilon {
t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.01098938333324054 != rate {
+ if rate := a.Rate(); math.Abs(0.01098938333324054-rate) > epsilon {
t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.004042768199451294 != rate {
+ if rate := a.Rate(); math.Abs(0.004042768199451294-rate) > epsilon {
t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.0014872513059998212 != rate {
+ if rate := a.Rate(); math.Abs(0.0014872513059998212-rate) > epsilon {
t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.0005471291793327122 != rate {
+ if rate := a.Rate(); math.Abs(0.0005471291793327122-rate) > epsilon {
t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.00020127757674150815 != rate {
+ if rate := a.Rate(); math.Abs(0.00020127757674150815-rate) > epsilon {
t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 7.404588245200814e-05 != rate {
+ if rate := a.Rate(); math.Abs(7.404588245200814e-05-rate) > epsilon {
t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 2.7239957857491083e-05 != rate {
+ if rate := a.Rate(); math.Abs(2.7239957857491083e-05-rate) > epsilon {
t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 1.0021020474147462e-05 != rate {
+ if rate := a.Rate(); math.Abs(1.0021020474147462e-05-rate) > epsilon {
t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 3.6865274119969525e-06 != rate {
+ if rate := a.Rate(); math.Abs(3.6865274119969525e-06-rate) > epsilon {
t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 1.3561976441886433e-06 != rate {
+ if rate := a.Rate(); math.Abs(1.3561976441886433e-06-rate) > epsilon {
t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 4.989172314621449e-07 != rate {
+ if rate := a.Rate(); math.Abs(4.989172314621449e-07-rate) > epsilon {
t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 1.8354139230109722e-07 != rate {
+ if rate := a.Rate(); math.Abs(1.8354139230109722e-07-rate) > epsilon {
t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
}
}
@@ -84,67 +87,67 @@ func TestEWMA5(t *testing.T) {
a := NewEWMA5()
a.Update(3)
a.Tick()
- if rate := a.Rate(); 0.6 != rate {
+ if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.49123845184678905 != rate {
+ if rate := a.Rate(); math.Abs(0.49123845184678905-rate) > epsilon {
t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.4021920276213837 != rate {
+ if rate := a.Rate(); math.Abs(0.4021920276213837-rate) > epsilon {
t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.32928698165641596 != rate {
+ if rate := a.Rate(); math.Abs(0.32928698165641596-rate) > epsilon {
t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.269597378470333 != rate {
+ if rate := a.Rate(); math.Abs(0.269597378470333-rate) > epsilon {
t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.2207276647028654 != rate {
+ if rate := a.Rate(); math.Abs(0.2207276647028654-rate) > epsilon {
t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.18071652714732128 != rate {
+ if rate := a.Rate(); math.Abs(0.18071652714732128-rate) > epsilon {
t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.14795817836496392 != rate {
+ if rate := a.Rate(); math.Abs(0.14795817836496392-rate) > epsilon {
t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.12113791079679326 != rate {
+ if rate := a.Rate(); math.Abs(0.12113791079679326-rate) > epsilon {
t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.09917933293295193 != rate {
+ if rate := a.Rate(); math.Abs(0.09917933293295193-rate) > epsilon {
t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.08120116994196763 != rate {
+ if rate := a.Rate(); math.Abs(0.08120116994196763-rate) > epsilon {
t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.06648189501740036 != rate {
+ if rate := a.Rate(); math.Abs(0.06648189501740036-rate) > epsilon {
t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.05443077197364752 != rate {
+ if rate := a.Rate(); math.Abs(0.05443077197364752-rate) > epsilon {
t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.04456414692860035 != rate {
+ if rate := a.Rate(); math.Abs(0.04456414692860035-rate) > epsilon {
t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.03648603757513079 != rate {
+ if rate := a.Rate(); math.Abs(0.03648603757513079-rate) > epsilon {
t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.0298722410207183831020718428 != rate {
+ if rate := a.Rate(); math.Abs(0.0298722410207183831020718428-rate) > epsilon {
t.Errorf("15 minute a.Rate(): 0.0298722410207183831020718428 != %v\n", rate)
}
}
@@ -153,67 +156,67 @@ func TestEWMA15(t *testing.T) {
a := NewEWMA15()
a.Update(3)
a.Tick()
- if rate := a.Rate(); 0.6 != rate {
+ if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.5613041910189706 != rate {
+ if rate := a.Rate(); math.Abs(0.5613041910189706-rate) > epsilon {
t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.5251039914257684 != rate {
+ if rate := a.Rate(); math.Abs(0.5251039914257684-rate) > epsilon {
t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.4912384518467888184678905 != rate {
+ if rate := a.Rate(); math.Abs(0.4912384518467888184678905-rate) > epsilon {
t.Errorf("3 minute a.Rate(): 0.4912384518467888184678905 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.459557003018789 != rate {
+ if rate := a.Rate(); math.Abs(0.459557003018789-rate) > epsilon {
t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.4299187863442732 != rate {
+ if rate := a.Rate(); math.Abs(0.4299187863442732-rate) > epsilon {
t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.4021920276213831 != rate {
+ if rate := a.Rate(); math.Abs(0.4021920276213831-rate) > epsilon {
t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.37625345116383313 != rate {
+ if rate := a.Rate(); math.Abs(0.37625345116383313-rate) > epsilon {
t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.3519877317060185 != rate {
+ if rate := a.Rate(); math.Abs(0.3519877317060185-rate) > epsilon {
t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.3292869816564153165641596 != rate {
+ if rate := a.Rate(); math.Abs(0.3292869816564153165641596-rate) > epsilon {
t.Errorf("9 minute a.Rate(): 0.3292869816564153165641596 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.3080502714195546 != rate {
+ if rate := a.Rate(); math.Abs(0.3080502714195546-rate) > epsilon {
t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.2881831806538789 != rate {
+ if rate := a.Rate(); math.Abs(0.2881831806538789-rate) > epsilon {
t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.26959737847033216 != rate {
+ if rate := a.Rate(); math.Abs(0.26959737847033216-rate) > epsilon {
t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.2522102307052083 != rate {
+ if rate := a.Rate(); math.Abs(0.2522102307052083-rate) > epsilon {
t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.23594443252115815 != rate {
+ if rate := a.Rate(); math.Abs(0.23594443252115815-rate) > epsilon {
t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
}
elapseMinute(a)
- if rate := a.Rate(); 0.2207276647028646247028654470286553 != rate {
+ if rate := a.Rate(); math.Abs(0.2207276647028646247028654470286553-rate) > epsilon {
t.Errorf("15 minute a.Rate(): 0.2207276647028646247028654470286553 != %v\n", rate)
}
}
diff --git a/node/api.go b/node/api.go
index 8a3611524418..e519921817d2 100644
--- a/node/api.go
+++ b/node/api.go
@@ -27,7 +27,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/metrics"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
@@ -52,7 +52,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
return false, ErrNodeStopped
}
// Try to add the url as a static peer and return
- node, err := discover.ParseNode(url)
+ node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
@@ -60,7 +60,7 @@ func (api *PrivateAdminAPI) AddPeer(url string) (bool, error) {
return true, nil
}
-// RemovePeer disconnects from a a remote node if the connection exists
+// RemovePeer disconnects from a remote node if the connection exists
func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
// Make sure the server is running, fail otherwise
server := api.node.Server()
@@ -68,7 +68,7 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
return false, ErrNodeStopped
}
// Try to remove the url as a static peer and return
- node, err := discover.ParseNode(url)
+ node, err := enode.ParseV4(url)
if err != nil {
return false, fmt.Errorf("invalid enode: %v", err)
}
@@ -76,6 +76,37 @@ func (api *PrivateAdminAPI) RemovePeer(url string) (bool, error) {
return true, nil
}
+// AddTrustedPeer allows a remote node to always connect, even if slots are full
+func (api *PrivateAdminAPI) AddTrustedPeer(url string) (bool, error) {
+ // Make sure the server is running, fail otherwise
+ server := api.node.Server()
+ if server == nil {
+ return false, ErrNodeStopped
+ }
+ node, err := enode.ParseV4(url)
+ if err != nil {
+ return false, fmt.Errorf("invalid enode: %v", err)
+ }
+ server.AddTrustedPeer(node)
+ return true, nil
+}
+
+// RemoveTrustedPeer removes a remote node from the trusted peer set, but it
+// does not disconnect it automatically.
+func (api *PrivateAdminAPI) RemoveTrustedPeer(url string) (bool, error) {
+ // Make sure the server is running, fail otherwise
+ server := api.node.Server()
+ if server == nil {
+ return false, ErrNodeStopped
+ }
+ node, err := enode.ParseV4(url)
+ if err != nil {
+ return false, fmt.Errorf("invalid enode: %v", err)
+ }
+ server.RemoveTrustedPeer(node)
+ return true, nil
+}
+
// PeerEvents creates an RPC subscription which receives peer events from the
// node's p2p.Server
func (api *PrivateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) {
diff --git a/node/config.go b/node/config.go
index 848d563e9fa2..417a8a6c687f 100644
--- a/node/config.go
+++ b/node/config.go
@@ -32,7 +32,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
const (
@@ -336,18 +336,18 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
}
// StaticNodes returns a list of node enode URLs configured as static nodes.
-func (c *Config) StaticNodes() []*discover.Node {
+func (c *Config) StaticNodes() []*enode.Node {
return c.parsePersistentNodes(c.resolvePath(datadirStaticNodes))
}
// TrustedNodes returns a list of node enode URLs configured as trusted nodes.
-func (c *Config) TrustedNodes() []*discover.Node {
+func (c *Config) TrustedNodes() []*enode.Node {
return c.parsePersistentNodes(c.resolvePath(datadirTrustedNodes))
}
// parsePersistentNodes parses a list of discovery node URLs loaded from a .json
// file from within the data directory.
-func (c *Config) parsePersistentNodes(path string) []*discover.Node {
+func (c *Config) parsePersistentNodes(path string) []*enode.Node {
// Short circuit if no node config is present
if c.DataDir == "" {
return nil
@@ -362,12 +362,12 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
return nil
}
// Interpret the list as a discovery node array
- var nodes []*discover.Node
+ var nodes []*enode.Node
for _, url := range nodelist {
if url == "" {
continue
}
- node, err := discover.ParseNode(url)
+ node, err := enode.ParseV4(url)
if err != nil {
log.Error(fmt.Sprintf("Node URL %s: %v\n", url, err))
continue
diff --git a/node/node_test.go b/node/node_test.go
index 2bff5a68cc1a..627159dbcc07 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -453,9 +453,9 @@ func TestProtocolGather(t *testing.T) {
Count int
Maker InstrumentingWrapper
}{
- "Zero Protocols": {0, InstrumentedServiceMakerA},
- "Single Protocol": {1, InstrumentedServiceMakerB},
- "Many Protocols": {25, InstrumentedServiceMakerC},
+ "zero": {0, InstrumentedServiceMakerA},
+ "one": {1, InstrumentedServiceMakerB},
+ "many": {10, InstrumentedServiceMakerC},
}
for id, config := range services {
protocols := make([]p2p.Protocol, config.Count)
@@ -479,7 +479,7 @@ func TestProtocolGather(t *testing.T) {
defer stack.Stop()
protocols := stack.Server().Protocols
- if len(protocols) != 26 {
- t.Fatalf("mismatching number of protocols launched: have %d, want %d", len(protocols), 26)
+ if len(protocols) != 11 {
+ t.Fatalf("mismatching number of protocols launched: have %d, want %d", len(protocols), 11)
}
for id, config := range services {
diff --git a/p2p/dial.go b/p2p/dial.go
index 14d9e222ee8c..4251457c7c3e 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -18,14 +18,13 @@ package p2p
import (
"container/heap"
- "crypto/rand"
"errors"
"fmt"
"net"
"time"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
)
@@ -50,7 +49,7 @@ const (
// NodeDialer is used to connect to nodes in the network, typically by using
// an underlying net.Dialer but also using net.Pipe in tests
type NodeDialer interface {
- Dial(*discover.Node) (net.Conn, error)
+ Dial(*enode.Node) (net.Conn, error)
}
// TCPDialer implements the NodeDialer interface by using a net.Dialer to
@@ -60,8 +59,8 @@ type TCPDialer struct {
}
// Dial creates a TCP connection to the node
-func (t TCPDialer) Dial(dest *discover.Node) (net.Conn, error) {
- addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
+func (t TCPDialer) Dial(dest *enode.Node) (net.Conn, error) {
+ addr := &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()}
return t.Dialer.Dial("tcp", addr.String())
}
@@ -72,24 +71,24 @@ type dialstate struct {
maxDynDials int
ntab discoverTable
netrestrict *netutil.Netlist
+ self enode.ID
lookupRunning bool
- dialing map[discover.NodeID]connFlag
- lookupBuf []*discover.Node // current discovery lookup results
- randomNodes []*discover.Node // filled from Table
- static map[discover.NodeID]*dialTask
+ dialing map[enode.ID]connFlag
+ lookupBuf []*enode.Node // current discovery lookup results
+ randomNodes []*enode.Node // filled from Table
+ static map[enode.ID]*dialTask
hist *dialHistory
- start time.Time // time when the dialer was first used
- bootnodes []*discover.Node // default dials when there are no peers
+ start time.Time // time when the dialer was first used
+ bootnodes []*enode.Node // default dials when there are no peers
}
type discoverTable interface {
- Self() *discover.Node
Close()
- Resolve(target discover.NodeID) *discover.Node
- Lookup(target discover.NodeID) []*discover.Node
- ReadRandomNodes([]*discover.Node) int
+ Resolve(*enode.Node) *enode.Node
+ LookupRandom() []*enode.Node
+ ReadRandomNodes([]*enode.Node) int
}
// the dial history remembers recent dials.
@@ -97,7 +96,7 @@ type dialHistory []pastDial
// pastDial is an entry in the dial history.
type pastDial struct {
- id discover.NodeID
+ id enode.ID
exp time.Time
}
@@ -109,7 +108,7 @@ type task interface {
// fields cannot be accessed while the task is running.
type dialTask struct {
flags connFlag
- dest *discover.Node
+ dest *enode.Node
lastResolved time.Time
resolveDelay time.Duration
}
@@ -118,7 +117,7 @@ type dialTask struct {
// Only one discoverTask is active at any time.
// discoverTask.Do performs a random lookup.
type discoverTask struct {
- results []*discover.Node
+ results []*enode.Node
}
// A waitExpireTask is generated if there are no other tasks
@@ -127,15 +126,16 @@ type waitExpireTask struct {
time.Duration
}
-func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate {
+func newDialState(self enode.ID, static []*enode.Node, bootnodes []*enode.Node, ntab discoverTable, maxdyn int, netrestrict *netutil.Netlist) *dialstate {
s := &dialstate{
maxDynDials: maxdyn,
ntab: ntab,
+ self: self,
netrestrict: netrestrict,
- static: make(map[discover.NodeID]*dialTask),
- dialing: make(map[discover.NodeID]connFlag),
- bootnodes: make([]*discover.Node, len(bootnodes)),
- randomNodes: make([]*discover.Node, maxdyn/2),
+ static: make(map[enode.ID]*dialTask),
+ dialing: make(map[enode.ID]connFlag),
+ bootnodes: make([]*enode.Node, len(bootnodes)),
+ randomNodes: make([]*enode.Node, maxdyn/2),
hist: new(dialHistory),
}
copy(s.bootnodes, bootnodes)
@@ -145,32 +145,32 @@ func newDialState(static []*discover.Node, bootnodes []*discover.Node, ntab disc
return s
}
-func (s *dialstate) addStatic(n *discover.Node) {
- // This overwites the task instead of updating an existing
+func (s *dialstate) addStatic(n *enode.Node) {
+ // This overwrites the task instead of updating an existing
// entry, giving users the opportunity to force a resolve operation.
- s.static[n.ID] = &dialTask{flags: staticDialedConn, dest: n}
+ s.static[n.ID()] = &dialTask{flags: staticDialedConn, dest: n}
}
-func (s *dialstate) removeStatic(n *discover.Node) {
+func (s *dialstate) removeStatic(n *enode.Node) {
// This removes a task so future attempts to connect will not be made.
- delete(s.static, n.ID)
+ delete(s.static, n.ID())
// This removes a previous dial timestamp so that application
// can force a server to reconnect with chosen peer immediately.
- s.hist.remove(n.ID)
+ s.hist.remove(n.ID())
}
-func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
+func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Time) []task {
if s.start.IsZero() {
s.start = now
}
var newtasks []task
- addDial := func(flag connFlag, n *discover.Node) bool {
+ addDial := func(flag connFlag, n *enode.Node) bool {
if err := s.checkDial(n, peers); err != nil {
- log.Trace("Skipping dial candidate", "id", n.ID, "addr", &net.TCPAddr{IP: n.IP, Port: int(n.TCP)}, "err", err)
+ log.Trace("Skipping dial candidate", "id", n.ID(), "addr", &net.TCPAddr{IP: n.IP(), Port: n.TCP()}, "err", err)
return false
}
- s.dialing[n.ID] = flag
+ s.dialing[n.ID()] = flag
newtasks = append(newtasks, &dialTask{flags: flag, dest: n})
return true
}
@@ -196,8 +196,8 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
err := s.checkDial(t.dest, peers)
switch err {
case errNotWhitelisted, errSelf:
- log.Warn("Removing static dial candidate", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)}, "err", err)
- delete(s.static, t.dest.ID)
+ log.Warn("Removing static dial candidate", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()}, "err", err)
+ delete(s.static, t.dest.ID())
case nil:
s.dialing[id] = t.flags
newtasks = append(newtasks, t)
@@ -260,21 +260,18 @@ var (
errNotWhitelisted = errors.New("not contained in netrestrict whitelist")
)
-func (s *dialstate) checkDial(n *discover.Node, peers map[discover.NodeID]*Peer) error {
- _, dialing := s.dialing[n.ID]
+func (s *dialstate) checkDial(n *enode.Node, peers map[enode.ID]*Peer) error {
+ _, dialing := s.dialing[n.ID()]
switch {
case dialing:
return errAlreadyDialing
- case peers[n.ID] != nil:
- exitsPeer := peers[n.ID]
- if exitsPeer.PairPeer != nil {
- return errAlreadyConnected
- }
- case s.ntab != nil && n.ID == s.ntab.Self().ID:
+ case peers[n.ID()] != nil:
+ return errAlreadyConnected
+ case n.ID() == s.self:
return errSelf
- case s.netrestrict != nil && !s.netrestrict.Contains(n.IP):
+ case s.netrestrict != nil && !s.netrestrict.Contains(n.IP()):
return errNotWhitelisted
- case s.hist.contains(n.ID):
+ case s.hist.contains(n.ID()):
return errRecentlyDialed
}
return nil
@@ -283,8 +280,8 @@ func (s *dialstate) checkDial(n *discover.Node, peers map[discover.NodeID]*Peer)
func (s *dialstate) taskDone(t task, now time.Time) {
switch t := t.(type) {
case *dialTask:
- s.hist.add(t.dest.ID, now.Add(dialHistoryExpiration))
- delete(s.dialing, t.dest.ID)
+ s.hist.add(t.dest.ID(), now.Add(dialHistoryExpiration))
+ delete(s.dialing, t.dest.ID())
case *discoverTask:
s.lookupRunning = false
s.lookupBuf = append(s.lookupBuf, t.results...)
@@ -342,7 +339,7 @@ func (t *dialTask) resolve(srv *Server) bool {
if time.Since(t.lastResolved) < t.resolveDelay {
return false
}
- resolved := srv.ntab.Resolve(t.dest.ID)
+ resolved := srv.ntab.Resolve(t.dest)
t.lastResolved = time.Now()
if resolved == nil {
t.resolveDelay *= 2
@@ -355,7 +352,7 @@ func (t *dialTask) resolve(srv *Server) bool {
// The node was found.
t.resolveDelay = initialResolveDelay
t.dest = resolved
- log.Debug("Resolved node", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)})
+ log.Debug("Resolved node", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
return true
}
@@ -364,7 +361,7 @@ type dialError struct {
}
// dial performs the actual connection attempt.
-func (t *dialTask) dial(srv *Server, dest *discover.Node) error {
+func (t *dialTask) dial(srv *Server, dest *enode.Node) error {
fd, err := srv.Dialer.Dial(dest)
if err != nil {
return &dialError{err}
@@ -374,7 +371,8 @@ func (t *dialTask) dial(srv *Server, dest *discover.Node) error {
}
func (t *dialTask) String() string {
- return fmt.Sprintf("%v %x %v:%d", t.flags, t.dest.ID[:8], t.dest.IP, t.dest.TCP)
+ id := t.dest.ID()
+ return fmt.Sprintf("%v %x %v:%d", t.flags, id[:8], t.dest.IP(), t.dest.TCP())
}
func (t *discoverTask) Do(srv *Server) {
@@ -386,9 +384,7 @@ func (t *discoverTask) Do(srv *Server) {
time.Sleep(next.Sub(now))
}
srv.lastLookup = time.Now()
- var target discover.NodeID
- rand.Read(target[:])
- t.results = srv.ntab.Lookup(target)
+ t.results = srv.ntab.LookupRandom()
}
func (t *discoverTask) String() string {
@@ -410,11 +406,11 @@ func (t waitExpireTask) String() string {
func (h dialHistory) min() pastDial {
return h[0]
}
-func (h *dialHistory) add(id discover.NodeID, exp time.Time) {
+func (h *dialHistory) add(id enode.ID, exp time.Time) {
heap.Push(h, pastDial{id, exp})
}
-func (h *dialHistory) remove(id discover.NodeID) bool {
+func (h *dialHistory) remove(id enode.ID) bool {
for i, v := range *h {
if v.id == id {
heap.Remove(h, i)
@@ -423,7 +419,7 @@ func (h *dialHistory) remove(id discover.NodeID) bool {
}
return false
}
-func (h dialHistory) contains(id discover.NodeID) bool {
+func (h dialHistory) contains(id enode.ID) bool {
for _, v := range h {
if v.id == id {
return true
diff --git a/p2p/dial_test.go b/p2p/dial_test.go
index 0e9928782654..ca202fc842cf 100644
--- a/p2p/dial_test.go
+++ b/p2p/dial_test.go
@@ -23,7 +23,8 @@ import (
"testing"
"time"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
"github.com/davecgh/go-spew/spew"
)
@@ -48,10 +49,10 @@ func runDialTest(t *testing.T, test dialtest) {
vtime time.Time
running int
)
- pm := func(ps []*Peer) map[discover.NodeID]*Peer {
- m := make(map[discover.NodeID]*Peer)
+ pm := func(ps []*Peer) map[enode.ID]*Peer {
+ m := make(map[enode.ID]*Peer)
for _, p := range ps {
- m[p.rw.id] = p
+ m[p.ID()] = p
}
return m
}
@@ -69,6 +70,7 @@ func runDialTest(t *testing.T, test dialtest) {
t.Errorf("round %d: new tasks mismatch:\ngot %v\nwant %v\nstate: %v\nrunning: %v\n",
i, spew.Sdump(new), spew.Sdump(round.new), spew.Sdump(test.init), spew.Sdump(running))
}
+ t.Log("tasks:", spew.Sdump(new))
// Time advances by 16 seconds on every round.
vtime = vtime.Add(16 * time.Second)
@@ -76,79 +78,79 @@ func runDialTest(t *testing.T, test dialtest) {
}
}
-type fakeTable []*discover.Node
+type fakeTable []*enode.Node
-func (t fakeTable) Self() *discover.Node { return new(discover.Node) }
-func (t fakeTable) Close() {}
-func (t fakeTable) Lookup(discover.NodeID) []*discover.Node { return nil }
-func (t fakeTable) Resolve(discover.NodeID) *discover.Node { return nil }
-func (t fakeTable) ReadRandomNodes(buf []*discover.Node) int { return copy(buf, t) }
+func (t fakeTable) Self() *enode.Node { return new(enode.Node) }
+func (t fakeTable) Close() {}
+func (t fakeTable) LookupRandom() []*enode.Node { return nil }
+func (t fakeTable) Resolve(*enode.Node) *enode.Node { return nil }
+func (t fakeTable) ReadRandomNodes(buf []*enode.Node) int { return copy(buf, t) }
// This test checks that dynamic dials are launched from discovery results.
func TestDialStateDynDial(t *testing.T) {
runDialTest(t, dialtest{
- init: newDialState(nil, nil, fakeTable{}, 5, nil),
+ init: newDialState(enode.ID{}, nil, nil, fakeTable{}, 5, nil),
rounds: []round{
// A discovery query is launched.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
new: []task{&discoverTask{}},
},
// Dynamic dials are launched when it completes.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
- &discoverTask{results: []*discover.Node{
- {ID: uintID(2)}, // this one is already connected and not dialed.
- {ID: uintID(3)},
- {ID: uintID(4)},
- {ID: uintID(5)},
- {ID: uintID(6)}, // these are not tried because max dyn dials is 5
- {ID: uintID(7)}, // ...
+ &discoverTask{results: []*enode.Node{
+ newNode(uintID(2), nil), // this one is already connected and not dialed.
+ newNode(uintID(3), nil),
+ newNode(uintID(4), nil),
+ newNode(uintID(5), nil),
+ newNode(uintID(6), nil), // these are not tried because max dyn dials is 5
+ newNode(uintID(7), nil), // ...
}},
},
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// Some of the dials complete but no new ones are launched yet because
// the sum of active dial count and dynamic peer count is == maxDynDials.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(3)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(4)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
},
},
- // No new dial tasks are launched in the this round because
+ // No new dial tasks are launched in this round because
// maxDynDials has been reached.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(3)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(4)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(5)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
new: []task{
&waitExpireTask{Duration: 14 * time.Second},
@@ -158,29 +160,31 @@ func TestDialStateDynDial(t *testing.T) {
// results from last discovery lookup are reused.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(3)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(4)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(5)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
+ },
+ new: []task{
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
},
- new: []task{},
},
// More peers (3,4) drop off and dial for ID 6 completes.
// The last query result from the discovery lookup is reused
// and a new one is spawned because more candidates are needed.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(5)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(6)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
},
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(7)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)},
+ &discoverTask{},
},
},
// Peer 7 is connected, but there still aren't enough dynamic peers
@@ -188,29 +192,29 @@ func TestDialStateDynDial(t *testing.T) {
// no new is started.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(5)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(7)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(7), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(7)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)},
},
},
// Finish the running node discovery with an empty set. A new lookup
// should be immediately requested.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(0)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(5)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(7)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(7), nil)}},
},
done: []task{
&discoverTask{},
},
new: []task{
- &waitExpireTask{Duration: 14 * time.Second},
+ &discoverTask{},
},
},
},
@@ -219,34 +223,34 @@ func TestDialStateDynDial(t *testing.T) {
// Tests that bootnodes are dialed if no peers are connectd, but not otherwise.
func TestDialStateDynDialBootnode(t *testing.T) {
- bootnodes := []*discover.Node{
- {ID: uintID(1)},
- {ID: uintID(2)},
- {ID: uintID(3)},
+ bootnodes := []*enode.Node{
+ newNode(uintID(1), nil),
+ newNode(uintID(2), nil),
+ newNode(uintID(3), nil),
}
table := fakeTable{
- {ID: uintID(4)},
- {ID: uintID(5)},
- {ID: uintID(6)},
- {ID: uintID(7)},
- {ID: uintID(8)},
+ newNode(uintID(4), nil),
+ newNode(uintID(5), nil),
+ newNode(uintID(6), nil),
+ newNode(uintID(7), nil),
+ newNode(uintID(8), nil),
}
runDialTest(t, dialtest{
- init: newDialState(nil, bootnodes, table, 5, nil),
+ init: newDialState(enode.ID{}, nil, bootnodes, table, 5, nil),
rounds: []round{
// 2 dynamic dials attempted, bootnodes pending fallback interval
{
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
&discoverTask{},
},
},
// No dials succeed, bootnodes still pending fallback interval
{
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// No dials succeed, bootnodes still pending fallback interval
@@ -254,54 +258,51 @@ func TestDialStateDynDialBootnode(t *testing.T) {
// No dials succeed, 2 dynamic dials attempted and 1 bootnode too as fallback interval was reached
{
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// No dials succeed, 2nd bootnode is attempted
{
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
},
},
// No dials succeed, 3rd bootnode is attempted
{
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
},
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
},
},
// No dials succeed, 1st bootnode is attempted again, expired random nodes retried
{
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
},
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
// Random dial succeeds, no more bootnodes are attempted
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(4)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
},
},
},
@@ -312,83 +313,79 @@ func TestDialStateDynDialFromTable(t *testing.T) {
// This table always returns the same random nodes
// in the order given below.
table := fakeTable{
- {ID: uintID(1)},
- {ID: uintID(2)},
- {ID: uintID(3)},
- {ID: uintID(4)},
- {ID: uintID(5)},
- {ID: uintID(6)},
- {ID: uintID(7)},
- {ID: uintID(8)},
+ newNode(uintID(1), nil),
+ newNode(uintID(2), nil),
+ newNode(uintID(3), nil),
+ newNode(uintID(4), nil),
+ newNode(uintID(5), nil),
+ newNode(uintID(6), nil),
+ newNode(uintID(7), nil),
+ newNode(uintID(8), nil),
}
runDialTest(t, dialtest{
- init: newDialState(nil, nil, table, 10, nil),
+ init: newDialState(enode.ID{}, nil, nil, table, 10, nil),
rounds: []round{
// 5 out of 8 of the nodes returned by ReadRandomNodes are dialed.
{
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
&discoverTask{},
},
},
// Dialing nodes 1,2 succeeds. Dials from the lookup are launched.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
- &discoverTask{results: []*discover.Node{
- {ID: uintID(10)},
- {ID: uintID(11)},
- {ID: uintID(12)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
+ &discoverTask{results: []*enode.Node{
+ newNode(uintID(10), nil),
+ newNode(uintID(11), nil),
+ newNode(uintID(12), nil),
}},
},
new: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(2)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(10)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(11)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(12)}},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(10), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(11), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(12), nil)},
+ &discoverTask{},
},
},
// Dialing nodes 3,4,5 fails. The dials from the lookup succeed.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(10)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(11)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(12)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}},
},
done: []task{
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(3)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(5)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(10)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(11)}},
- &dialTask{flags: dynDialedConn, dest: &discover.Node{ID: uintID(12)}},
- },
- new: []task{
- &discoverTask{},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(10), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(11), nil)},
+ &dialTask{flags: dynDialedConn, dest: newNode(uintID(12), nil)},
},
},
// Waiting for expiry. No waitExpireTask is launched because the
// discovery query is still running.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(10)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(11)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(12)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}},
},
},
// Nodes 3,4 are not tried again because only the first two
@@ -396,36 +393,44 @@ func TestDialStateDynDialFromTable(t *testing.T) {
// already connected.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(10)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(11)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(12)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}},
},
},
},
})
}
+func newNode(id enode.ID, ip net.IP) *enode.Node {
+ var r enr.Record
+ if ip != nil {
+ r.Set(enr.IP(ip))
+ }
+ return enode.SignNull(&r, id)
+}
+
// This test checks that candidates that do not match the netrestrict list are not dialed.
func TestDialStateNetRestrict(t *testing.T) {
// This table always returns the same random nodes
// in the order given below.
table := fakeTable{
- {ID: uintID(1), IP: net.ParseIP("127.0.0.1")},
- {ID: uintID(2), IP: net.ParseIP("127.0.0.2")},
- {ID: uintID(3), IP: net.ParseIP("127.0.0.3")},
- {ID: uintID(4), IP: net.ParseIP("127.0.0.4")},
- {ID: uintID(5), IP: net.ParseIP("127.0.2.5")},
- {ID: uintID(6), IP: net.ParseIP("127.0.2.6")},
- {ID: uintID(7), IP: net.ParseIP("127.0.2.7")},
- {ID: uintID(8), IP: net.ParseIP("127.0.2.8")},
+ newNode(uintID(1), net.ParseIP("127.0.0.1")),
+ newNode(uintID(2), net.ParseIP("127.0.0.2")),
+ newNode(uintID(3), net.ParseIP("127.0.0.3")),
+ newNode(uintID(4), net.ParseIP("127.0.0.4")),
+ newNode(uintID(5), net.ParseIP("127.0.2.5")),
+ newNode(uintID(6), net.ParseIP("127.0.2.6")),
+ newNode(uintID(7), net.ParseIP("127.0.2.7")),
+ newNode(uintID(8), net.ParseIP("127.0.2.8")),
}
restrict := new(netutil.Netlist)
restrict.Add("127.0.2.0/24")
runDialTest(t, dialtest{
- init: newDialState(nil, nil, table, 10, restrict),
+ init: newDialState(enode.ID{}, nil, nil, table, 10, restrict),
rounds: []round{
{
new: []task{
@@ -439,85 +444,82 @@ func TestDialStateNetRestrict(t *testing.T) {
// This test checks that static dials are launched.
func TestDialStateStaticDial(t *testing.T) {
- wantStatic := []*discover.Node{
- {ID: uintID(1)},
- {ID: uintID(2)},
- {ID: uintID(3)},
- {ID: uintID(4)},
- {ID: uintID(5)},
+ wantStatic := []*enode.Node{
+ newNode(uintID(1), nil),
+ newNode(uintID(2), nil),
+ newNode(uintID(3), nil),
+ newNode(uintID(4), nil),
+ newNode(uintID(5), nil),
}
runDialTest(t, dialtest{
- init: newDialState(wantStatic, nil, fakeTable{}, 0, nil),
+ init: newDialState(enode.ID{}, wantStatic, nil, fakeTable{}, 0, nil),
rounds: []round{
// Static dials are launched for the nodes that
// aren't yet connected.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(5), nil)},
},
},
// No new tasks are launched in this round because all static
// nodes are either connected or still being dialed.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(3)}},
- },
- new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
},
done: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
},
// No new dial tasks are launched because all static
// nodes are now connected.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(3)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(4)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(5)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(4), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
},
done: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(5), nil)},
},
new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(4)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(5)}},
+ &waitExpireTask{Duration: 14 * time.Second},
},
},
// Wait a round for dial history to expire, no new tasks should spawn.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(3)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(4)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(5)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(4), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
},
},
// If a static node is dropped, it should be immediately redialed,
// irrespective whether it was originally static or dynamic.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(3)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(5)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
+ },
+ new: []task{
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
},
- new: []task{},
},
},
})
@@ -525,9 +527,9 @@ func TestDialStateStaticDial(t *testing.T) {
// This test checks that static peers will be redialed immediately if they were re-added to a static list.
func TestDialStaticAfterReset(t *testing.T) {
- wantStatic := []*discover.Node{
- {ID: uintID(1)},
- {ID: uintID(2)},
+ wantStatic := []*enode.Node{
+ newNode(uintID(1), nil),
+ newNode(uintID(2), nil),
}
rounds := []round{
@@ -535,104 +537,100 @@ func TestDialStaticAfterReset(t *testing.T) {
{
peers: nil,
new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
},
},
// No new dial tasks, all peers are connected.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(1)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
},
new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
+ &waitExpireTask{Duration: 30 * time.Second},
},
},
}
dTest := dialtest{
- init: newDialState(wantStatic, nil, fakeTable{}, 0, nil),
+ init: newDialState(enode.ID{}, wantStatic, nil, fakeTable{}, 0, nil),
rounds: rounds,
}
runDialTest(t, dTest)
for _, n := range wantStatic {
dTest.init.removeStatic(n)
dTest.init.addStatic(n)
- delete(dTest.init.dialing, n.ID)
}
-
// without removing peers they will be considered recently dialed
runDialTest(t, dTest)
}
// This test checks that past dials are not retried for some time.
func TestDialStateCache(t *testing.T) {
- wantStatic := []*discover.Node{
- {ID: uintID(1)},
- {ID: uintID(2)},
- {ID: uintID(3)},
+ wantStatic := []*enode.Node{
+ newNode(uintID(1), nil),
+ newNode(uintID(2), nil),
+ newNode(uintID(3), nil),
}
runDialTest(t, dialtest{
- init: newDialState(wantStatic, nil, fakeTable{}, 0, nil),
+ init: newDialState(enode.ID{}, wantStatic, nil, fakeTable{}, 0, nil),
rounds: []round{
// Static dials are launched for the nodes that
// aren't yet connected.
{
peers: nil,
new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
},
// No new tasks are launched in this round because all static
// nodes are either connected or still being dialed.
{
peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, id: uintID(1)}},
- {rw: &conn{flags: staticDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
- },
- new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(1)}},
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(2)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
},
},
// A salvage task is launched to wait for node 3's history
// entry to expire.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
done: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
+ },
+ new: []task{
+ &waitExpireTask{Duration: 14 * time.Second},
},
},
// Still waiting for node 3's entry to expire in the cache.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
},
// The cache entry for node 3 has expired and is retried.
{
peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, id: uintID(1)}},
- {rw: &conn{flags: dynDialedConn, id: uintID(2)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
+ {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
},
new: []task{
- &dialTask{flags: staticDialedConn, dest: &discover.Node{ID: uintID(3)}},
+ &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
},
},
},
@@ -640,12 +638,12 @@ func TestDialStateCache(t *testing.T) {
}
func TestDialResolve(t *testing.T) {
- resolved := discover.NewNode(uintID(1), net.IP{127, 0, 55, 234}, 3333, 4444)
+ resolved := newNode(uintID(1), net.IP{127, 0, 55, 234})
table := &resolveMock{answer: resolved}
- state := newDialState(nil, nil, table, 0, nil)
+ state := newDialState(enode.ID{}, nil, nil, table, 0, nil)
// Check that the task is generated with an incomplete ID.
- dest := discover.NewNode(uintID(1), nil, 0, 0)
+ dest := newNode(uintID(1), nil)
state.addStatic(dest)
tasks := state.newTasks(0, nil, time.Time{})
if !reflect.DeepEqual(tasks, []task{&dialTask{flags: staticDialedConn, dest: dest}}) {
@@ -656,7 +654,7 @@ func TestDialResolve(t *testing.T) {
config := Config{Dialer: TCPDialer{&net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}}
srv := &Server{ntab: table, Config: config}
tasks[0].Do(srv)
- if !reflect.DeepEqual(table.resolveCalls, []discover.NodeID{dest.ID}) {
+ if !reflect.DeepEqual(table.resolveCalls, []*enode.Node{dest}) {
t.Fatalf("wrong resolve calls, got %v", table.resolveCalls)
}
@@ -684,25 +682,25 @@ next:
return true
}
-func uintID(i uint32) discover.NodeID {
- var id discover.NodeID
+func uintID(i uint32) enode.ID {
+ var id enode.ID
binary.BigEndian.PutUint32(id[:], i)
return id
}
// implements discoverTable for TestDialResolve
type resolveMock struct {
- resolveCalls []discover.NodeID
- answer *discover.Node
+ resolveCalls []*enode.Node
+ answer *enode.Node
}
-func (t *resolveMock) Resolve(id discover.NodeID) *discover.Node {
- t.resolveCalls = append(t.resolveCalls, id)
+func (t *resolveMock) Resolve(n *enode.Node) *enode.Node {
+ t.resolveCalls = append(t.resolveCalls, n)
return t.answer
}
-func (t *resolveMock) Self() *discover.Node { return new(discover.Node) }
-func (t *resolveMock) Close() {}
-func (t *resolveMock) Bootstrap([]*discover.Node) {}
-func (t *resolveMock) Lookup(discover.NodeID) []*discover.Node { return nil }
-func (t *resolveMock) ReadRandomNodes(buf []*discover.Node) int { return 0 }
+func (t *resolveMock) Self() *enode.Node { return new(enode.Node) }
+func (t *resolveMock) Close() {}
+func (t *resolveMock) LookupRandom() []*enode.Node { return nil }
+func (t *resolveMock) ReadRandomNodes(buf []*enode.Node) int { return 0 }
+func (t *resolveMock) Bootstrap([]*enode.Node) {}
\ No newline at end of file
diff --git a/p2p/discover/node.go b/p2p/discover/node.go
index 48fc2edd1bcf..51e3d3fc3bf3 100644
--- a/p2p/discover/node.go
+++ b/p2p/discover/node.go
@@ -18,415 +18,87 @@ package discover
import (
"crypto/ecdsa"
- "crypto/elliptic"
- "encoding/hex"
"errors"
- "fmt"
"math/big"
- "math/rand"
"net"
- "net/url"
- "regexp"
- "strconv"
- "strings"
"time"
- "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/common/math"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/crypto/secp256k1"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
-const NodeIDBits = 512
-
-// Node represents a host on the network.
+// node represents a host on the network.
// The fields of Node may not be modified.
-type Node struct {
- IP net.IP // len 4 for IPv4 or 16 for IPv6
- UDP, TCP uint16 // port numbers
- ID NodeID // the node's public key
-
- // This is a cached copy of sha3(ID) which is used for node
- // distance calculations. This is part of Node in order to make it
- // possible to write tests that need a node at a certain distance.
- // In those tests, the content of sha will not actually correspond
- // with ID.
- sha common.Hash
-
- // Time when the node was added to the table.
- addedAt time.Time
-}
-
-// NewNode creates a new node. It is mostly meant to be used for
-// testing purposes.
-func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node {
- if ipv4 := ip.To4(); ipv4 != nil {
- ip = ipv4
- }
- return &Node{
- IP: ip,
- UDP: udpPort,
- TCP: tcpPort,
- ID: id,
- sha: crypto.Keccak256Hash(id[:]),
- }
-}
-
-func (n *Node) addr() *net.UDPAddr {
- return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)}
-}
-
-// Incomplete returns true for nodes with no IP address.
-func (n *Node) Incomplete() bool {
- return n.IP == nil
-}
-
-// checks whether n is a valid complete node.
-func (n *Node) validateComplete() error {
- if n.Incomplete() {
- return errors.New("incomplete node")
- }
- if n.UDP == 0 {
- return errors.New("missing UDP port")
- }
- if n.TCP == 0 {
- return errors.New("missing TCP port")
- }
- if n.IP.IsMulticast() || n.IP.IsUnspecified() {
- return errors.New("invalid IP (multicast/unspecified)")
- }
- _, err := n.ID.Pubkey() // validate the key (on curve, etc.)
- return err
-}
-
-// The string representation of a Node is a URL.
-// Please see ParseNode for a description of the format.
-func (n *Node) String() string {
- u := url.URL{Scheme: "enode"}
- if n.Incomplete() {
- u.Host = fmt.Sprintf("%x", n.ID[:])
- } else {
- addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)}
- u.User = url.User(fmt.Sprintf("%x", n.ID[:]))
- u.Host = addr.String()
- if n.UDP != n.TCP {
- u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP))
- }
- }
- return u.String()
-}
-
-var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
-
-// ParseNode parses a node designator.
-//
-// There are two basic forms of node designators
-// - incomplete nodes, which only have the public key (node ID)
-// - complete nodes, which contain the public key and IP/Port information
-//
-// For incomplete nodes, the designator must look like one of these
-//
-// enode://
-//
-//
-// For complete nodes, the node ID is encoded in the username portion
-// of the URL, separated from the host by an @ sign. The hostname can
-// only be given as an IP address, DNS domain names are not allowed.
-// The port in the host name section is the TCP listening port. If the
-// TCP and UDP (discovery) ports differ, the UDP port is specified as
-// query parameter "discport".
-//
-// In the following example, the node URL describes
-// a node with IP address 10.3.58.6, TCP listening port 30303
-// and UDP discovery port 30301.
-//
-// enode://@10.3.58.6:30303?discport=30301
-func ParseNode(rawurl string) (*Node, error) {
- if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
- id, err := HexID(m[1])
- if err != nil {
- return nil, fmt.Errorf("invalid node ID (%v)", err)
- }
- return NewNode(id, nil, 0, 0), nil
- }
- return parseComplete(rawurl)
+type node struct {
+ enode.Node
+ addedAt time.Time // time when the node was added to the table
}
-func parseComplete(rawurl string) (*Node, error) {
- var (
- id NodeID
- ip net.IP
- tcpPort, udpPort uint64
- )
- u, err := url.Parse(rawurl)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "enode" {
- return nil, errors.New("invalid URL scheme, want \"enode\"")
- }
- // Parse the Node ID from the user portion.
- if u.User == nil {
- return nil, errors.New("does not contain node ID")
- }
- if id, err = HexID(u.User.String()); err != nil {
- return nil, fmt.Errorf("invalid node ID (%v)", err)
- }
- // Parse the IP address.
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- return nil, fmt.Errorf("invalid host: %v", err)
- }
- if ip = net.ParseIP(host); ip == nil {
- return nil, errors.New("invalid IP address")
- }
- // Ensure the IP is 4 bytes long for IPv4 addresses.
- if ipv4 := ip.To4(); ipv4 != nil {
- ip = ipv4
- }
- // Parse the port numbers.
- if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
- return nil, errors.New("invalid port")
- }
- udpPort = tcpPort
- qv := u.Query()
- if qv.Get("discport") != "" {
- udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
- if err != nil {
- return nil, errors.New("invalid discport in query")
- }
- }
- return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil
-}
-
-// MustParseNode parses a node URL. It panics if the URL is not valid.
-func MustParseNode(rawurl string) *Node {
- n, err := ParseNode(rawurl)
- if err != nil {
- panic("invalid node URL: " + err.Error())
- }
- return n
-}
+type encPubkey [64]byte
-// MarshalText implements encoding.TextMarshaler.
-func (n *Node) MarshalText() ([]byte, error) {
- return []byte(n.String()), nil
+func encodePubkey(key *ecdsa.PublicKey) encPubkey {
+ var e encPubkey
+ math.ReadBits(key.X, e[:len(e)/2])
+ math.ReadBits(key.Y, e[len(e)/2:])
+ return e
}
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (n *Node) UnmarshalText(text []byte) error {
- dec, err := ParseNode(string(text))
- if err == nil {
- *n = *dec
- }
- return err
-}
-
-// NodeID is a unique identifier for each node.
-// The node identifier is a marshaled elliptic curve public key.
-type NodeID [NodeIDBits / 8]byte
-
-// Bytes returns a byte slice representation of the NodeID
-func (n NodeID) Bytes() []byte {
- return n[:]
-}
-
-// NodeID prints as a long hexadecimal number.
-func (n NodeID) String() string {
- return fmt.Sprintf("%x", n[:])
-}
-
-// The Go syntax representation of a NodeID is a call to HexID.
-func (n NodeID) GoString() string {
- return fmt.Sprintf("discover.HexID(\"%x\")", n[:])
-}
-
-// TerminalString returns a shortened hex string for terminal logging.
-func (n NodeID) TerminalString() string {
- return hex.EncodeToString(n[:8])
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (n NodeID) MarshalText() ([]byte, error) {
- return []byte(hex.EncodeToString(n[:])), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (n *NodeID) UnmarshalText(text []byte) error {
- id, err := HexID(string(text))
- if err != nil {
- return err
- }
- *n = id
- return nil
-}
-
-// BytesID converts a byte slice to a NodeID
-func BytesID(b []byte) (NodeID, error) {
- var id NodeID
- if len(b) != len(id) {
- return id, fmt.Errorf("wrong length, want %d bytes", len(id))
- }
- copy(id[:], b)
- return id, nil
-}
-
-// MustBytesID converts a byte slice to a NodeID.
-// It panics if the byte slice is not a valid NodeID.
-func MustBytesID(b []byte) NodeID {
- id, err := BytesID(b)
- if err != nil {
- panic(err)
+func decodePubkey(e encPubkey) (*ecdsa.PublicKey, error) {
+ p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}
+ half := len(e) / 2
+ p.X.SetBytes(e[:half])
+ p.Y.SetBytes(e[half:])
+ if !p.Curve.IsOnCurve(p.X, p.Y) {
+ return nil, errors.New("invalid secp256k1 curve point")
}
- return id
+ return p, nil
}
-// HexID converts a hex string to a NodeID.
-// The string may be prefixed with 0x.
-func HexID(in string) (NodeID, error) {
- var id NodeID
- b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
- if err != nil {
- return id, err
- } else if len(b) != len(id) {
- return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
- }
- copy(id[:], b)
- return id, nil
+func (e encPubkey) id() enode.ID {
+ return enode.ID(crypto.Keccak256Hash(e[:]))
}
-// MustHexID converts a hex string to a NodeID.
-// It panics if the string is not a valid NodeID.
-func MustHexID(in string) NodeID {
- id, err := HexID(in)
+// recoverNodeKey computes the public key used to sign the
+// given hash from the signature.
+func recoverNodeKey(hash, sig []byte) (key encPubkey, err error) {
+ pubkey, err := secp256k1.RecoverPubkey(hash, sig)
if err != nil {
- panic(err)
+ return key, err
}
- return id
+ copy(key[:], pubkey[1:])
+ return key, nil
}
-// PubkeyID returns a marshaled representation of the given public key.
-func PubkeyID(pub *ecdsa.PublicKey) NodeID {
- var id NodeID
- pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y)
- if len(pbytes)-1 != len(id) {
- panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes)))
- }
- copy(id[:], pbytes[1:])
- return id
+func wrapNode(n *enode.Node) *node {
+ return &node{Node: *n}
}
-// Pubkey returns the public key represented by the node ID.
-// It returns an error if the ID is not a point on the curve.
-func (id NodeID) Pubkey() (*ecdsa.PublicKey, error) {
- p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)}
- half := len(id) / 2
- p.X.SetBytes(id[:half])
- p.Y.SetBytes(id[half:])
- if !p.Curve.IsOnCurve(p.X, p.Y) {
- return nil, errors.New("id is invalid secp256k1 curve point")
+func wrapNodes(ns []*enode.Node) []*node {
+ result := make([]*node, len(ns))
+ for i, n := range ns {
+ result[i] = wrapNode(n)
}
- return p, nil
+ return result
}
-// recoverNodeID computes the public key used to sign the
-// given hash from the signature.
-func recoverNodeID(hash, sig []byte) (id NodeID, err error) {
- pubkey, err := secp256k1.RecoverPubkey(hash, sig)
- if err != nil {
- return id, err
- }
- if len(pubkey)-1 != len(id) {
- return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8)
- }
- for i := range id {
- id[i] = pubkey[i+1]
- }
- return id, nil
+func unwrapNode(n *node) *enode.Node {
+ return &n.Node
}
-// distcmp compares the distances a->target and b->target.
-// Returns -1 if a is closer to target, 1 if b is closer to target
-// and 0 if they are equal.
-func distcmp(target, a, b common.Hash) int {
- for i := range target {
- da := a[i] ^ target[i]
- db := b[i] ^ target[i]
- if da > db {
- return 1
- } else if da < db {
- return -1
- }
+func unwrapNodes(ns []*node) []*enode.Node {
+ result := make([]*enode.Node, len(ns))
+ for i, n := range ns {
+ result[i] = unwrapNode(n)
}
- return 0
+ return result
}
-// table of leading zero counts for bytes [0..255]
-var lzcount = [256]int{
- 8, 7, 6, 6, 5, 5, 5, 5,
- 4, 4, 4, 4, 4, 4, 4, 4,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+func (n *node) addr() *net.UDPAddr {
+ return &net.UDPAddr{IP: n.IP(), Port: n.UDP()}
}
-// logdist returns the logarithmic distance between a and b, log2(a ^ b).
-func logdist(a, b common.Hash) int {
- lz := 0
- for i := range a {
- x := a[i] ^ b[i]
- if x == 0 {
- lz += 8
- } else {
- lz += lzcount[x]
- break
- }
- }
- return len(a)*8 - lz
-}
-
-// hashAtDistance returns a random hash such that logdist(a, b) == n
-func hashAtDistance(a common.Hash, n int) (b common.Hash) {
- if n == 0 {
- return a
- }
- // flip bit at position n, fill the rest with random bits
- b = a
- pos := len(a) - n/8 - 1
- bit := byte(0x01) << (byte(n%8) - 1)
- if bit == 0 {
- pos++
- bit = 0x80
- }
- b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
- for i := pos + 1; i < len(a); i++ {
- b[i] = byte(rand.Intn(255))
- }
- return b
+func (n *node) String() string {
+ return n.Node.String()
}
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index dff614c1d66e..0afc7e54c154 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -23,9 +23,9 @@
package discover
import (
+ "crypto/ecdsa"
crand "crypto/rand"
"encoding/binary"
- "errors"
"fmt"
mrand "math/rand"
"net"
@@ -36,13 +36,14 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
)
const (
- alpha = 3 // Kademlia concurrency factor
- bucketSize = 200 // Kademlia bucket size
- maxReplacements = 10 // Size of per-bucket replacement list
+ alpha = 3 // Kademlia concurrency factor
+ bucketSize = 16 // Kademlia bucket size
+ maxReplacements = 10 // Size of per-bucket replacement list
// We keep buckets for the upper 1/15 of distances because
// it's very unlikely we'll ever encounter a node that's closer.
@@ -54,76 +55,54 @@ const (
bucketIPLimit, bucketSubnet = 2, 24 // at most 2 addresses from the same /24
tableIPLimit, tableSubnet = 10, 24
- maxBondingPingPongs = 16 // Limit on the number of concurrent ping/pong interactions
- maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
-
- refreshInterval = 30 * time.Minute
- revalidateInterval = 10 * time.Second
- copyNodesInterval = 30 * time.Second
- seedMinTableTime = 5 * time.Minute
- seedCount = 30
- seedMaxAge = 5 * 24 * time.Hour
+ maxFindnodeFailures = 5 // Nodes exceeding this limit are dropped
+ refreshInterval = 30 * time.Minute
+ revalidateInterval = 10 * time.Second
+ copyNodesInterval = 30 * time.Second
+ seedMinTableTime = 5 * time.Minute
+ seedCount = 30
+ seedMaxAge = 5 * 24 * time.Hour
)
type Table struct {
mutex sync.Mutex // protects buckets, bucket content, nursery, rand
buckets [nBuckets]*bucket // index of known nodes by distance
- nursery []*Node // bootstrap nodes
+ nursery []*node // bootstrap nodes
rand *mrand.Rand // source of randomness, periodically reseeded
ips netutil.DistinctNetSet
- db *nodeDB // database of known nodes
+ db *enode.DB // database of known nodes
+ net transport
refreshReq chan chan struct{}
initDone chan struct{}
closeReq chan struct{}
closed chan struct{}
- bondmu sync.Mutex
- bonding map[NodeID]*bondproc
- bondslots chan struct{} // limits total number of active bonding processes
-
- nodeAddedHook func(*Node) // for testing
-
- net transport
- self *Node // metadata of the local node
-}
-
-type bondproc struct {
- err error
- n *Node
- done chan struct{}
+ nodeAddedHook func(*node) // for testing
}
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
- ping(NodeID, *net.UDPAddr) error
- waitping(NodeID) error
- findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
+ self() *enode.Node
+ ping(enode.ID, *net.UDPAddr) error
+ findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
close()
}
// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
- entries []*Node // live entries, sorted by time of last contact
- replacements []*Node // recently seen nodes to be used if revalidation fails
+ entries []*node // live entries, sorted by time of last contact
+ replacements []*node // recently seen nodes to be used if revalidation fails
ips netutil.DistinctNetSet
}
-func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
- // If no node database was given, use an in-memory one
- db, err := newNodeDB(nodeDBPath, Version, ourID)
- if err != nil {
- return nil, err
- }
+func newTable(t transport, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
tab := &Table{
net: t,
db: db,
- self: NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
- bonding: make(map[NodeID]*bondproc),
- bondslots: make(chan struct{}, maxBondingPingPongs),
refreshReq: make(chan chan struct{}),
initDone: make(chan struct{}),
closeReq: make(chan struct{}),
@@ -134,24 +113,22 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
if err := tab.setFallbackNodes(bootnodes); err != nil {
return nil, err
}
- for i := 0; i < cap(tab.bondslots); i++ {
- tab.bondslots <- struct{}{}
- }
for i := range tab.buckets {
tab.buckets[i] = &bucket{
ips: netutil.DistinctNetSet{Subnet: bucketSubnet, Limit: bucketIPLimit},
}
}
tab.seedRand()
- tab.loadSeedNodes(false)
- // Start the background expiration goroutine after loading seeds so that the search for
- // seed nodes also considers older nodes that would otherwise be removed by the
- // expiration.
- tab.db.ensureExpirer()
+ tab.loadSeedNodes()
+
go tab.loop()
return tab, nil
}
+func (tab *Table) self() *enode.Node {
+ return tab.net.self()
+}
+
func (tab *Table) seedRand() {
var b [8]byte
crand.Read(b[:])
@@ -161,16 +138,9 @@ func (tab *Table) seedRand() {
tab.mutex.Unlock()
}
-// Self returns the local node.
-// The returned node should not be modified by the caller.
-func (tab *Table) Self() *Node {
- return tab.self
-}
-
-// ReadRandomNodes fills the given slice with random nodes from the
-// table. It will not write the same node more than once. The nodes in
-// the slice are copies and can be modified by the caller.
-func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
+// ReadRandomNodes fills the given slice with random nodes from the table. The results
+// are guaranteed to be unique for a single invocation; no node will appear twice.
+func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
if !tab.isInitDone() {
return 0
}
@@ -178,10 +148,10 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
defer tab.mutex.Unlock()
// Find all non-empty buckets and get a fresh slice of their entries.
- var buckets [][]*Node
- for _, b := range tab.buckets {
+ var buckets [][]*node
+ for _, b := range &tab.buckets {
if len(b.entries) > 0 {
- buckets = append(buckets, b.entries[:])
+ buckets = append(buckets, b.entries)
}
}
if len(buckets) == 0 {
@@ -196,7 +166,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j]
- buf[i] = &(*b[0])
+ buf[i] = unwrapNode(b[0])
buckets[j] = b[1:]
if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...)
@@ -210,6 +180,10 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
// Close terminates the network listener and flushes the node database.
func (tab *Table) Close() {
+ if tab.net != nil {
+ tab.net.close()
+ }
+
select {
case <-tab.closed:
// already closed.
@@ -221,20 +195,13 @@ func (tab *Table) Close() {
// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
-func (tab *Table) setFallbackNodes(nodes []*Node) error {
+func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
for _, n := range nodes {
- if err := n.validateComplete(); err != nil {
- return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
+ if err := n.ValidateComplete(); err != nil {
+ return fmt.Errorf("bad bootstrap node %q: %v", n, err)
}
}
- tab.nursery = make([]*Node, 0, len(nodes))
- for _, n := range nodes {
- cpy := *n
- // Recompute cpy.sha because the node might not have been
- // created by NewNode or ParseNode.
- cpy.sha = crypto.Keccak256Hash(n.ID[:])
- tab.nursery = append(tab.nursery, &cpy)
- }
+ tab.nursery = wrapNodes(nodes)
return nil
}
@@ -250,47 +217,48 @@ func (tab *Table) isInitDone() bool {
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
-func (tab *Table) Resolve(targetID NodeID) *Node {
+func (tab *Table) Resolve(n *enode.Node) *enode.Node {
// If the node is present in the local table, no
// network interaction is required.
- hash := crypto.Keccak256Hash(targetID[:])
+ hash := n.ID()
tab.mutex.Lock()
cl := tab.closest(hash, 1)
tab.mutex.Unlock()
- if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
- return cl.entries[0]
+ if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
+ return unwrapNode(cl.entries[0])
}
// Otherwise, do a network lookup.
- result := tab.Lookup(targetID)
+ result := tab.lookup(encodePubkey(n.Pubkey()), true)
for _, n := range result {
- if n.ID == targetID {
- return n
+ if n.ID() == hash {
+ return unwrapNode(n)
}
}
return nil
}
-// Lookup performs a network search for nodes close
-// to the given target. It approaches the target by querying
-// nodes that are closer to it on each iteration.
-// The given target does not need to be an actual node
-// identifier.
-func (tab *Table) Lookup(targetID NodeID) []*Node {
- return tab.lookup(targetID, true)
+// LookupRandom finds random nodes in the network.
+func (tab *Table) LookupRandom() []*enode.Node {
+ var target encPubkey
+ crand.Read(target[:])
+ return unwrapNodes(tab.lookup(target, true))
}
-func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
+// lookup performs a network search for nodes close to the given target. It approaches the
+// target by querying nodes that are closer to it on each iteration. The given target does
+// not need to be an actual node identifier.
+func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
var (
- target = crypto.Keccak256Hash(targetID[:])
- asked = make(map[NodeID]bool)
- seen = make(map[NodeID]bool)
- reply = make(chan []*Node, alpha)
+ target = enode.ID(crypto.Keccak256Hash(targetKey[:]))
+ asked = make(map[enode.ID]bool)
+ seen = make(map[enode.ID]bool)
+ reply = make(chan []*node, alpha)
pendingQueries = 0
result *nodesByDistance
)
// don't query further if we hit ourself.
// unlikely to happen often in practice.
- asked[tab.self.ID] = true
+ asked[tab.self().ID()] = true
for {
tab.mutex.Lock()
@@ -312,25 +280,10 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
// ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
n := result.entries[i]
- if !asked[n.ID] {
- asked[n.ID] = true
+ if !asked[n.ID()] {
+ asked[n.ID()] = true
pendingQueries++
- go func() {
- // Find potential neighbors to bond with
- r, err := tab.net.findnode(n.ID, n.addr(), targetID)
- if err != nil {
- // Bump the failure counter to detect and evacuate non-bonded entries
- fails := tab.db.findFails(n.ID) + 1
- tab.db.updateFindFails(n.ID, fails)
- log.Trace("Bumping findnode failure counter", "id", n.ID, "failcount", fails)
-
- if fails >= maxFindnodeFailures {
- log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
- tab.delete(n)
- }
- }
- reply <- tab.bondall(r)
- }()
+ go tab.findnode(n, targetKey, reply)
}
}
if pendingQueries == 0 {
@@ -339,8 +292,8 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
}
// wait for the next reply
for _, n := range <-reply {
- if n != nil && !seen[n.ID] {
- seen[n.ID] = true
+ if n != nil && !seen[n.ID()] {
+ seen[n.ID()] = true
result.push(n, bucketSize)
}
}
@@ -349,6 +302,29 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
return result.entries
}
+func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
+ fails := tab.db.FindFails(n.ID())
+ r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
+ if err != nil || len(r) == 0 {
+ fails++
+ tab.db.UpdateFindFails(n.ID(), fails)
+ log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
+ if fails >= maxFindnodeFailures {
+ log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
+ tab.delete(n)
+ }
+ } else if fails > 0 {
+ tab.db.UpdateFindFails(n.ID(), fails-1)
+ }
+
+ // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
+ // just remove those again during revalidation.
+ for _, n := range r {
+ tab.add(n)
+ }
+ reply <- r
+}
+
func (tab *Table) refresh() <-chan struct{} {
done := make(chan struct{})
select {
@@ -365,8 +341,8 @@ func (tab *Table) loop() {
revalidate = time.NewTimer(tab.nextRevalidateTime())
refresh = time.NewTicker(refreshInterval)
copyNodes = time.NewTicker(copyNodesInterval)
- revalidateDone = make(chan struct{})
refreshDone = make(chan struct{}) // where doRefresh reports completion
+ revalidateDone chan struct{} // where doRevalidate reports completion
waiting = []chan struct{}{tab.initDone} // holds waiting callers while doRefresh runs
)
defer refresh.Stop()
@@ -397,26 +373,27 @@ loop:
}
waiting, refreshDone = nil, nil
case <-revalidate.C:
+ revalidateDone = make(chan struct{})
go tab.doRevalidate(revalidateDone)
case <-revalidateDone:
revalidate.Reset(tab.nextRevalidateTime())
+ revalidateDone = nil
case <-copyNodes.C:
- go tab.copyBondedNodes()
+ go tab.copyLiveNodes()
case <-tab.closeReq:
break loop
}
}
- if tab.net != nil {
- tab.net.close()
- }
if refreshDone != nil {
<-refreshDone
}
for _, ch := range waiting {
close(ch)
}
- tab.db.close()
+ if revalidateDone != nil {
+ <-revalidateDone
+ }
close(tab.closed)
}
@@ -429,10 +406,14 @@ func (tab *Table) doRefresh(done chan struct{}) {
// Load nodes from the database and insert
// them. This should yield a few previously seen nodes that are
// (hopefully) still alive.
- tab.loadSeedNodes(true)
+ tab.loadSeedNodes()
// Run self lookup to discover new neighbor nodes.
- tab.lookup(tab.self.ID, false)
+ // We can only do this if we have a secp256k1 identity.
+ var key ecdsa.PublicKey
+ if err := tab.self().Load((*enode.Secp256k1)(&key)); err == nil {
+ tab.lookup(encodePubkey(&key), false)
+ }
// The Kademlia paper specifies that the bucket refresh should
// perform a lookup in the least recently used bucket. We cannot
@@ -441,22 +422,19 @@ func (tab *Table) doRefresh(done chan struct{}) {
// sha3 preimage that falls into a chosen bucket.
// We perform a few lookups with a random target instead.
for i := 0; i < 3; i++ {
- var target NodeID
+ var target encPubkey
crand.Read(target[:])
tab.lookup(target, false)
}
}
-func (tab *Table) loadSeedNodes(bond bool) {
- seeds := tab.db.querySeeds(seedCount, seedMaxAge)
+func (tab *Table) loadSeedNodes() {
+ seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
seeds = append(seeds, tab.nursery...)
- if bond {
- seeds = tab.bondall(seeds)
- }
for i := range seeds {
seed := seeds[i]
- age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.bondTime(seed.ID)) }}
- log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
+ age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
+ log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
@@ -473,28 +451,28 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
}
// Ping the selected node and wait for a pong.
- err := tab.ping(last.ID, last.addr())
+ err := tab.net.ping(last.ID(), last.addr())
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.buckets[bi]
if err == nil {
// The node responded, move it to the front.
- log.Debug("Revalidated node", "b", bi, "id", last.ID)
+ log.Debug("Revalidated node", "b", bi, "id", last.ID())
b.bump(last)
return
}
// No reply received, pick a replacement or delete the node if there aren't
// any replacements.
if r := tab.replace(b, last); r != nil {
- log.Debug("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
+ log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
} else {
- log.Debug("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
+ log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
}
}
// nodeToRevalidate returns the last node in a random, non-empty bucket.
-func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
+func (tab *Table) nodeToRevalidate() (n *node, bi int) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
@@ -515,17 +493,17 @@ func (tab *Table) nextRevalidateTime() time.Duration {
return time.Duration(tab.rand.Int63n(int64(revalidateInterval)))
}
-// copyBondedNodes adds nodes from the table to the database if they have been in the table
+// copyLiveNodes adds nodes from the table to the database if they have been in the table
// longer then minTableTime.
-func (tab *Table) copyBondedNodes() {
+func (tab *Table) copyLiveNodes() {
tab.mutex.Lock()
defer tab.mutex.Unlock()
now := time.Now()
- for _, b := range tab.buckets {
+ for _, b := range &tab.buckets {
for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime {
- tab.db.updateNode(n)
+ tab.db.UpdateNode(unwrapNode(n))
}
}
}
@@ -533,12 +511,12 @@ func (tab *Table) copyBondedNodes() {
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
-func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
+func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
close := &nodesByDistance{target: target}
- for _, b := range tab.buckets {
+ for _, b := range &tab.buckets {
for _, n := range b.entries {
close.push(n, nresults)
}
@@ -547,176 +525,76 @@ func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
}
func (tab *Table) len() (n int) {
- for _, b := range tab.buckets {
+ for _, b := range &tab.buckets {
n += len(b.entries)
}
return n
}
-// bondall bonds with all given nodes concurrently and returns
-// those nodes for which bonding has probably succeeded.
-func (tab *Table) bondall(nodes []*Node) (result []*Node) {
- rc := make(chan *Node, len(nodes))
- for i := range nodes {
- go func(n *Node) {
- nn, _ := tab.bond(false, n.ID, n.addr(), n.TCP)
- rc <- nn
- }(nodes[i])
- }
- for range nodes {
- if n := <-rc; n != nil {
- result = append(result, n)
- }
- }
- return result
-}
-
-// bond ensures the local node has a bond with the given remote node.
-// It also attempts to insert the node into the table if bonding succeeds.
-// The caller must not hold tab.mutex.
-//
-// A bond is must be established before sending findnode requests.
-// Both sides must have completed a ping/pong exchange for a bond to
-// exist. The total number of active bonding processes is limited in
-// order to restrain network use.
-//
-// bond is meant to operate idempotently in that bonding with a remote
-// node which still remembers a previously established bond will work.
-// The remote node will simply not send a ping back, causing waitping
-// to time out.
-//
-// If pinged is true, the remote node has just pinged us and one half
-// of the process can be skipped.
-func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) (*Node, error) {
- if id == tab.self.ID {
- return nil, errors.New("is self")
- }
- if pinged && !tab.isInitDone() {
- return nil, errors.New("still initializing")
- }
- // Start bonding if we haven't seen this node for a while or if it failed findnode too often.
- node, fails := tab.db.node(id), tab.db.findFails(id)
- age := time.Since(tab.db.bondTime(id))
- var result error
- if fails > 0 || age > nodeDBNodeExpiration {
- log.Trace("Starting bonding ping/pong", "id", id, "known", node != nil, "failcount", fails, "age", age)
-
- tab.bondmu.Lock()
- w := tab.bonding[id]
- if w != nil {
- // Wait for an existing bonding process to complete.
- tab.bondmu.Unlock()
- <-w.done
- } else {
- // Register a new bonding process.
- w = &bondproc{done: make(chan struct{})}
- tab.bonding[id] = w
- tab.bondmu.Unlock()
- // Do the ping/pong. The result goes into w.
- tab.pingpong(w, pinged, id, addr, tcpPort)
- // Unregister the process after it's done.
- tab.bondmu.Lock()
- delete(tab.bonding, id)
- tab.bondmu.Unlock()
- }
- // Retrieve the bonding results
- result = w.err
- if result == nil {
- node = w.n
- }
- }
- // Add the node to the table even if the bonding ping/pong
- // fails. It will be relaced quickly if it continues to be
- // unresponsive.
- if node != nil {
- tab.add(node)
- tab.db.updateFindFails(id, 0)
- }
- return node, result
-}
-
-func (tab *Table) pingpong(w *bondproc, pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16) {
- // Request a bonding slot to limit network usage
- <-tab.bondslots
- defer func() { tab.bondslots <- struct{}{} }()
-
- // Ping the remote side and wait for a pong.
- if w.err = tab.ping(id, addr); w.err != nil {
- close(w.done)
- return
- }
- if !pinged {
- // Give the remote node a chance to ping us before we start
- // sending findnode requests. If they still remember us,
- // waitping will simply time out.
- tab.net.waitping(id)
- }
- // Bonding succeeded, update the node database.
- w.n = NewNode(id, addr.IP, uint16(addr.Port), tcpPort)
- close(w.done)
-}
-
-// ping a remote endpoint and wait for a reply, also updating the node
-// database accordingly.
-func (tab *Table) ping(id NodeID, addr *net.UDPAddr) error {
- tab.db.updateLastPing(id, time.Now())
- if err := tab.net.ping(id, addr); err != nil {
- return err
- }
- tab.db.updateBondTime(id, time.Now())
- return nil
-}
-
// bucket returns the bucket for the given node ID hash.
-func (tab *Table) bucket(sha common.Hash) *bucket {
- d := logdist(tab.self.sha, sha)
+func (tab *Table) bucket(id enode.ID) *bucket {
+ d := enode.LogDist(tab.self().ID(), id)
if d <= bucketMinDistance {
return tab.buckets[0]
}
return tab.buckets[d-bucketMinDistance-1]
}
-// add attempts to add the given node its corresponding bucket. If the
-// bucket has space available, adding the node succeeds immediately.
-// Otherwise, the node is added if the least recently active node in
-// the bucket does not respond to a ping packet.
+// add attempts to add the given node to its corresponding bucket. If the bucket has space
+// available, adding the node succeeds immediately. Otherwise, the node is added if the
+// least recently active node in the bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
-func (tab *Table) add(new *Node) {
+func (tab *Table) add(n *node) {
+ if n.ID() == tab.self().ID() {
+ return
+ }
+
tab.mutex.Lock()
defer tab.mutex.Unlock()
-
- b := tab.bucket(new.sha)
- if !tab.bumpOrAdd(b, new) {
+ b := tab.bucket(n.ID())
+ if !tab.bumpOrAdd(b, n) {
// Node is not in table. Add it to the replacement list.
- tab.addReplacement(b, new)
+ tab.addReplacement(b, n)
+ }
+}
+
+// addThroughPing adds the given node to the table. Compared to plain
+// 'add' there is an additional safety measure: if the table is still
+// initializing, the node is not added. This prevents an attack where the
+// table could be filled by just sending ping repeatedly.
+//
+// The caller must not hold tab.mutex.
+func (tab *Table) addThroughPing(n *node) {
+ if !tab.isInitDone() {
+ return
}
+ tab.add(n)
}
// stuff adds nodes the table to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
-func (tab *Table) stuff(nodes []*Node) {
+func (tab *Table) stuff(nodes []*node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, n := range nodes {
- if n.ID == tab.self.ID {
+ if n.ID() == tab.self().ID() {
continue // don't add self
}
- b := tab.bucket(n.sha)
+ b := tab.bucket(n.ID())
if len(b.entries) < bucketSize {
tab.bumpOrAdd(b, n)
}
}
}
-// delete removes an entry from the node table (used to evacuate
-// failed/non-bonded discovery peers).
-func (tab *Table) delete(node *Node) {
+// delete removes an entry from the node table. It is used to evacuate dead nodes.
+func (tab *Table) delete(node *node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
- tab.deleteInBucket(tab.bucket(node.sha), node)
+ tab.deleteInBucket(tab.bucket(node.ID()), node)
}
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
@@ -743,27 +621,27 @@ func (tab *Table) removeIP(b *bucket, ip net.IP) {
b.ips.Remove(ip)
}
-func (tab *Table) addReplacement(b *bucket, n *Node) {
+func (tab *Table) addReplacement(b *bucket, n *node) {
for _, e := range b.replacements {
- if e.ID == n.ID {
+ if e.ID() == n.ID() {
return // already in list
}
}
- if !tab.addIP(b, n.IP) {
+ if !tab.addIP(b, n.IP()) {
return
}
- var removed *Node
+ var removed *node
b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
if removed != nil {
- tab.removeIP(b, removed.IP)
+ tab.removeIP(b, removed.IP())
}
}
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
-func (tab *Table) replace(b *bucket, last *Node) *Node {
- if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
+func (tab *Table) replace(b *bucket, last *node) *node {
+ if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
// Entry has moved, don't replace it.
return nil
}
@@ -775,15 +653,15 @@ func (tab *Table) replace(b *bucket, last *Node) *Node {
r := b.replacements[tab.rand.Intn(len(b.replacements))]
b.replacements = deleteNode(b.replacements, r)
b.entries[len(b.entries)-1] = r
- tab.removeIP(b, last.IP)
+ tab.removeIP(b, last.IP())
return r
}
// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
-func (b *bucket) bump(n *Node) bool {
+func (b *bucket) bump(n *node) bool {
for i := range b.entries {
- if b.entries[i].ID == n.ID {
+ if b.entries[i].ID() == n.ID() {
// move it to the front
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
@@ -795,11 +673,11 @@ func (b *bucket) bump(n *Node) bool {
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
-func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
+func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
if b.bump(n) {
return true
}
- if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
+ if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
return false
}
b.entries, _ = pushNode(b.entries, n, bucketSize)
@@ -811,13 +689,13 @@ func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
return true
}
-func (tab *Table) deleteInBucket(b *bucket, n *Node) {
+func (tab *Table) deleteInBucket(b *bucket, n *node) {
b.entries = deleteNode(b.entries, n)
- tab.removeIP(b, n.IP)
+ tab.removeIP(b, n.IP())
}
// pushNode adds n to the front of list, keeping at most max items.
-func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
+func pushNode(list []*node, n *node, max int) ([]*node, *node) {
if len(list) < max {
list = append(list, nil)
}
@@ -828,9 +706,9 @@ func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
}
// deleteNode removes n from list.
-func deleteNode(list []*Node, n *Node) []*Node {
+func deleteNode(list []*node, n *node) []*node {
for i := range list {
- if list[i].ID == n.ID {
+ if list[i].ID() == n.ID() {
return append(list[:i], list[i+1:]...)
}
}
@@ -840,14 +718,14 @@ func deleteNode(list []*Node, n *Node) []*Node {
// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
- entries []*Node
- target common.Hash
+ entries []*node
+ target enode.ID
}
// push adds the given node to the list, keeping the total size below maxElems.
-func (h *nodesByDistance) push(n *Node, maxElems int) {
+func (h *nodesByDistance) push(n *node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool {
- return distcmp(h.target, h.entries[i].sha, n.sha) > 0
+ return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
})
if len(h.entries) < maxElems {
h.entries = append(h.entries, n)
diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go
index 38f22880c853..6967d4244175 100644
--- a/p2p/discover/table_test.go
+++ b/p2p/discover/table_test.go
@@ -20,7 +20,6 @@ import (
"crypto/ecdsa"
"fmt"
"math/rand"
- "sync"
"net"
"reflect"
@@ -28,8 +27,9 @@ import (
"testing/quick"
"time"
- "github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
)
func TestTable_pingReplace(t *testing.T) {
@@ -49,30 +49,28 @@ func TestTable_pingReplace(t *testing.T) {
func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding bool) {
transport := newPingRecorder()
- tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
+ tab, db := newTestTable(transport)
defer tab.Close()
+ defer db.Close()
// Wait for init so bond is accepted.
<-tab.initDone
- // fill up the sender's bucket.
- pingSender := NewNode(MustHexID("a502af0f59b2aab7746995408c79e9ca312d2793cc997e44fc55eda62f0150bbb8c59a6f9269ba3a081518b62699ee807c7c19c20125ddfccca872608af9e370"), net.IP{}, 99, 99)
+ // Fill up the sender's bucket.
+ pingKey, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
+ pingSender := wrapNode(enode.NewV4(&pingKey.PublicKey, net.IP{}, 99, 99))
last := fillBucket(tab, pingSender)
- // this call to bond should replace the last node
- // in its bucket if the node is not responding.
- transport.dead[last.ID] = !lastInBucketIsResponding
- transport.dead[pingSender.ID] = !newNodeIsResponding
- tab.bond(true, pingSender.ID, &net.UDPAddr{}, 0)
+ // Add the sender as if it just pinged us. Revalidate should replace the last node in
+ // its bucket if it is unresponsive. Revalidate again to ensure that the replacement takes effect.
+ transport.dead[last.ID()] = !lastInBucketIsResponding
+ transport.dead[pingSender.ID()] = !newNodeIsResponding
+ tab.add(pingSender)
+ tab.doRevalidate(make(chan struct{}, 1))
tab.doRevalidate(make(chan struct{}, 1))
- // first ping goes to sender (bonding pingback)
- if !transport.pinged[pingSender.ID] {
- t.Error("table did not ping back sender")
- }
- if !transport.pinged[last.ID] {
- // second ping goes to oldest node in bucket
- // to see whether it is still alive.
+ if !transport.pinged[last.ID()] {
+ // Oldest node in bucket is pinged to see whether it is still alive.
t.Error("table did not ping last node in bucket")
}
@@ -82,14 +80,14 @@ func testPingReplace(t *testing.T, newNodeIsResponding, lastInBucketIsResponding
if !lastInBucketIsResponding && !newNodeIsResponding {
wantSize--
}
- if l := len(tab.bucket(pingSender.sha).entries); l != wantSize {
+ if l := len(tab.bucket(pingSender.ID()).entries); l != wantSize {
t.Errorf("wrong bucket size after bond: got %d, want %d", l, wantSize)
}
- if found := contains(tab.bucket(pingSender.sha).entries, last.ID); found != lastInBucketIsResponding {
+ if found := contains(tab.bucket(pingSender.ID()).entries, last.ID()); found != lastInBucketIsResponding {
t.Errorf("last entry found: %t, want: %t", found, lastInBucketIsResponding)
}
wantNewEntry := newNodeIsResponding && !lastInBucketIsResponding
- if found := contains(tab.bucket(pingSender.sha).entries, pingSender.ID); found != wantNewEntry {
+ if found := contains(tab.bucket(pingSender.ID()).entries, pingSender.ID()); found != wantNewEntry {
t.Errorf("new entry found: %t, want: %t", found, wantNewEntry)
}
}
@@ -102,9 +100,9 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
Values: func(args []reflect.Value, rand *rand.Rand) {
// generate a random list of nodes. this will be the content of the bucket.
n := rand.Intn(bucketSize-1) + 1
- nodes := make([]*Node, n)
+ nodes := make([]*node, n)
for i := range nodes {
- nodes[i] = nodeAtDistance(common.Hash{}, 200)
+ nodes[i] = nodeAtDistance(enode.ID{}, 200, intIP(200))
}
args[0] = reflect.ValueOf(nodes)
// generate random bump positions.
@@ -116,8 +114,8 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
},
}
- prop := func(nodes []*Node, bumps []int) (ok bool) {
- b := &bucket{entries: make([]*Node, len(nodes))}
+ prop := func(nodes []*node, bumps []int) (ok bool) {
+ b := &bucket{entries: make([]*node, len(nodes))}
copy(b.entries, nodes)
for i, pos := range bumps {
b.bump(b.entries[pos])
@@ -139,12 +137,12 @@ func TestBucket_bumpNoDuplicates(t *testing.T) {
// This checks that the table-wide IP limit is applied correctly.
func TestTable_IPLimit(t *testing.T) {
transport := newPingRecorder()
- tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
+ tab, db := newTestTable(transport)
defer tab.Close()
+ defer db.Close()
for i := 0; i < tableIPLimit+1; i++ {
- n := nodeAtDistance(tab.self.sha, i)
- n.IP = net.IP{172, 0, 1, byte(i)}
+ n := nodeAtDistance(tab.self().ID(), i, net.IP{172, 0, 1, byte(i)})
tab.add(n)
}
if tab.len() > tableIPLimit {
@@ -152,16 +150,16 @@ func TestTable_IPLimit(t *testing.T) {
}
}
-// This checks that the table-wide IP limit is applied correctly.
+// This checks that the per-bucket IP limit is applied correctly.
func TestTable_BucketIPLimit(t *testing.T) {
transport := newPingRecorder()
- tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
+ tab, db := newTestTable(transport)
defer tab.Close()
+ defer db.Close()
d := 3
for i := 0; i < bucketIPLimit+1; i++ {
- n := nodeAtDistance(tab.self.sha, d)
- n.IP = net.IP{172, 0, 1, byte(i)}
+ n := nodeAtDistance(tab.self().ID(), d, net.IP{172, 0, 1, byte(i)})
tab.add(n)
}
if tab.len() > bucketIPLimit {
@@ -169,70 +167,18 @@ func TestTable_BucketIPLimit(t *testing.T) {
}
}
-// fillBucket inserts nodes into the given bucket until
-// it is full. The node's IDs dont correspond to their
-// hashes.
-func fillBucket(tab *Table, n *Node) (last *Node) {
- ld := logdist(tab.self.sha, n.sha)
- b := tab.bucket(n.sha)
- for len(b.entries) < bucketSize {
- b.entries = append(b.entries, nodeAtDistance(tab.self.sha, ld))
- }
- return b.entries[bucketSize-1]
-}
-
-// nodeAtDistance creates a node for which logdist(base, n.sha) == ld.
-// The node's ID does not correspond to n.sha.
-func nodeAtDistance(base common.Hash, ld int) (n *Node) {
- n = new(Node)
- n.sha = hashAtDistance(base, ld)
- n.IP = net.IP{byte(ld), 0, 2, byte(ld)}
- copy(n.ID[:], n.sha[:]) // ensure the node still has a unique ID
- return n
-}
-
-type pingRecorder struct {
- mu sync.Mutex
- dead, pinged map[NodeID]bool
-}
-
-func newPingRecorder() *pingRecorder {
- return &pingRecorder{
- dead: make(map[NodeID]bool),
- pinged: make(map[NodeID]bool),
- }
-}
-
-func (t *pingRecorder) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
- return nil, nil
-}
-func (t *pingRecorder) close() {}
-func (t *pingRecorder) waitping(from NodeID) error {
- return nil // remote always pings
-}
-func (t *pingRecorder) ping(toid NodeID, toaddr *net.UDPAddr) error {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- t.pinged[toid] = true
- if t.dead[toid] {
- return errTimeout
- } else {
- return nil
- }
-}
-
func TestTable_closest(t *testing.T) {
t.Parallel()
test := func(test *closeTest) bool {
// for any node table, Target and N
transport := newPingRecorder()
- tab, _ := newTable(transport, test.Self, &net.UDPAddr{}, "", nil)
+ tab, db := newTestTable(transport)
defer tab.Close()
+ defer db.Close()
tab.stuff(test.All)
- // check that doClosest(Target, N) returns nodes
+ // check that closest(Target, N) returns nodes
result := tab.closest(test.Target, test.N).entries
if hasDuplicates(result) {
t.Errorf("result contains duplicates")
@@ -258,15 +204,15 @@ func TestTable_closest(t *testing.T) {
// check that the result nodes have minimum distance to target.
for _, b := range tab.buckets {
for _, n := range b.entries {
- if contains(result, n.ID) {
+ if contains(result, n.ID()) {
continue // don't run the check below for nodes in result
}
- farthestResult := result[len(result)-1].sha
- if distcmp(test.Target, n.sha, farthestResult) < 0 {
+ farthestResult := result[len(result)-1].ID()
+ if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 {
t.Errorf("table contains node that is closer to target but it's not in result")
t.Logf(" Target: %v", test.Target)
t.Logf(" Farthest Result: %v", farthestResult)
- t.Logf(" ID: %v", n.ID)
+ t.Logf(" ID: %v", n.ID())
return false
}
}
@@ -283,25 +229,26 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
MaxCount: 200,
Rand: rand.New(rand.NewSource(time.Now().Unix())),
Values: func(args []reflect.Value, rand *rand.Rand) {
- args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000)))
+ args[0] = reflect.ValueOf(make([]*enode.Node, rand.Intn(1000)))
},
}
- test := func(buf []*Node) bool {
+ test := func(buf []*enode.Node) bool {
transport := newPingRecorder()
- tab, _ := newTable(transport, NodeID{}, &net.UDPAddr{}, "", nil)
+ tab, db := newTestTable(transport)
defer tab.Close()
+ defer db.Close()
<-tab.initDone
for i := 0; i < len(buf); i++ {
ld := cfg.Rand.Intn(len(tab.buckets))
- tab.stuff([]*Node{nodeAtDistance(tab.self.sha, ld)})
+ tab.stuff([]*node{nodeAtDistance(tab.self().ID(), ld, intIP(ld))})
}
gotN := tab.ReadRandomNodes(buf)
if gotN != tab.len() {
t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.len())
return false
}
- if hasDuplicates(buf[:gotN]) {
+ if hasDuplicates(wrapNodes(buf[:gotN])) {
t.Errorf("result contains duplicates")
return false
}
@@ -313,302 +260,308 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) {
}
type closeTest struct {
- Self NodeID
- Target common.Hash
- All []*Node
+ Self enode.ID
+ Target enode.ID
+ All []*node
N int
}
func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
t := &closeTest{
- Self: gen(NodeID{}, rand).(NodeID),
- Target: gen(common.Hash{}, rand).(common.Hash),
+ Self: gen(enode.ID{}, rand).(enode.ID),
+ Target: gen(enode.ID{}, rand).(enode.ID),
N: rand.Intn(bucketSize),
}
- for _, id := range gen([]NodeID{}, rand).([]NodeID) {
- t.All = append(t.All, &Node{ID: id})
+ for _, id := range gen([]enode.ID{}, rand).([]enode.ID) {
+ n := enode.SignNull(new(enr.Record), id)
+ t.All = append(t.All, wrapNode(n))
}
return reflect.ValueOf(t)
}
-//func TestTable_Lookup(t *testing.T) {
-// bucketSizeTest := 16
-// self := nodeAtDistance(common.Hash{}, 0)
-// tab, _ := newTable(lookupTestnet, self.ID, &net.UDPAddr{}, "", nil)
-// defer tab.Close()
-//
-// // lookup on empty table returns no nodes
-// if results := tab.Lookup(lookupTestnet.target); len(results) > 0 {
-// t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
-// }
-// // seed table with initial node (otherwise lookup will terminate immediately)
-// seed := NewNode(lookupTestnet.dists[256][0], net.IP{}, 256, 0)
-// tab.stuff([]*Node{seed})
-//
-// results := tab.Lookup(lookupTestnet.target)
-// t.Logf("results:")
-// for _, e := range results {
-// t.Logf(" ld=%d, %x", logdist(lookupTestnet.targetSha, e.sha), e.sha[:])
-// }
-// if len(results) != bucketSizeTest {
-// t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSizeTest)
-// }
-// if hasDuplicates(results) {
-// t.Errorf("result set contains duplicate entries")
-// }
-// if !sortedByDistanceTo(lookupTestnet.targetSha, results) {
-// t.Errorf("result set not sorted by distance to target")
-// }
-// // TODO: check result nodes are actually closest
-//}
+func TestTable_Lookup(t *testing.T) {
+ tab, db := newTestTable(lookupTestnet)
+ defer tab.Close()
+ defer db.Close()
+
+ // lookup on empty table returns no nodes
+ if results := tab.lookup(lookupTestnet.target, false); len(results) > 0 {
+ t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
+ }
+ // seed table with initial node (otherwise lookup will terminate immediately)
+ seedKey, _ := decodePubkey(lookupTestnet.dists[256][0])
+ seed := wrapNode(enode.NewV4(seedKey, net.IP{}, 0, 256))
+ tab.stuff([]*node{seed})
+
+ results := tab.lookup(lookupTestnet.target, true)
+ t.Logf("results:")
+ for _, e := range results {
+ t.Logf(" ld=%d, %x", enode.LogDist(lookupTestnet.targetSha, e.ID()), e.ID().Bytes())
+ }
+ if len(results) != bucketSize {
+ t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize)
+ }
+ if hasDuplicates(results) {
+ t.Errorf("result set contains duplicate entries")
+ }
+ if !sortedByDistanceTo(lookupTestnet.targetSha, results) {
+ t.Errorf("result set not sorted by distance to target")
+ }
+ // TODO: check result nodes are actually closest
+}
// This is the test network for the Lookup test.
// The nodes were obtained by running testnet.mine with a random NodeID as target.
var lookupTestnet = &preminedTestnet{
- target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
- targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61},
- dists: [257][]NodeID{
+ target: hexEncPubkey("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"),
+ targetSha: enode.HexID("5c944ee51c5ae9f72a95eccb8aed0374eecb5119d720cbea6813e8e0d6ad9261"),
+ dists: [257][]encPubkey{
240: {
- MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
- MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
+ hexEncPubkey("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"),
+ hexEncPubkey("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"),
},
244: {
- MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
+ hexEncPubkey("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"),
},
246: {
- MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
- MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
- MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
+ hexEncPubkey("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"),
+ hexEncPubkey("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"),
+ hexEncPubkey("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"),
},
247: {
- MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
- MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
- MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
- MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
- MustHexID("8b58c6073dd98bbad4e310b97186c8f822d3a5c7d57af40e2136e88e315afd115edb27d2d0685a908cfe5aa49d0debdda6e6e63972691d6bd8c5af2d771dd2a9"),
- MustHexID("2cbb718b7dc682da19652e7d9eb4fefaf7b7147d82c1c2b6805edf77b85e29fde9f6da195741467ff2638dc62c8d3e014ea5686693c15ed0080b6de90354c137"),
- MustHexID("e84027696d3f12f2de30a9311afea8fbd313c2360daff52bb5fc8c7094d5295758bec3134e4eef24e4cdf377b40da344993284628a7a346eba94f74160998feb"),
- MustHexID("f1357a4f04f9d33753a57c0b65ba20a5d8777abbffd04e906014491c9103fb08590e45548d37aa4bd70965e2e81ddba94f31860348df01469eec8c1829200a68"),
- MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
- MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
+ hexEncPubkey("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
+ hexEncPubkey("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
+ hexEncPubkey("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
+ hexEncPubkey("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
+ hexEncPubkey("8b58c6073dd98bbad4e310b97186c8f822d3a5c7d57af40e2136e88e315afd115edb27d2d0685a908cfe5aa49d0debdda6e6e63972691d6bd8c5af2d771dd2a9"),
+ hexEncPubkey("2cbb718b7dc682da19652e7d9eb4fefaf7b7147d82c1c2b6805edf77b85e29fde9f6da195741467ff2638dc62c8d3e014ea5686693c15ed0080b6de90354c137"),
+ hexEncPubkey("e84027696d3f12f2de30a9311afea8fbd313c2360daff52bb5fc8c7094d5295758bec3134e4eef24e4cdf377b40da344993284628a7a346eba94f74160998feb"),
+ hexEncPubkey("f1357a4f04f9d33753a57c0b65ba20a5d8777abbffd04e906014491c9103fb08590e45548d37aa4bd70965e2e81ddba94f31860348df01469eec8c1829200a68"),
+ hexEncPubkey("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"),
+ hexEncPubkey("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"),
},
248: {
- MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
- MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
- MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
- MustHexID("3ed426322dee7572b08592e1e079f8b6c6b30e10e6243edd144a6a48fdbdb83df73a6e41b1143722cb82604f2203a32758610b5d9544f44a1a7921ba001528c1"),
- MustHexID("b2e2a2b7fdd363572a3256e75435fab1da3b16f7891a8bd2015f30995dae665d7eabfd194d87d99d5df628b4bbc7b04e5b492c596422dd8272746c7a1b0b8e4f"),
- MustHexID("0c69c9756162c593e85615b814ce57a2a8ca2df6c690b9c4e4602731b61e1531a3bbe3f7114271554427ffabea80ad8f36fa95a49fa77b675ae182c6ccac1728"),
- MustHexID("8d28be21d5a97b0876442fa4f5e5387f5bf3faad0b6f13b8607b64d6e448c0991ca28dd7fe2f64eb8eadd7150bff5d5666aa6ed868b84c71311f4ba9a38569dd"),
- MustHexID("2c677e1c64b9c9df6359348a7f5f33dc79e22f0177042486d125f8b6ca7f0dc756b1f672aceee5f1746bcff80aaf6f92a8dc0c9fbeb259b3fa0da060de5ab7e8"),
- MustHexID("3994880f94a8678f0cd247a43f474a8af375d2a072128da1ad6cae84a244105ff85e94fc7d8496f639468de7ee998908a91c7e33ef7585fff92e984b210941a1"),
- MustHexID("b45a9153c08d002a48090d15d61a7c7dad8c2af85d4ff5bd36ce23a9a11e0709bf8d56614c7b193bc028c16cbf7f20dfbcc751328b64a924995d47b41e452422"),
- MustHexID("057ab3a9e53c7a84b0f3fc586117a525cdd18e313f52a67bf31798d48078e325abe5cfee3f6c2533230cb37d0549289d692a29dd400e899b8552d4b928f6f907"),
- MustHexID("0ddf663d308791eb92e6bd88a2f8cb45e4f4f35bb16708a0e6ff7f1362aa6a73fedd0a1b1557fb3365e38e1b79d6918e2fae2788728b70c9ab6b51a3b94a4338"),
- MustHexID("f637e07ff50cc1e3731735841c4798411059f2023abcf3885674f3e8032531b0edca50fd715df6feb489b6177c345374d64f4b07d257a7745de393a107b013a5"),
- MustHexID("e24ec7c6eec094f63c7b3239f56d311ec5a3e45bc4e622a1095a65b95eea6fe13e29f3b6b7a2cbfe40906e3989f17ac834c3102dd0cadaaa26e16ee06d782b72"),
- MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
- MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
+ hexEncPubkey("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"),
+ hexEncPubkey("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"),
+ hexEncPubkey("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"),
+ hexEncPubkey("3ed426322dee7572b08592e1e079f8b6c6b30e10e6243edd144a6a48fdbdb83df73a6e41b1143722cb82604f2203a32758610b5d9544f44a1a7921ba001528c1"),
+ hexEncPubkey("b2e2a2b7fdd363572a3256e75435fab1da3b16f7891a8bd2015f30995dae665d7eabfd194d87d99d5df628b4bbc7b04e5b492c596422dd8272746c7a1b0b8e4f"),
+ hexEncPubkey("0c69c9756162c593e85615b814ce57a2a8ca2df6c690b9c4e4602731b61e1531a3bbe3f7114271554427ffabea80ad8f36fa95a49fa77b675ae182c6ccac1728"),
+ hexEncPubkey("8d28be21d5a97b0876442fa4f5e5387f5bf3faad0b6f13b8607b64d6e448c0991ca28dd7fe2f64eb8eadd7150bff5d5666aa6ed868b84c71311f4ba9a38569dd"),
+ hexEncPubkey("2c677e1c64b9c9df6359348a7f5f33dc79e22f0177042486d125f8b6ca7f0dc756b1f672aceee5f1746bcff80aaf6f92a8dc0c9fbeb259b3fa0da060de5ab7e8"),
+ hexEncPubkey("3994880f94a8678f0cd247a43f474a8af375d2a072128da1ad6cae84a244105ff85e94fc7d8496f639468de7ee998908a91c7e33ef7585fff92e984b210941a1"),
+ hexEncPubkey("b45a9153c08d002a48090d15d61a7c7dad8c2af85d4ff5bd36ce23a9a11e0709bf8d56614c7b193bc028c16cbf7f20dfbcc751328b64a924995d47b41e452422"),
+ hexEncPubkey("057ab3a9e53c7a84b0f3fc586117a525cdd18e313f52a67bf31798d48078e325abe5cfee3f6c2533230cb37d0549289d692a29dd400e899b8552d4b928f6f907"),
+ hexEncPubkey("0ddf663d308791eb92e6bd88a2f8cb45e4f4f35bb16708a0e6ff7f1362aa6a73fedd0a1b1557fb3365e38e1b79d6918e2fae2788728b70c9ab6b51a3b94a4338"),
+ hexEncPubkey("f637e07ff50cc1e3731735841c4798411059f2023abcf3885674f3e8032531b0edca50fd715df6feb489b6177c345374d64f4b07d257a7745de393a107b013a5"),
+ hexEncPubkey("e24ec7c6eec094f63c7b3239f56d311ec5a3e45bc4e622a1095a65b95eea6fe13e29f3b6b7a2cbfe40906e3989f17ac834c3102dd0cadaaa26e16ee06d782b72"),
+ hexEncPubkey("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"),
+ hexEncPubkey("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"),
},
249: {
- MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
- MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
- MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
- MustHexID("b1b4bfbda514d9b8f35b1c28961da5d5216fe50548f4066f69af3b7666a3b2e06eac646735e963e5c8f8138a2fb95af15b13b23ff00c6986eccc0efaa8ee6fb4"),
- MustHexID("d2139281b289ad0e4d7b4243c4364f5c51aac8b60f4806135de06b12b5b369c9e43a6eb494eab860d115c15c6fbb8c5a1b0e382972e0e460af395b8385363de7"),
- MustHexID("4a693df4b8fc5bdc7cec342c3ed2e228d7c5b4ab7321ddaa6cccbeb45b05a9f1d95766b4002e6d4791c2deacb8a667aadea6a700da28a3eea810a30395701bbc"),
- MustHexID("ab41611195ec3c62bb8cd762ee19fb182d194fd141f4a66780efbef4b07ce916246c022b841237a3a6b512a93431157edd221e854ed2a259b72e9c5351f44d0c"),
- MustHexID("68e8e26099030d10c3c703ae7045c0a48061fb88058d853b3e67880014c449d4311014da99d617d3150a20f1a3da5e34bf0f14f1c51fe4dd9d58afd222823176"),
- MustHexID("3fbcacf546fb129cd70fc48de3b593ba99d3c473798bc309292aca280320e0eacc04442c914cad5c4cf6950345ba79b0d51302df88285d4e83ee3fe41339eee7"),
- MustHexID("1d4a623659f7c8f80b6c3939596afdf42e78f892f682c768ad36eb7bfba402dbf97aea3a268f3badd8fe7636be216edf3d67ee1e08789ebbc7be625056bd7109"),
- MustHexID("a283c474ab09da02bbc96b16317241d0627646fcc427d1fe790b76a7bf1989ced90f92101a973047ae9940c92720dffbac8eff21df8cae468a50f72f9e159417"),
- MustHexID("dbf7e5ad7f87c3dfecae65d87c3039e14ed0bdc56caf00ce81931073e2e16719d746295512ff7937a15c3b03603e7c41a4f9df94fcd37bb200dd8f332767e9cb"),
- MustHexID("caaa070a26692f64fc77f30d7b5ae980d419b4393a0f442b1c821ef58c0862898b0d22f74a4f8c5d83069493e3ec0b92f17dc1fe6e4cd437c1ec25039e7ce839"),
- MustHexID("874cc8d1213beb65c4e0e1de38ef5d8165235893ac74ab5ea937c885eaab25c8d79dad0456e9fd3e9450626cac7e107b004478fb59842f067857f39a47cee695"),
- MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
- MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
+ hexEncPubkey("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"),
+ hexEncPubkey("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"),
+ hexEncPubkey("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"),
+ hexEncPubkey("b1b4bfbda514d9b8f35b1c28961da5d5216fe50548f4066f69af3b7666a3b2e06eac646735e963e5c8f8138a2fb95af15b13b23ff00c6986eccc0efaa8ee6fb4"),
+ hexEncPubkey("d2139281b289ad0e4d7b4243c4364f5c51aac8b60f4806135de06b12b5b369c9e43a6eb494eab860d115c15c6fbb8c5a1b0e382972e0e460af395b8385363de7"),
+ hexEncPubkey("4a693df4b8fc5bdc7cec342c3ed2e228d7c5b4ab7321ddaa6cccbeb45b05a9f1d95766b4002e6d4791c2deacb8a667aadea6a700da28a3eea810a30395701bbc"),
+ hexEncPubkey("ab41611195ec3c62bb8cd762ee19fb182d194fd141f4a66780efbef4b07ce916246c022b841237a3a6b512a93431157edd221e854ed2a259b72e9c5351f44d0c"),
+ hexEncPubkey("68e8e26099030d10c3c703ae7045c0a48061fb88058d853b3e67880014c449d4311014da99d617d3150a20f1a3da5e34bf0f14f1c51fe4dd9d58afd222823176"),
+ hexEncPubkey("3fbcacf546fb129cd70fc48de3b593ba99d3c473798bc309292aca280320e0eacc04442c914cad5c4cf6950345ba79b0d51302df88285d4e83ee3fe41339eee7"),
+ hexEncPubkey("1d4a623659f7c8f80b6c3939596afdf42e78f892f682c768ad36eb7bfba402dbf97aea3a268f3badd8fe7636be216edf3d67ee1e08789ebbc7be625056bd7109"),
+ hexEncPubkey("a283c474ab09da02bbc96b16317241d0627646fcc427d1fe790b76a7bf1989ced90f92101a973047ae9940c92720dffbac8eff21df8cae468a50f72f9e159417"),
+ hexEncPubkey("dbf7e5ad7f87c3dfecae65d87c3039e14ed0bdc56caf00ce81931073e2e16719d746295512ff7937a15c3b03603e7c41a4f9df94fcd37bb200dd8f332767e9cb"),
+ hexEncPubkey("caaa070a26692f64fc77f30d7b5ae980d419b4393a0f442b1c821ef58c0862898b0d22f74a4f8c5d83069493e3ec0b92f17dc1fe6e4cd437c1ec25039e7ce839"),
+ hexEncPubkey("874cc8d1213beb65c4e0e1de38ef5d8165235893ac74ab5ea937c885eaab25c8d79dad0456e9fd3e9450626cac7e107b004478fb59842f067857f39a47cee695"),
+ hexEncPubkey("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"),
+ hexEncPubkey("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"),
},
250: {
- MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
- MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
- MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
- MustHexID("1e1eac1c9add703eb252eb991594f8f5a173255d526a855fab24ae57dc277e055bc3c7a7ae0b45d437c4f47a72d97eb7b126f2ba344ba6c0e14b2c6f27d4b1e6"),
- MustHexID("ae28953f63d4bc4e706712a59319c111f5ff8f312584f65d7436b4cd3d14b217b958f8486bad666b4481fe879019fb1f767cf15b3e3e2711efc33b56d460448a"),
- MustHexID("934bb1edf9c7a318b82306aca67feb3d6b434421fa275d694f0b4927afd8b1d3935b727fd4ff6e3d012e0c82f1824385174e8c6450ade59c2a43281a4b3446b6"),
- MustHexID("9eef3f28f70ce19637519a0916555bf76d26de31312ac656cf9d3e379899ea44e4dd7ffcce923b4f3563f8a00489a34bd6936db0cbb4c959d32c49f017e07d05"),
- MustHexID("82200872e8f871c48f1fad13daec6478298099b591bb3dbc4ef6890aa28ebee5860d07d70be62f4c0af85085a90ae8179ee8f937cf37915c67ea73e704b03ee7"),
- MustHexID("6c75a5834a08476b7fc37ff3dc2011dc3ea3b36524bad7a6d319b18878fad813c0ba76d1f4555cacd3890c865438c21f0e0aed1f80e0a157e642124c69f43a11"),
- MustHexID("995b873742206cb02b736e73a88580c2aacb0bd4a3c97a647b647bcab3f5e03c0e0736520a8b3600da09edf4248991fb01091ec7ff3ec7cdc8a1beae011e7aae"),
- MustHexID("c773a056594b5cdef2e850d30891ff0e927c3b1b9c35cd8e8d53a1017001e237468e1ece3ae33d612ca3e6abb0a9169aa352e9dcda358e5af2ad982b577447db"),
- MustHexID("2b46a5f6923f475c6be99ec6d134437a6d11f6bb4b4ac6bcd94572fa1092639d1c08aeefcb51f0912f0a060f71d4f38ee4da70ecc16010b05dd4a674aab14c3a"),
- MustHexID("af6ab501366debbaa0d22e20e9688f32ef6b3b644440580fd78de4fe0e99e2a16eb5636bbae0d1c259df8ddda77b35b9a35cbc36137473e9c68fbc9d203ba842"),
- MustHexID("c9f6f2dd1a941926f03f770695bda289859e85fabaf94baaae20b93e5015dc014ba41150176a36a1884adb52f405194693e63b0c464a6891cc9cc1c80d450326"),
- MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
- MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
+ hexEncPubkey("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"),
+ hexEncPubkey("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"),
+ hexEncPubkey("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"),
+ hexEncPubkey("1e1eac1c9add703eb252eb991594f8f5a173255d526a855fab24ae57dc277e055bc3c7a7ae0b45d437c4f47a72d97eb7b126f2ba344ba6c0e14b2c6f27d4b1e6"),
+ hexEncPubkey("ae28953f63d4bc4e706712a59319c111f5ff8f312584f65d7436b4cd3d14b217b958f8486bad666b4481fe879019fb1f767cf15b3e3e2711efc33b56d460448a"),
+ hexEncPubkey("934bb1edf9c7a318b82306aca67feb3d6b434421fa275d694f0b4927afd8b1d3935b727fd4ff6e3d012e0c82f1824385174e8c6450ade59c2a43281a4b3446b6"),
+ hexEncPubkey("9eef3f28f70ce19637519a0916555bf76d26de31312ac656cf9d3e379899ea44e4dd7ffcce923b4f3563f8a00489a34bd6936db0cbb4c959d32c49f017e07d05"),
+ hexEncPubkey("82200872e8f871c48f1fad13daec6478298099b591bb3dbc4ef6890aa28ebee5860d07d70be62f4c0af85085a90ae8179ee8f937cf37915c67ea73e704b03ee7"),
+ hexEncPubkey("6c75a5834a08476b7fc37ff3dc2011dc3ea3b36524bad7a6d319b18878fad813c0ba76d1f4555cacd3890c865438c21f0e0aed1f80e0a157e642124c69f43a11"),
+ hexEncPubkey("995b873742206cb02b736e73a88580c2aacb0bd4a3c97a647b647bcab3f5e03c0e0736520a8b3600da09edf4248991fb01091ec7ff3ec7cdc8a1beae011e7aae"),
+ hexEncPubkey("c773a056594b5cdef2e850d30891ff0e927c3b1b9c35cd8e8d53a1017001e237468e1ece3ae33d612ca3e6abb0a9169aa352e9dcda358e5af2ad982b577447db"),
+ hexEncPubkey("2b46a5f6923f475c6be99ec6d134437a6d11f6bb4b4ac6bcd94572fa1092639d1c08aeefcb51f0912f0a060f71d4f38ee4da70ecc16010b05dd4a674aab14c3a"),
+ hexEncPubkey("af6ab501366debbaa0d22e20e9688f32ef6b3b644440580fd78de4fe0e99e2a16eb5636bbae0d1c259df8ddda77b35b9a35cbc36137473e9c68fbc9d203ba842"),
+ hexEncPubkey("c9f6f2dd1a941926f03f770695bda289859e85fabaf94baaae20b93e5015dc014ba41150176a36a1884adb52f405194693e63b0c464a6891cc9cc1c80d450326"),
+ hexEncPubkey("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"),
+ hexEncPubkey("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"),
},
251: {
- MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
- MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
- MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
- MustHexID("42c7483781727051a0b3660f14faf39e0d33de5e643702ae933837d036508ab856ce7eec8ec89c4929a4901256e5233a3d847d5d4893f91bcf21835a9a880fee"),
- MustHexID("873bae27bf1dc854408fba94046a53ab0c965cebe1e4e12290806fc62b88deb1f4a47f9e18f78fc0e7913a0c6e42ac4d0fc3a20cea6bc65f0c8a0ca90b67521e"),
- MustHexID("a7e3a370bbd761d413f8d209e85886f68bf73d5c3089b2dc6fa42aab1ecb5162635497eed95dee2417f3c9c74a3e76319625c48ead2e963c7de877cd4551f347"),
- MustHexID("528597534776a40df2addaaea15b6ff832ce36b9748a265768368f657e76d58569d9f30dbb91e91cf0ae7efe8f402f17aa0ae15f5c55051ba03ba830287f4c42"),
- MustHexID("461d8bd4f13c3c09031fdb84f104ed737a52f630261463ce0bdb5704259bab4b737dda688285b8444dbecaecad7f50f835190b38684ced5e90c54219e5adf1bc"),
- MustHexID("6ec50c0be3fd232737090fc0111caaf0bb6b18f72be453428087a11a97fd6b52db0344acbf789a689bd4f5f50f79017ea784f8fd6fe723ad6ae675b9e3b13e21"),
- MustHexID("12fc5e2f77a83fdcc727b79d8ae7fe6a516881138d3011847ee136b400fed7cfba1f53fd7a9730253c7aa4f39abeacd04f138417ba7fcb0f36cccc3514e0dab6"),
- MustHexID("4fdbe75914ccd0bce02101606a1ccf3657ec963e3b3c20239d5fec87673fe446d649b4f15f1fe1a40e6cfbd446dda2d31d40bb602b1093b8fcd5f139ba0eb46a"),
- MustHexID("3753668a0f6281e425ea69b52cb2d17ab97afbe6eb84cf5d25425bc5e53009388857640668fadd7c110721e6047c9697803bd8a6487b43bb343bfa32ebf24039"),
- MustHexID("2e81b16346637dec4410fd88e527346145b9c0a849dbf2628049ac7dae016c8f4305649d5659ec77f1e8a0fac0db457b6080547226f06283598e3740ad94849a"),
- MustHexID("802c3cc27f91c89213223d758f8d2ecd41135b357b6d698f24d811cdf113033a81c38e0bdff574a5c005b00a8c193dc2531f8c1fa05fa60acf0ab6f2858af09f"),
- MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
- MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
+ hexEncPubkey("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"),
+ hexEncPubkey("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"),
+ hexEncPubkey("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"),
+ hexEncPubkey("42c7483781727051a0b3660f14faf39e0d33de5e643702ae933837d036508ab856ce7eec8ec89c4929a4901256e5233a3d847d5d4893f91bcf21835a9a880fee"),
+ hexEncPubkey("873bae27bf1dc854408fba94046a53ab0c965cebe1e4e12290806fc62b88deb1f4a47f9e18f78fc0e7913a0c6e42ac4d0fc3a20cea6bc65f0c8a0ca90b67521e"),
+ hexEncPubkey("a7e3a370bbd761d413f8d209e85886f68bf73d5c3089b2dc6fa42aab1ecb5162635497eed95dee2417f3c9c74a3e76319625c48ead2e963c7de877cd4551f347"),
+ hexEncPubkey("528597534776a40df2addaaea15b6ff832ce36b9748a265768368f657e76d58569d9f30dbb91e91cf0ae7efe8f402f17aa0ae15f5c55051ba03ba830287f4c42"),
+ hexEncPubkey("461d8bd4f13c3c09031fdb84f104ed737a52f630261463ce0bdb5704259bab4b737dda688285b8444dbecaecad7f50f835190b38684ced5e90c54219e5adf1bc"),
+ hexEncPubkey("6ec50c0be3fd232737090fc0111caaf0bb6b18f72be453428087a11a97fd6b52db0344acbf789a689bd4f5f50f79017ea784f8fd6fe723ad6ae675b9e3b13e21"),
+ hexEncPubkey("12fc5e2f77a83fdcc727b79d8ae7fe6a516881138d3011847ee136b400fed7cfba1f53fd7a9730253c7aa4f39abeacd04f138417ba7fcb0f36cccc3514e0dab6"),
+ hexEncPubkey("4fdbe75914ccd0bce02101606a1ccf3657ec963e3b3c20239d5fec87673fe446d649b4f15f1fe1a40e6cfbd446dda2d31d40bb602b1093b8fcd5f139ba0eb46a"),
+ hexEncPubkey("3753668a0f6281e425ea69b52cb2d17ab97afbe6eb84cf5d25425bc5e53009388857640668fadd7c110721e6047c9697803bd8a6487b43bb343bfa32ebf24039"),
+ hexEncPubkey("2e81b16346637dec4410fd88e527346145b9c0a849dbf2628049ac7dae016c8f4305649d5659ec77f1e8a0fac0db457b6080547226f06283598e3740ad94849a"),
+ hexEncPubkey("802c3cc27f91c89213223d758f8d2ecd41135b357b6d698f24d811cdf113033a81c38e0bdff574a5c005b00a8c193dc2531f8c1fa05fa60acf0ab6f2858af09f"),
+ hexEncPubkey("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"),
+ hexEncPubkey("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"),
},
252: {
- MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
- MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
- MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
- MustHexID("0ddc736077da9a12ba410dc5ea63cbcbe7659dd08596485b2bff3435221f82c10d263efd9af938e128464be64a178b7cd22e19f400d5802f4c9df54bf89f2619"),
- MustHexID("784aa34d833c6ce63fcc1279630113c3272e82c4ae8c126c5a52a88ac461b6baeed4244e607b05dc14e5b2f41c70a273c3804dea237f14f7a1e546f6d1309d14"),
- MustHexID("f253a2c354ee0e27cfcae786d726753d4ad24be6516b279a936195a487de4a59dbc296accf20463749ff55293263ed8c1b6365eecb248d44e75e9741c0d18205"),
- MustHexID("a1910b80357b3ad9b4593e0628922939614dc9056a5fbf477279c8b2c1d0b4b31d89a0c09d0d41f795271d14d3360ef08a3f821e65e7e1f56c07a36afe49c7c5"),
- MustHexID("f1168552c2efe541160f0909b0b4a9d6aeedcf595cdf0e9b165c97e3e197471a1ee6320e93389edfba28af6eaf10de98597ad56e7ab1b504ed762451996c3b98"),
- MustHexID("b0c8e5d2c8634a7930e1a6fd082e448c6cf9d2d8b7293558b59238815a4df926c286bf297d2049f14e8296a6eb3256af614ec1812c4f2bbe807673b58bf14c8c"),
- MustHexID("0fb346076396a38badc342df3679b55bd7f40a609ab103411fe45082c01f12ea016729e95914b2b5540e987ff5c9b133e85862648e7f36abdfd23100d248d234"),
- MustHexID("f736e0cc83417feaa280d9483f5d4d72d1b036cd0c6d9cbdeb8ac35ceb2604780de46dddaa32a378474e1d5ccdf79b373331c30c7911ade2ae32f98832e5de1f"),
- MustHexID("8b02991457602f42b38b342d3f2259ae4100c354b3843885f7e4e07bd644f64dab94bb7f38a3915f8b7f11d8e3f81c28e07a0078cf79d7397e38a7b7e0c857e2"),
- MustHexID("9221d9f04a8a184993d12baa91116692bb685f887671302999d69300ad103eb2d2c75a09d8979404c6dd28f12362f58a1a43619c493d9108fd47588a23ce5824"),
- MustHexID("652797801744dada833fff207d67484742eea6835d695925f3e618d71b68ec3c65bdd85b4302b2cdcb835ad3f94fd00d8da07e570b41bc0d2bcf69a8de1b3284"),
- MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
- MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
+ hexEncPubkey("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"),
+ hexEncPubkey("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"),
+ hexEncPubkey("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"),
+ hexEncPubkey("0ddc736077da9a12ba410dc5ea63cbcbe7659dd08596485b2bff3435221f82c10d263efd9af938e128464be64a178b7cd22e19f400d5802f4c9df54bf89f2619"),
+ hexEncPubkey("784aa34d833c6ce63fcc1279630113c3272e82c4ae8c126c5a52a88ac461b6baeed4244e607b05dc14e5b2f41c70a273c3804dea237f14f7a1e546f6d1309d14"),
+ hexEncPubkey("f253a2c354ee0e27cfcae786d726753d4ad24be6516b279a936195a487de4a59dbc296accf20463749ff55293263ed8c1b6365eecb248d44e75e9741c0d18205"),
+ hexEncPubkey("a1910b80357b3ad9b4593e0628922939614dc9056a5fbf477279c8b2c1d0b4b31d89a0c09d0d41f795271d14d3360ef08a3f821e65e7e1f56c07a36afe49c7c5"),
+ hexEncPubkey("f1168552c2efe541160f0909b0b4a9d6aeedcf595cdf0e9b165c97e3e197471a1ee6320e93389edfba28af6eaf10de98597ad56e7ab1b504ed762451996c3b98"),
+ hexEncPubkey("b0c8e5d2c8634a7930e1a6fd082e448c6cf9d2d8b7293558b59238815a4df926c286bf297d2049f14e8296a6eb3256af614ec1812c4f2bbe807673b58bf14c8c"),
+ hexEncPubkey("0fb346076396a38badc342df3679b55bd7f40a609ab103411fe45082c01f12ea016729e95914b2b5540e987ff5c9b133e85862648e7f36abdfd23100d248d234"),
+ hexEncPubkey("f736e0cc83417feaa280d9483f5d4d72d1b036cd0c6d9cbdeb8ac35ceb2604780de46dddaa32a378474e1d5ccdf79b373331c30c7911ade2ae32f98832e5de1f"),
+ hexEncPubkey("8b02991457602f42b38b342d3f2259ae4100c354b3843885f7e4e07bd644f64dab94bb7f38a3915f8b7f11d8e3f81c28e07a0078cf79d7397e38a7b7e0c857e2"),
+ hexEncPubkey("9221d9f04a8a184993d12baa91116692bb685f887671302999d69300ad103eb2d2c75a09d8979404c6dd28f12362f58a1a43619c493d9108fd47588a23ce5824"),
+ hexEncPubkey("652797801744dada833fff207d67484742eea6835d695925f3e618d71b68ec3c65bdd85b4302b2cdcb835ad3f94fd00d8da07e570b41bc0d2bcf69a8de1b3284"),
+ hexEncPubkey("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"),
+ hexEncPubkey("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"),
},
253: {
- MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
- MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
- MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
- MustHexID("d97bf55f88c83fae36232661af115d66ca600fc4bd6d1fb35ff9bb4dad674c02cf8c8d05f317525b5522250db58bb1ecafb7157392bf5aa61b178c61f098d995"),
- MustHexID("7045d678f1f9eb7a4613764d17bd5698796494d0bf977b16f2dbc272b8a0f7858a60805c022fc3d1fe4f31c37e63cdaca0416c0d053ef48a815f8b19121605e0"),
- MustHexID("14e1f21418d445748de2a95cd9a8c3b15b506f86a0acabd8af44bb968ce39885b19c8822af61b3dd58a34d1f265baec30e3ae56149dc7d2aa4a538f7319f69c8"),
- MustHexID("b9453d78281b66a4eac95a1546017111eaaa5f92a65d0de10b1122940e92b319728a24edf4dec6acc412321b1c95266d39c7b3a5d265c629c3e49a65fb022c09"),
- MustHexID("e8a49248419e3824a00d86af422f22f7366e2d4922b304b7169937616a01d9d6fa5abf5cc01061a352dc866f48e1fa2240dbb453d872b1d7be62bdfc1d5e248c"),
- MustHexID("bebcff24b52362f30e0589ee573ce2d86f073d58d18e6852a592fa86ceb1a6c9b96d7fb9ec7ed1ed98a51b6743039e780279f6bb49d0a04327ac7a182d9a56f6"),
- MustHexID("d0835e5a4291db249b8d2fca9f503049988180c7d247bedaa2cf3a1bad0a76709360a85d4f9a1423b2cbc82bb4d94b47c0cde20afc430224834c49fe312a9ae3"),
- MustHexID("6b087fe2a2da5e4f0b0f4777598a4a7fb66bf77dbd5bfc44e8a7eaa432ab585a6e226891f56a7d4f5ed11a7c57b90f1661bba1059590ca4267a35801c2802913"),
- MustHexID("d901e5bde52d1a0f4ddf010a686a53974cdae4ebe5c6551b3c37d6b6d635d38d5b0e5f80bc0186a2c7809dbf3a42870dd09643e68d32db896c6da8ba734579e7"),
- MustHexID("96419fb80efae4b674402bb969ebaab86c1274f29a83a311e24516d36cdf148fe21754d46c97688cdd7468f24c08b13e4727c29263393638a3b37b99ff60ebca"),
- MustHexID("7b9c1889ae916a5d5abcdfb0aaedcc9c6f9eb1c1a4f68d0c2d034fe79ac610ce917c3abc670744150fa891bfcd8ab14fed6983fca964de920aa393fa7b326748"),
- MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
- MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
+ hexEncPubkey("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"),
+ hexEncPubkey("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"),
+ hexEncPubkey("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"),
+ hexEncPubkey("d97bf55f88c83fae36232661af115d66ca600fc4bd6d1fb35ff9bb4dad674c02cf8c8d05f317525b5522250db58bb1ecafb7157392bf5aa61b178c61f098d995"),
+ hexEncPubkey("7045d678f1f9eb7a4613764d17bd5698796494d0bf977b16f2dbc272b8a0f7858a60805c022fc3d1fe4f31c37e63cdaca0416c0d053ef48a815f8b19121605e0"),
+ hexEncPubkey("14e1f21418d445748de2a95cd9a8c3b15b506f86a0acabd8af44bb968ce39885b19c8822af61b3dd58a34d1f265baec30e3ae56149dc7d2aa4a538f7319f69c8"),
+ hexEncPubkey("b9453d78281b66a4eac95a1546017111eaaa5f92a65d0de10b1122940e92b319728a24edf4dec6acc412321b1c95266d39c7b3a5d265c629c3e49a65fb022c09"),
+ hexEncPubkey("e8a49248419e3824a00d86af422f22f7366e2d4922b304b7169937616a01d9d6fa5abf5cc01061a352dc866f48e1fa2240dbb453d872b1d7be62bdfc1d5e248c"),
+ hexEncPubkey("bebcff24b52362f30e0589ee573ce2d86f073d58d18e6852a592fa86ceb1a6c9b96d7fb9ec7ed1ed98a51b6743039e780279f6bb49d0a04327ac7a182d9a56f6"),
+ hexEncPubkey("d0835e5a4291db249b8d2fca9f503049988180c7d247bedaa2cf3a1bad0a76709360a85d4f9a1423b2cbc82bb4d94b47c0cde20afc430224834c49fe312a9ae3"),
+ hexEncPubkey("6b087fe2a2da5e4f0b0f4777598a4a7fb66bf77dbd5bfc44e8a7eaa432ab585a6e226891f56a7d4f5ed11a7c57b90f1661bba1059590ca4267a35801c2802913"),
+ hexEncPubkey("d901e5bde52d1a0f4ddf010a686a53974cdae4ebe5c6551b3c37d6b6d635d38d5b0e5f80bc0186a2c7809dbf3a42870dd09643e68d32db896c6da8ba734579e7"),
+ hexEncPubkey("96419fb80efae4b674402bb969ebaab86c1274f29a83a311e24516d36cdf148fe21754d46c97688cdd7468f24c08b13e4727c29263393638a3b37b99ff60ebca"),
+ hexEncPubkey("7b9c1889ae916a5d5abcdfb0aaedcc9c6f9eb1c1a4f68d0c2d034fe79ac610ce917c3abc670744150fa891bfcd8ab14fed6983fca964de920aa393fa7b326748"),
+ hexEncPubkey("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"),
+ hexEncPubkey("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"),
},
254: {
- MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
- MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
- MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
- MustHexID("27f4a16cc085e72d86e25c98bd2eca173eaaee7565c78ec5a52e9e12b2211f35de81b5b45e9195de2ebfe29106742c59112b951a04eb7ae48822911fc1f9389e"),
- MustHexID("55db5ee7d98e7f0b1c3b9d5be6f2bc619a1b86c3cdd513160ad4dcf267037a5fffad527ac15d50aeb32c59c13d1d4c1e567ebbf4de0d25236130c8361f9aac63"),
- MustHexID("883df308b0130fc928a8559fe50667a0fff80493bc09685d18213b2db241a3ad11310ed86b0ef662b3ce21fc3d9aa7f3fc24b8d9afe17c7407e9afd3345ae548"),
- MustHexID("c7af968cc9bc8200c3ee1a387405f7563be1dce6710a3439f42ea40657d0eae9d2b3c16c42d779605351fcdece4da637b9804e60ca08cfb89aec32c197beffa6"),
- MustHexID("3e66f2b788e3ff1d04106b80597915cd7afa06c405a7ae026556b6e583dca8e05cfbab5039bb9a1b5d06083ffe8de5780b1775550e7218f5e98624bf7af9a0a8"),
- MustHexID("4fc7f53764de3337fdaec0a711d35d3a923e72fa65025444d12230b3552ed43d9b2d1ad08ccb11f2d50c58809e6dd74dde910e195294fca3b47ae5a3967cc479"),
- MustHexID("bafdfdcf6ccaa989436752fa97c77477b6baa7deb374b16c095492c529eb133e8e2f99e1977012b64767b9d34b2cf6d2048ed489bd822b5139b523f6a423167b"),
- MustHexID("7f5d78008a4312fe059104ce80202c82b8915c2eb4411c6b812b16f7642e57c00f2c9425121f5cbac4257fe0b3e81ef5dea97ea2dbaa98f6a8b6fd4d1e5980bb"),
- MustHexID("598c37fe78f922751a052f463aeb0cb0bc7f52b7c2a4cf2da72ec0931c7c32175d4165d0f8998f7320e87324ac3311c03f9382a5385c55f0407b7a66b2acd864"),
- MustHexID("f758c4136e1c148777a7f3275a76e2db0b2b04066fd738554ec398c1c6cc9fb47e14a3b4c87bd47deaeab3ffd2110514c3855685a374794daff87b605b27ee2e"),
- MustHexID("0307bb9e4fd865a49dcf1fe4333d1b944547db650ab580af0b33e53c4fef6c789531110fac801bbcbce21fc4d6f61b6d5b24abdf5b22e3030646d579f6dca9c2"),
- MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
- MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
+ hexEncPubkey("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"),
+ hexEncPubkey("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"),
+ hexEncPubkey("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"),
+ hexEncPubkey("27f4a16cc085e72d86e25c98bd2eca173eaaee7565c78ec5a52e9e12b2211f35de81b5b45e9195de2ebfe29106742c59112b951a04eb7ae48822911fc1f9389e"),
+ hexEncPubkey("55db5ee7d98e7f0b1c3b9d5be6f2bc619a1b86c3cdd513160ad4dcf267037a5fffad527ac15d50aeb32c59c13d1d4c1e567ebbf4de0d25236130c8361f9aac63"),
+ hexEncPubkey("883df308b0130fc928a8559fe50667a0fff80493bc09685d18213b2db241a3ad11310ed86b0ef662b3ce21fc3d9aa7f3fc24b8d9afe17c7407e9afd3345ae548"),
+ hexEncPubkey("c7af968cc9bc8200c3ee1a387405f7563be1dce6710a3439f42ea40657d0eae9d2b3c16c42d779605351fcdece4da637b9804e60ca08cfb89aec32c197beffa6"),
+ hexEncPubkey("3e66f2b788e3ff1d04106b80597915cd7afa06c405a7ae026556b6e583dca8e05cfbab5039bb9a1b5d06083ffe8de5780b1775550e7218f5e98624bf7af9a0a8"),
+ hexEncPubkey("4fc7f53764de3337fdaec0a711d35d3a923e72fa65025444d12230b3552ed43d9b2d1ad08ccb11f2d50c58809e6dd74dde910e195294fca3b47ae5a3967cc479"),
+ hexEncPubkey("bafdfdcf6ccaa989436752fa97c77477b6baa7deb374b16c095492c529eb133e8e2f99e1977012b64767b9d34b2cf6d2048ed489bd822b5139b523f6a423167b"),
+ hexEncPubkey("7f5d78008a4312fe059104ce80202c82b8915c2eb4411c6b812b16f7642e57c00f2c9425121f5cbac4257fe0b3e81ef5dea97ea2dbaa98f6a8b6fd4d1e5980bb"),
+ hexEncPubkey("598c37fe78f922751a052f463aeb0cb0bc7f52b7c2a4cf2da72ec0931c7c32175d4165d0f8998f7320e87324ac3311c03f9382a5385c55f0407b7a66b2acd864"),
+ hexEncPubkey("f758c4136e1c148777a7f3275a76e2db0b2b04066fd738554ec398c1c6cc9fb47e14a3b4c87bd47deaeab3ffd2110514c3855685a374794daff87b605b27ee2e"),
+ hexEncPubkey("0307bb9e4fd865a49dcf1fe4333d1b944547db650ab580af0b33e53c4fef6c789531110fac801bbcbce21fc4d6f61b6d5b24abdf5b22e3030646d579f6dca9c2"),
+ hexEncPubkey("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"),
+ hexEncPubkey("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"),
},
255: {
- MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
- MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
- MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
- MustHexID("c08397d5751b47bd3da044b908be0fb0e510d3149574dff7aeab33749b023bb171b5769990fe17469dbebc100bc150e798aeda426a2dcc766699a225fddd75c6"),
- MustHexID("0222c1c194b749736e593f937fad67ee348ac57287a15c7e42877aa38a9b87732a408bca370f812efd0eedbff13e6d5b854bf3ba1dec431a796ed47f32552b09"),
- MustHexID("03d859cd46ef02d9bfad5268461a6955426845eef4126de6be0fa4e8d7e0727ba2385b78f1a883a8239e95ebb814f2af8379632c7d5b100688eebc5841209582"),
- MustHexID("64d5004b7e043c39ff0bd10cb20094c287721d5251715884c280a612b494b3e9e1c64ba6f67614994c7d969a0d0c0295d107d53fc225d47c44c4b82852d6f960"),
- MustHexID("b0a5eefb2dab6f786670f35bf9641eefe6dd87fd3f1362bcab4aaa792903500ab23d88fae68411372e0813b057535a601d46e454323745a948017f6063a47b1f"),
- MustHexID("0cc6df0a3433d448b5684d2a3ffa9d1a825388177a18f44ad0008c7bd7702f1ec0fc38b83506f7de689c3b6ecb552599927e29699eed6bb867ff08f80068b287"),
- MustHexID("50772f7b8c03a4e153355fbbf79c8a80cf32af656ff0c7873c99911099d04a0dae0674706c357e0145ad017a0ade65e6052cb1b0d574fcd6f67da3eee0ace66b"),
- MustHexID("1ae37829c9ef41f8b508b82259ebac76b1ed900d7a45c08b7970f25d2d48ddd1829e2f11423a18749940b6dab8598c6e416cef0efd47e46e51f29a0bc65b37cd"),
- MustHexID("ba973cab31c2af091fc1644a93527d62b2394999e2b6ccbf158dd5ab9796a43d408786f1803ef4e29debfeb62fce2b6caa5ab2b24d1549c822a11c40c2856665"),
- MustHexID("bc413ad270dd6ea25bddba78f3298b03b8ba6f8608ac03d06007d4116fa78ef5a0cfe8c80155089382fc7a193243ee5500082660cb5d7793f60f2d7d18650964"),
- MustHexID("5a6a9ef07634d9eec3baa87c997b529b92652afa11473dfee41ef7037d5c06e0ddb9fe842364462d79dd31cff8a59a1b8d5bc2b810dea1d4cbbd3beb80ecec83"),
- MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
- MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
+ hexEncPubkey("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"),
+ hexEncPubkey("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"),
+ hexEncPubkey("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"),
+ hexEncPubkey("c08397d5751b47bd3da044b908be0fb0e510d3149574dff7aeab33749b023bb171b5769990fe17469dbebc100bc150e798aeda426a2dcc766699a225fddd75c6"),
+ hexEncPubkey("0222c1c194b749736e593f937fad67ee348ac57287a15c7e42877aa38a9b87732a408bca370f812efd0eedbff13e6d5b854bf3ba1dec431a796ed47f32552b09"),
+ hexEncPubkey("03d859cd46ef02d9bfad5268461a6955426845eef4126de6be0fa4e8d7e0727ba2385b78f1a883a8239e95ebb814f2af8379632c7d5b100688eebc5841209582"),
+ hexEncPubkey("64d5004b7e043c39ff0bd10cb20094c287721d5251715884c280a612b494b3e9e1c64ba6f67614994c7d969a0d0c0295d107d53fc225d47c44c4b82852d6f960"),
+ hexEncPubkey("b0a5eefb2dab6f786670f35bf9641eefe6dd87fd3f1362bcab4aaa792903500ab23d88fae68411372e0813b057535a601d46e454323745a948017f6063a47b1f"),
+ hexEncPubkey("0cc6df0a3433d448b5684d2a3ffa9d1a825388177a18f44ad0008c7bd7702f1ec0fc38b83506f7de689c3b6ecb552599927e29699eed6bb867ff08f80068b287"),
+ hexEncPubkey("50772f7b8c03a4e153355fbbf79c8a80cf32af656ff0c7873c99911099d04a0dae0674706c357e0145ad017a0ade65e6052cb1b0d574fcd6f67da3eee0ace66b"),
+ hexEncPubkey("1ae37829c9ef41f8b508b82259ebac76b1ed900d7a45c08b7970f25d2d48ddd1829e2f11423a18749940b6dab8598c6e416cef0efd47e46e51f29a0bc65b37cd"),
+ hexEncPubkey("ba973cab31c2af091fc1644a93527d62b2394999e2b6ccbf158dd5ab9796a43d408786f1803ef4e29debfeb62fce2b6caa5ab2b24d1549c822a11c40c2856665"),
+ hexEncPubkey("bc413ad270dd6ea25bddba78f3298b03b8ba6f8608ac03d06007d4116fa78ef5a0cfe8c80155089382fc7a193243ee5500082660cb5d7793f60f2d7d18650964"),
+ hexEncPubkey("5a6a9ef07634d9eec3baa87c997b529b92652afa11473dfee41ef7037d5c06e0ddb9fe842364462d79dd31cff8a59a1b8d5bc2b810dea1d4cbbd3beb80ecec83"),
+ hexEncPubkey("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"),
+ hexEncPubkey("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"),
},
256: {
- MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
- MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
- MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
- MustHexID("7feaee0d818c03eb30e4e0bf03ade0f3c21ca38e938a761aa1781cf70bda8cc5cd631a6cc53dd44f1d4a6d3e2dae6513c6c66ee50cb2f0e9ad6f7e319b309fd9"),
- MustHexID("4ca3b657b139311db8d583c25dd5963005e46689e1317620496cc64129c7f3e52870820e0ec7941d28809311df6db8a2867bbd4f235b4248af24d7a9c22d1232"),
- MustHexID("1181defb1d16851d42dd951d84424d6bd1479137f587fa184d5a8152be6b6b16ed08bcdb2c2ed8539bcde98c80c432875f9f724737c316a2bd385a39d3cab1d8"),
- MustHexID("d9dd818769fa0c3ec9f553c759b92476f082817252a04a47dc1777740b1731d280058c66f982812f173a294acf4944a85ba08346e2de153ba3ba41ce8a62cb64"),
- MustHexID("bd7c4f8a9e770aa915c771b15e107ca123d838762da0d3ffc53aa6b53e9cd076cffc534ec4d2e4c334c683f1f5ea72e0e123f6c261915ed5b58ac1b59f003d88"),
- MustHexID("3dd5739c73649d510456a70e9d6b46a855864a4a3f744e088fd8c8da11b18e4c9b5f2d7da50b1c147b2bae5ca9609ae01f7a3cdea9dce34f80a91d29cd82f918"),
- MustHexID("f0d7df1efc439b4bcc0b762118c1cfa99b2a6143a9f4b10e3c9465125f4c9fca4ab88a2504169bbcad65492cf2f50da9dd5d077c39574a944f94d8246529066b"),
- MustHexID("dd598b9ba441448e5fb1a6ec6c5f5aa9605bad6e223297c729b1705d11d05f6bfd3d41988b694681ae69bb03b9a08bff4beab5596503d12a39bffb5cd6e94c7c"),
- MustHexID("3fce284ac97e567aebae681b15b7a2b6df9d873945536335883e4bbc26460c064370537f323fd1ada828ea43154992d14ac0cec0940a2bd2a3f42ec156d60c83"),
- MustHexID("7c8dfa8c1311cb14fb29a8ac11bca23ecc115e56d9fcf7b7ac1db9066aa4eb39f8b1dabf46e192a65be95ebfb4e839b5ab4533fef414921825e996b210dd53bd"),
- MustHexID("cafa6934f82120456620573d7f801390ed5e16ed619613a37e409e44ab355ef755e83565a913b48a9466db786f8d4fbd590bfec474c2524d4a2608d4eafd6abd"),
- MustHexID("9d16600d0dd310d77045769fed2cb427f32db88cd57d86e49390c2ba8a9698cfa856f775be2013237226e7bf47b248871cf865d23015937d1edeb20db5e3e760"),
- MustHexID("17be6b6ba54199b1d80eff866d348ea11d8a4b341d63ad9a6681d3ef8a43853ac564d153eb2a8737f0afc9ab320f6f95c55aa11aaa13bbb1ff422fd16bdf8188"),
+ hexEncPubkey("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"),
+ hexEncPubkey("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"),
+ hexEncPubkey("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"),
+ hexEncPubkey("7feaee0d818c03eb30e4e0bf03ade0f3c21ca38e938a761aa1781cf70bda8cc5cd631a6cc53dd44f1d4a6d3e2dae6513c6c66ee50cb2f0e9ad6f7e319b309fd9"),
+ hexEncPubkey("4ca3b657b139311db8d583c25dd5963005e46689e1317620496cc64129c7f3e52870820e0ec7941d28809311df6db8a2867bbd4f235b4248af24d7a9c22d1232"),
+ hexEncPubkey("1181defb1d16851d42dd951d84424d6bd1479137f587fa184d5a8152be6b6b16ed08bcdb2c2ed8539bcde98c80c432875f9f724737c316a2bd385a39d3cab1d8"),
+ hexEncPubkey("d9dd818769fa0c3ec9f553c759b92476f082817252a04a47dc1777740b1731d280058c66f982812f173a294acf4944a85ba08346e2de153ba3ba41ce8a62cb64"),
+ hexEncPubkey("bd7c4f8a9e770aa915c771b15e107ca123d838762da0d3ffc53aa6b53e9cd076cffc534ec4d2e4c334c683f1f5ea72e0e123f6c261915ed5b58ac1b59f003d88"),
+ hexEncPubkey("3dd5739c73649d510456a70e9d6b46a855864a4a3f744e088fd8c8da11b18e4c9b5f2d7da50b1c147b2bae5ca9609ae01f7a3cdea9dce34f80a91d29cd82f918"),
+ hexEncPubkey("f0d7df1efc439b4bcc0b762118c1cfa99b2a6143a9f4b10e3c9465125f4c9fca4ab88a2504169bbcad65492cf2f50da9dd5d077c39574a944f94d8246529066b"),
+ hexEncPubkey("dd598b9ba441448e5fb1a6ec6c5f5aa9605bad6e223297c729b1705d11d05f6bfd3d41988b694681ae69bb03b9a08bff4beab5596503d12a39bffb5cd6e94c7c"),
+ hexEncPubkey("3fce284ac97e567aebae681b15b7a2b6df9d873945536335883e4bbc26460c064370537f323fd1ada828ea43154992d14ac0cec0940a2bd2a3f42ec156d60c83"),
+ hexEncPubkey("7c8dfa8c1311cb14fb29a8ac11bca23ecc115e56d9fcf7b7ac1db9066aa4eb39f8b1dabf46e192a65be95ebfb4e839b5ab4533fef414921825e996b210dd53bd"),
+ hexEncPubkey("cafa6934f82120456620573d7f801390ed5e16ed619613a37e409e44ab355ef755e83565a913b48a9466db786f8d4fbd590bfec474c2524d4a2608d4eafd6abd"),
+ hexEncPubkey("9d16600d0dd310d77045769fed2cb427f32db88cd57d86e49390c2ba8a9698cfa856f775be2013237226e7bf47b248871cf865d23015937d1edeb20db5e3e760"),
+ hexEncPubkey("17be6b6ba54199b1d80eff866d348ea11d8a4b341d63ad9a6681d3ef8a43853ac564d153eb2a8737f0afc9ab320f6f95c55aa11aaa13bbb1ff422fd16bdf8188"),
},
},
}
type preminedTestnet struct {
- target NodeID
- targetSha common.Hash // sha3(target)
- dists [hashBits + 1][]NodeID
+ target encPubkey
+ targetSha enode.ID // sha3(target)
+ dists [hashBits + 1][]encPubkey
+}
+
+func (tn *preminedTestnet) self() *enode.Node {
+ return nullNode
}
-func (tn *preminedTestnet) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
+func (tn *preminedTestnet) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
// current log distance is encoded in port number
// fmt.Println("findnode query at dist", toaddr.Port)
if toaddr.Port == 0 {
panic("query to node at distance 0")
}
- next := uint16(toaddr.Port) - 1
- var result []*Node
- for i, id := range tn.dists[toaddr.Port] {
- result = append(result, NewNode(id, net.ParseIP("127.0.0.1"), next, uint16(i)))
+ next := toaddr.Port - 1
+ var result []*node
+ for i, ekey := range tn.dists[toaddr.Port] {
+ key, _ := decodePubkey(ekey)
+ node := wrapNode(enode.NewV4(key, net.ParseIP("127.0.0.1"), i, next))
+ result = append(result, node)
}
return result, nil
}
-func (*preminedTestnet) close() {}
-func (*preminedTestnet) waitping(from NodeID) error { return nil }
-func (*preminedTestnet) ping(toid NodeID, toaddr *net.UDPAddr) error { return nil }
+func (*preminedTestnet) close() {}
+func (*preminedTestnet) waitping(from enode.ID) error { return nil }
+func (*preminedTestnet) ping(toid enode.ID, toaddr *net.UDPAddr) error { return nil }
// mine generates a testnet struct literal with nodes at
// various distances to the given target.
-func (n *preminedTestnet) mine(target NodeID) {
- n.target = target
- n.targetSha = crypto.Keccak256Hash(n.target[:])
+func (tn *preminedTestnet) mine(target encPubkey) {
+ tn.target = target
+ tn.targetSha = tn.target.id()
found := 0
for found < bucketSize*10 {
k := newkey()
- id := PubkeyID(&k.PublicKey)
- sha := crypto.Keccak256Hash(id[:])
- ld := logdist(n.targetSha, sha)
- if len(n.dists[ld]) < bucketSize {
- n.dists[ld] = append(n.dists[ld], id)
+ key := encodePubkey(&k.PublicKey)
+ ld := enode.LogDist(tn.targetSha, key.id())
+ if len(tn.dists[ld]) < bucketSize {
+ tn.dists[ld] = append(tn.dists[ld], key)
fmt.Println("found ID with ld", ld)
found++
}
}
fmt.Println("&preminedTestnet{")
- fmt.Printf(" target: %#v,\n", n.target)
- fmt.Printf(" targetSha: %#v,\n", n.targetSha)
- fmt.Printf(" dists: [%d][]NodeID{\n", len(n.dists))
- for ld, ns := range n.dists {
+ fmt.Printf(" target: %#v,\n", tn.target)
+ fmt.Printf(" targetSha: %#v,\n", tn.targetSha)
+ fmt.Printf(" dists: [%d][]encPubkey{\n", len(tn.dists))
+ for ld, ns := range tn.dists {
if len(ns) == 0 {
continue
}
- fmt.Printf(" %d: []NodeID{\n", ld)
+ fmt.Printf(" %d: []encPubkey{\n", ld)
for _, n := range ns {
- fmt.Printf(" MustHexID(\"%x\"),\n", n[:])
+ fmt.Printf(" hexEncPubkey(\"%x\"),\n", n[:])
}
fmt.Println(" },")
}
@@ -616,40 +569,6 @@ func (n *preminedTestnet) mine(target NodeID) {
fmt.Println("}")
}
-func hasDuplicates(slice []*Node) bool {
- seen := make(map[NodeID]bool)
- for i, e := range slice {
- if e == nil {
- panic(fmt.Sprintf("nil *Node at %d", i))
- }
- if seen[e.ID] {
- return true
- }
- seen[e.ID] = true
- }
- return false
-}
-
-func sortedByDistanceTo(distbase common.Hash, slice []*Node) bool {
- var last common.Hash
- for i, e := range slice {
- if i > 0 && distcmp(distbase, e.sha, last) < 0 {
- return false
- }
- last = e.sha
- }
- return true
-}
-
-func contains(ns []*Node, id NodeID) bool {
- for _, n := range ns {
- if n.ID == id {
- return true
- }
- }
- return false
-}
-
// gen wraps quick.Value so it's easier to use.
// it generates a random value of the given value's type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
@@ -660,6 +579,13 @@ func gen(typ interface{}, rand *rand.Rand) interface{} {
return v.Interface()
}
+func quickcfg() *quick.Config {
+ return &quick.Config{
+ MaxCount: 5000,
+ Rand: rand.New(rand.NewSource(time.Now().Unix())),
+ }
+}
+
func newkey() *ecdsa.PrivateKey {
key, err := crypto.GenerateKey()
if err != nil {
diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go
new file mode 100644
index 000000000000..cefca94b8dbd
--- /dev/null
+++ b/p2p/discover/table_util_test.go
@@ -0,0 +1,182 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package discover
+
+import (
+ "crypto/ecdsa"
+ "encoding/hex"
+ "fmt"
+ "math/rand"
+ "net"
+ "sync"
+
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+)
+
+var nullNode *enode.Node
+
+func init() {
+ var r enr.Record
+ r.Set(enr.IP{0, 0, 0, 0})
+ nullNode = enode.SignNull(&r, enode.ID{})
+}
+
+func newTestTable(t transport) (*Table, *enode.DB) {
+ db, _ := enode.OpenDB("")
+ tab, _ := newTable(t, db, nil)
+ return tab, db
+}
+
+// nodeAtDistance creates a node for which enode.LogDist(base, n.id) == ld.
+func nodeAtDistance(base enode.ID, ld int, ip net.IP) *node {
+ var r enr.Record
+ r.Set(enr.IP(ip))
+ return wrapNode(enode.SignNull(&r, idAtDistance(base, ld)))
+}
+
+// idAtDistance returns a random hash such that enode.LogDist(a, b) == n
+func idAtDistance(a enode.ID, n int) (b enode.ID) {
+ if n == 0 {
+ return a
+ }
+ // flip bit at position n, fill the rest with random bits
+ b = a
+ pos := len(a) - n/8 - 1
+ bit := byte(0x01) << (byte(n%8) - 1)
+ if bit == 0 {
+ pos++
+ bit = 0x80
+ }
+ b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
+ for i := pos + 1; i < len(a); i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
+
+func intIP(i int) net.IP {
+ return net.IP{byte(i), 0, 2, byte(i)}
+}
+
+// fillBucket inserts nodes into the given bucket until it is full.
+func fillBucket(tab *Table, n *node) (last *node) {
+ ld := enode.LogDist(tab.self().ID(), n.ID())
+ b := tab.bucket(n.ID())
+ for len(b.entries) < bucketSize {
+ b.entries = append(b.entries, nodeAtDistance(tab.self().ID(), ld, intIP(ld)))
+ }
+ return b.entries[bucketSize-1]
+}
+
+type pingRecorder struct {
+ mu sync.Mutex
+ dead, pinged map[enode.ID]bool
+ n *enode.Node
+}
+
+func newPingRecorder() *pingRecorder {
+ var r enr.Record
+ r.Set(enr.IP{0, 0, 0, 0})
+ n := enode.SignNull(&r, enode.ID{})
+
+ return &pingRecorder{
+ dead: make(map[enode.ID]bool),
+ pinged: make(map[enode.ID]bool),
+ n: n,
+ }
+}
+
+func (t *pingRecorder) self() *enode.Node {
+ return nullNode
+}
+
+func (t *pingRecorder) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
+ return nil, nil
+}
+
+func (t *pingRecorder) waitping(from enode.ID) error {
+ return nil // remote always pings
+}
+
+func (t *pingRecorder) ping(toid enode.ID, toaddr *net.UDPAddr) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.pinged[toid] = true
+ if t.dead[toid] {
+ return errTimeout
+ } else {
+ return nil
+ }
+}
+
+func (t *pingRecorder) close() {}
+
+func hasDuplicates(slice []*node) bool {
+ seen := make(map[enode.ID]bool)
+ for i, e := range slice {
+ if e == nil {
+ panic(fmt.Sprintf("nil *Node at %d", i))
+ }
+ if seen[e.ID()] {
+ return true
+ }
+ seen[e.ID()] = true
+ }
+ return false
+}
+
+func contains(ns []*node, id enode.ID) bool {
+ for _, n := range ns {
+ if n.ID() == id {
+ return true
+ }
+ }
+ return false
+}
+
+func sortedByDistanceTo(distbase enode.ID, slice []*node) bool {
+ var last enode.ID
+ for i, e := range slice {
+ if i > 0 && enode.DistCmp(distbase, e.ID(), last) < 0 {
+ return false
+ }
+ last = e.ID()
+ }
+ return true
+}
+
+func hexEncPubkey(h string) (ret encPubkey) {
+ b, err := hex.DecodeString(h)
+ if err != nil {
+ panic(err)
+ }
+ if len(b) != len(ret) {
+ panic("invalid length")
+ }
+ copy(ret[:], b)
+ return ret
+}
+
+func hexPubkey(h string) *ecdsa.PublicKey {
+ k, err := decodePubkey(hexEncPubkey(h))
+ if err != nil {
+ panic(err)
+ }
+ return k
+}
diff --git a/p2p/discover/udp.go b/p2p/discover/udp.go
index 701747bb0442..47ee761986fa 100644
--- a/p2p/discover/udp.go
+++ b/p2p/discover/udp.go
@@ -23,11 +23,12 @@ import (
"errors"
"fmt"
"net"
+ "sync"
"time"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/nat"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -48,9 +49,9 @@ var (
// Timeouts
const (
- respTimeout = 500 * time.Millisecond
- sendTimeout = 500 * time.Millisecond
- expiration = 20 * time.Second
+ respTimeout = 500 * time.Millisecond
+ expiration = 20 * time.Second
+ bondExpiration = 24 * time.Hour
ntpFailureThreshold = 32 // Continuous timeouts after which to check NTP
ntpWarningCooldown = 10 * time.Minute // Minimum amount of time to pass before repeating NTP warning
@@ -91,7 +92,7 @@ type (
// findnode is a query for nodes close to the given target.
findnode struct {
- Target NodeID // doesn't need to be an actual public key
+ Target encPubkey
Expiration uint64
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
@@ -109,7 +110,7 @@ type (
IP net.IP // len 4 for IPv4 or 16 for IPv6
UDP uint16 // for discovery protocol
TCP uint16 // for RLPx protocol
- ID NodeID
+ ID encPubkey
}
rpcEndpoint struct {
@@ -120,14 +121,16 @@ type (
)
func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint {
- ip := addr.IP.To4()
- if ip == nil {
- ip = addr.IP.To16()
+ ip := net.IP{}
+ if ip4 := addr.IP.To4(); ip4 != nil {
+ ip = ip4
+ } else if ip6 := addr.IP.To16(); ip6 != nil {
+ ip = ip6
}
return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort}
}
-func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
+func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*node, error) {
if rn.UDP <= 1024 {
return nil, errors.New("low port")
}
@@ -137,17 +140,26 @@ func (t *udp) nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) {
if t.netrestrict != nil && !t.netrestrict.Contains(rn.IP) {
return nil, errors.New("not contained in netrestrict whitelist")
}
- n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP)
- err := n.validateComplete()
+ key, err := decodePubkey(rn.ID)
+ if err != nil {
+ return nil, err
+ }
+ n := wrapNode(enode.NewV4(key, rn.IP, int(rn.TCP), int(rn.UDP)))
+ err = n.ValidateComplete()
return n, err
}
-func nodeToRPC(n *Node) rpcNode {
- return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP}
+func nodeToRPC(n *node) rpcNode {
+ var key ecdsa.PublicKey
+ var ekey encPubkey
+ if err := n.Load((*enode.Secp256k1)(&key)); err == nil {
+ ekey = encodePubkey(&key)
+ }
+ return rpcNode{ID: ekey, IP: n.IP(), UDP: uint16(n.UDP()), TCP: uint16(n.TCP())}
}
type packet interface {
- handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error
+ handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error
name() string
}
@@ -158,20 +170,19 @@ type conn interface {
LocalAddr() net.Addr
}
-// udp implements the RPC protocol.
+// udp implements the discovery v4 UDP wire protocol.
type udp struct {
conn conn
netrestrict *netutil.Netlist
priv *ecdsa.PrivateKey
- ourEndpoint rpcEndpoint
+ localNode *enode.LocalNode
+ db *enode.DB
+ tab *Table
+ wg sync.WaitGroup
addpending chan *pending
gotreply chan reply
-
- closing chan struct{}
- nat nat.Interface
-
- *Table
+ closing chan struct{}
}
// pending represents a pending reply.
@@ -185,7 +196,7 @@ type udp struct {
// to all the callback functions for that node.
type pending struct {
// these fields must match in the reply.
- from NodeID
+ from enode.ID
ptype byte
// time when the request must complete
@@ -203,7 +214,7 @@ type pending struct {
}
type reply struct {
- from NodeID
+ from enode.ID
ptype byte
data interface{}
// loop indicates whether there was
@@ -223,82 +234,106 @@ type Config struct {
PrivateKey *ecdsa.PrivateKey
// These settings are optional:
- AnnounceAddr *net.UDPAddr // local address announced in the DHT
- NodeDBPath string // if set, the node database is stored at this filesystem location
- NetRestrict *netutil.Netlist // network whitelist
- Bootnodes []*Node // list of bootstrap nodes
- Unhandled chan<- ReadPacket // unhandled packets are sent on this channel
+ NetRestrict *netutil.Netlist // network whitelist
+ Bootnodes []*enode.Node // list of bootstrap nodes
+ Unhandled chan<- ReadPacket // unhandled packets are sent on this channel
}
// ListenUDP returns a new table that listens for UDP packets on laddr.
-func ListenUDP(c conn, cfg Config) (*Table, error) {
- tab, _, err := newUDP(c, cfg)
+func ListenUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, error) {
+ tab, _, err := newUDP(c, ln, cfg)
if err != nil {
return nil, err
}
- log.Info("UDP listener up", "self", tab.self)
return tab, nil
}
-func newUDP(c conn, cfg Config) (*Table, *udp, error) {
+func newUDP(c conn, ln *enode.LocalNode, cfg Config) (*Table, *udp, error) {
udp := &udp{
conn: c,
priv: cfg.PrivateKey,
netrestrict: cfg.NetRestrict,
+ localNode: ln,
+ db: ln.Database(),
closing: make(chan struct{}),
gotreply: make(chan reply),
addpending: make(chan *pending),
}
- realaddr := c.LocalAddr().(*net.UDPAddr)
- if cfg.AnnounceAddr != nil {
- realaddr = cfg.AnnounceAddr
- }
- // TODO: separate TCP port
- udp.ourEndpoint = makeEndpoint(realaddr, uint16(realaddr.Port))
- tab, err := newTable(udp, PubkeyID(&cfg.PrivateKey.PublicKey), realaddr, cfg.NodeDBPath, cfg.Bootnodes)
+ tab, err := newTable(udp, ln.Database(), cfg.Bootnodes)
if err != nil {
return nil, nil, err
}
- udp.Table = tab
+ udp.tab = tab
+ udp.wg.Add(2)
go udp.loop()
go udp.readLoop(cfg.Unhandled)
- return udp.Table, udp, nil
+ return udp.tab, udp, nil
+}
+
+func (t *udp) self() *enode.Node {
+ return t.localNode.Node()
}
func (t *udp) close() {
close(t.closing)
t.conn.Close()
- // TODO: wait for the loops to end.
+ t.wg.Wait()
+}
+
+func (t *udp) ourEndpoint() rpcEndpoint {
+ n := t.self()
+ a := &net.UDPAddr{IP: n.IP(), Port: n.UDP()}
+ return makeEndpoint(a, uint16(n.TCP()))
}
// ping sends a ping message to the given node and waits for a reply.
-func (t *udp) ping(toid NodeID, toaddr *net.UDPAddr) error {
+func (t *udp) ping(toid enode.ID, toaddr *net.UDPAddr) error {
+ return <-t.sendPing(toid, toaddr, nil)
+}
+
+// sendPing sends a ping message to the given node and invokes the callback
+// when the reply arrives.
+func (t *udp) sendPing(toid enode.ID, toaddr *net.UDPAddr, callback func()) <-chan error {
req := &ping{
- Version: Version,
- From: t.ourEndpoint,
+ Version: 4,
+ From: t.ourEndpoint(),
To: makeEndpoint(toaddr, 0), // TODO: maybe use known TCP port from DB
Expiration: uint64(time.Now().Add(expiration).Unix()),
}
packet, hash, err := encodePacket(t.priv, pingXDC, req)
if err != nil {
- return err
+ errc := make(chan error, 1)
+ errc <- err
+ return errc
}
errc := t.pending(toid, pongPacket, func(p interface{}) bool {
- return bytes.Equal(p.(*pong).ReplyTok, hash)
+ ok := bytes.Equal(p.(*pong).ReplyTok, hash)
+ if ok && callback != nil {
+ callback()
+ }
+ return ok
})
+ t.localNode.UDPContact(toaddr)
t.write(toaddr, req.name(), packet)
- return <-errc
+ return errc
}
-func (t *udp) waitping(from NodeID) error {
+func (t *udp) waitping(from enode.ID) error {
return <-t.pending(from, pingXDC, func(interface{}) bool { return true })
}
// findnode sends a findnode request to the given node and waits until
// the node has sent up to k neighbors.
-func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node, error) {
- nodes := make([]*Node, 0, bucketSize)
+func (t *udp) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) {
+ // If we haven't seen a ping from the destination node for a while, it won't remember
+ // our endpoint proof and reject findnode. Solicit a ping first.
+ if time.Since(t.db.LastPingReceived(toid)) > bondExpiration {
+ t.ping(toid, toaddr)
+ t.waitping(toid)
+ }
+
+ nodes := make([]*node, 0, bucketSize)
nreceived := 0
errc := t.pending(toid, neighborsPacket, func(r interface{}) bool {
reply := r.(*neighbors)
@@ -323,7 +358,7 @@ func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node
// pending adds a reply callback to the pending reply queue.
// see the documentation of type pending for a detailed explanation.
-func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-chan error {
+func (t *udp) pending(id enode.ID, ptype byte, callback func(interface{}) bool) <-chan error {
ch := make(chan error, 1)
p := &pending{from: id, ptype: ptype, callback: callback, errc: ch}
select {
@@ -335,7 +370,7 @@ func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-
return ch
}
-func (t *udp) handleReply(from NodeID, ptype byte, req packet) bool {
+func (t *udp) handleReply(from enode.ID, ptype byte, req packet) bool {
matched := make(chan bool, 1)
select {
case t.gotreply <- reply{from, ptype, req, matched}:
@@ -349,6 +384,8 @@ func (t *udp) handleReply(from NodeID, ptype byte, req packet) bool {
// loop runs in its own goroutine. it keeps track of
// the refresh timer and the pending reply queue.
func (t *udp) loop() {
+ defer t.wg.Done()
+
var (
plist = list.New()
timeout = time.NewTimer(0)
@@ -510,10 +547,11 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (packet,
// readLoop runs in its own goroutine. it handles incoming UDP packets.
func (t *udp) readLoop(unhandled chan<- ReadPacket) {
- defer t.conn.Close()
+ defer t.wg.Done()
if unhandled != nil {
defer close(unhandled)
}
+
// Discovery packets are defined to be no larger than 1280 bytes.
// Packets larger than this size will be cut at the end and treated
// as invalid because their hash won't match.
@@ -549,19 +587,20 @@ func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
return err
}
-func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
+func decodePacket(buf []byte) (packet, encPubkey, []byte, error) {
if len(buf) < headSize+1 {
- return nil, NodeID{}, nil, errPacketTooSmall
+ return nil, encPubkey{}, nil, errPacketTooSmall
}
hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
shouldhash := crypto.Keccak256(buf[macSize:])
if !bytes.Equal(hash, shouldhash) {
- return nil, NodeID{}, nil, errBadHash
+ return nil, encPubkey{}, nil, errBadHash
}
- fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig)
+ fromKey, err := recoverNodeKey(crypto.Keccak256(buf[headSize:]), sig)
if err != nil {
- return nil, NodeID{}, hash, err
+ return nil, fromKey, hash, err
}
+
var req packet
switch ptype := sigdata[0]; ptype {
case pingXDC:
@@ -573,68 +612,80 @@ func decodePacket(buf []byte) (packet, NodeID, []byte, error) {
case neighborsPacket:
req = new(neighbors)
default:
- return nil, fromID, hash, fmt.Errorf("unknown type: %d", ptype)
+ return nil, fromKey, hash, fmt.Errorf("unknown type: %d", ptype)
}
s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0)
err = s.Decode(req)
- return req, fromID, hash, err
+ return req, fromKey, hash, err
}
-func (req *ping) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
+func (req *ping) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
+ key, err := decodePubkey(fromKey)
+ if err != nil {
+ return fmt.Errorf("invalid public key: %v", err)
+ }
t.send(from, pongPacket, &pong{
To: makeEndpoint(from, req.From.TCP),
ReplyTok: mac,
Expiration: uint64(time.Now().Add(expiration).Unix()),
})
- if !t.handleReply(fromID, pingXDC, req) {
- // Note: we're ignoring the provided IP address right now
- go t.bond(true, fromID, from, req.From.TCP)
- }
+ n := wrapNode(enode.NewV4(key, from.IP, int(req.From.TCP), from.Port))
+ t.handleReply(n.ID(), pingXDC, req)
+ if time.Since(t.db.LastPongReceived(n.ID())) > bondExpiration {
+ t.sendPing(n.ID(), from, func() { t.tab.addThroughPing(n) })
+ } else {
+ t.tab.addThroughPing(n)
+ }
+ t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
+ t.db.UpdateLastPingReceived(n.ID(), time.Now())
return nil
}
func (req *ping) name() string { return "PING XDC/v4" }
-func (req *pong) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
+func (req *pong) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
+ fromID := fromKey.id()
if !t.handleReply(fromID, pongPacket, req) {
return errUnsolicitedReply
}
+ t.localNode.UDPEndpointStatement(from, &net.UDPAddr{IP: req.To.IP, Port: int(req.To.UDP)})
+ t.db.UpdateLastPongReceived(fromID, time.Now())
return nil
}
func (req *pong) name() string { return "PONG/v4" }
-func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
+func (req *findnode) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
- if !t.db.hasBond(fromID) {
- // No bond exists, we don't process the packet. This prevents
- // an attack vector where the discovery protocol could be used
- // to amplify traffic in a DDOS attack. A malicious actor
- // would send a findnode request with the IP address and UDP
- // port of the target as the source address. The recipient of
- // the findnode packet would then send a neighbors packet
- // (which is a much bigger packet than findnode) to the victim.
+ fromID := fromKey.id()
+ if time.Since(t.db.LastPongReceived(fromID)) > bondExpiration {
+ // No endpoint proof pong exists, we don't process the packet. This prevents an
+ // attack vector where the discovery protocol could be used to amplify traffic in a
+ // DDOS attack. A malicious actor would send a findnode request with the IP address
+ // and UDP port of the target as the source address. The recipient of the findnode
+ // packet would then send a neighbors packet (which is a much bigger packet than
+ // findnode) to the victim.
return errUnknownNode
}
- target := crypto.Keccak256Hash(req.Target[:])
- t.mutex.Lock()
- closest := t.closest(target, bucketSize).entries
- t.mutex.Unlock()
+ target := enode.ID(crypto.Keccak256Hash(req.Target[:]))
+ t.tab.mutex.Lock()
+ closest := t.tab.closest(target, bucketSize).entries
+ t.tab.mutex.Unlock()
log.Trace("find neighbors ", "from", from, "fromID", fromID, "closest", len(closest))
p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())}
var sent bool
// Send neighbors in chunks with at most maxNeighbors per packet
// to stay below the 1280 byte limit.
for _, n := range closest {
- if netutil.CheckRelayIP(from.IP, n.IP) == nil {
+ if netutil.CheckRelayIP(from.IP, n.IP()) == nil {
p.Nodes = append(p.Nodes, nodeToRPC(n))
}
if len(p.Nodes) == maxNeighbors {
@@ -651,11 +702,11 @@ func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte
func (req *findnode) name() string { return "FINDNODE/v4" }
-func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
+func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromKey encPubkey, mac []byte) error {
if expired(req.Expiration) {
return errExpired
}
- if !t.handleReply(fromID, neighborsPacket, req) {
+ if !t.handleReply(fromKey.id(), neighborsPacket, req) {
return errUnsolicitedReply
}
return nil
diff --git a/p2p/discover/udp_test.go b/p2p/discover/udp_test.go
index a559a1eeee42..a185f9f00615 100644
--- a/p2p/discover/udp_test.go
+++ b/p2p/discover/udp_test.go
@@ -35,6 +35,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rlp"
"github.com/davecgh/go-spew/spew"
)
@@ -46,7 +47,7 @@ func init() {
// shared test variables
var (
futureExp = uint64(time.Now().Add(10 * time.Hour).Unix())
- testTarget = NodeID{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}
+ testTarget = encPubkey{0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1}
testRemote = rpcEndpoint{IP: net.ParseIP("1.1.1.1").To4(), UDP: 1, TCP: 2}
testLocalAnnounced = rpcEndpoint{IP: net.ParseIP("2.2.2.2").To4(), UDP: 3, TCP: 4}
testLocal = rpcEndpoint{IP: net.ParseIP("3.3.3.3").To4(), UDP: 5, TCP: 6}
@@ -70,7 +71,9 @@ func newUDPTest(t *testing.T) *udpTest {
remotekey: newkey(),
remoteaddr: &net.UDPAddr{IP: net.IP{10, 0, 1, 99}, Port: 30303},
}
- test.table, test.udp, _ = newUDP(test.pipe, Config{PrivateKey: test.localkey})
+ db, _ := enode.OpenDB("")
+ ln := enode.NewLocalNode(db, test.localkey)
+ test.table, test.udp, _ = newUDP(test.pipe, ln, Config{PrivateKey: test.localkey})
// Wait for initial refresh so the table doesn't send unexpected findnode.
<-test.table.initDone
return test
@@ -136,7 +139,7 @@ func TestUDP_pingTimeout(t *testing.T) {
defer test.table.Close()
toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222}
- toid := NodeID{1, 2, 3, 4}
+ toid := enode.ID{1, 2, 3, 4}
if err := test.udp.ping(toid, toaddr); err != errTimeout {
t.Error("expected timeout error, got", err)
}
@@ -220,8 +223,8 @@ func TestUDP_findnodeTimeout(t *testing.T) {
defer test.table.Close()
toaddr := &net.UDPAddr{IP: net.ParseIP("1.2.3.4"), Port: 2222}
- toid := NodeID{1, 2, 3, 4}
- target := NodeID{4, 5, 6, 7}
+ toid := enode.ID{1, 2, 3, 4}
+ target := encPubkey{4, 5, 6, 7}
result, err := test.udp.findnode(toid, toaddr, target)
if err != errTimeout {
t.Error("expected timeout error, got", err)
@@ -239,28 +242,30 @@ func TestUDP_findnode(t *testing.T) {
// put a few nodes into the table. their exact
// distribution shouldn't matter much, although we need to
// take care not to overflow any bucket.
- targetHash := crypto.Keccak256Hash(testTarget[:])
- nodes := &nodesByDistance{target: targetHash}
- for i := 0; i < bucketSizeTest; i++ {
- nodes.push(nodeAtDistance(test.table.self.sha, i+2), bucketSizeTest)
+ nodes := &nodesByDistance{target: testTarget.id()}
+ for i := 0; i < bucketSize; i++ {
+ key := newkey()
+ n := wrapNode(enode.NewV4(&key.PublicKey, net.IP{10, 13, 0, 1}, 0, i))
+ nodes.push(n, bucketSize)
}
test.table.stuff(nodes.entries)
// ensure there's a bond with the test node,
// findnode won't be accepted otherwise.
- test.table.db.updateBondTime(PubkeyID(&test.remotekey.PublicKey), time.Now())
+ remoteID := encodePubkey(&test.remotekey.PublicKey).id()
+ test.table.db.UpdateLastPongReceived(remoteID, time.Now())
// check that closest neighbors are returned.
test.packetIn(nil, findnodePacket, &findnode{Target: testTarget, Expiration: futureExp})
- expected := test.table.closest(targetHash, bucketSizeTest)
+ expected := test.table.closest(testTarget.id(), bucketSize)
- waitNeighbors := func(want []*Node) {
+ waitNeighbors := func(want []*node) {
test.waitPacketOut(func(p *neighbors) {
if len(p.Nodes) != len(want) {
t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSizeTest)
}
for i := range p.Nodes {
- if p.Nodes[i].ID != want[i].ID {
+ if p.Nodes[i].ID.id() != want[i].ID() {
t.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, p.Nodes[i], expected.entries[i])
}
}
@@ -274,10 +279,13 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
test := newUDPTest(t)
defer test.table.Close()
+ rid := enode.PubkeyToIDV4(&test.remotekey.PublicKey)
+ test.table.db.UpdateLastPingReceived(rid, time.Now())
+
// queue a pending findnode request
- resultc, errc := make(chan []*Node), make(chan error)
+ resultc, errc := make(chan []*node), make(chan error)
go func() {
- rid := PubkeyID(&test.remotekey.PublicKey)
+ rid := encodePubkey(&test.remotekey.PublicKey).id()
ns, err := test.udp.findnode(rid, test.remoteaddr, testTarget)
if err != nil && len(ns) == 0 {
errc <- err
@@ -295,11 +303,11 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
})
// send the reply as two packets.
- list := []*Node{
- MustParseNode("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304"),
- MustParseNode("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303"),
- MustParseNode("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17"),
- MustParseNode("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303"),
+ list := []*node{
+ wrapNode(enode.MustParseV4("enode://ba85011c70bcc5c04d8607d3a0ed29aa6179c092cbdda10d5d32684fb33ed01bd94f588ca8f91ac48318087dcb02eaf36773a7a453f0eedd6742af668097b29c@10.0.1.16:30303?discport=30304")),
+ wrapNode(enode.MustParseV4("enode://81fa361d25f157cd421c60dcc28d8dac5ef6a89476633339c5df30287474520caca09627da18543d9079b5b288698b542d56167aa5c09111e55acdbbdf2ef799@10.0.1.16:30303")),
+ wrapNode(enode.MustParseV4("enode://9bffefd833d53fac8e652415f4973bee289e8b1a5c6c4cbe70abf817ce8a64cee11b823b66a987f51aaa9fba0d6a91b3e6bf0d5a5d1042de8e9eeea057b217f8@10.0.1.36:30301?discport=17")),
+ wrapNode(enode.MustParseV4("enode://1b5b4aa662d7cb44a7221bfba67302590b643028197a7d5214790f3bac7aaa4a3241be9e83c09cf1f6c69d007c634faae3dc1b1221793e8446c0b3a09de65960@10.0.1.16:30303")),
}
rpclist := make([]rpcNode, len(list))
for i := range list {
@@ -324,8 +332,8 @@ func TestUDP_findnodeMultiReply(t *testing.T) {
func TestUDP_successfulPing(t *testing.T) {
test := newUDPTest(t)
- added := make(chan *Node, 1)
- test.table.nodeAddedHook = func(n *Node) { added <- n }
+ added := make(chan *node, 1)
+ test.table.nodeAddedHook = func(n *node) { added <- n }
defer test.table.Close()
// The remote side sends a ping packet to initiate the exchange.
@@ -350,12 +358,13 @@ func TestUDP_successfulPing(t *testing.T) {
// remote is unknown, the table pings back.
hash, _ := test.waitPacketOut(func(p *ping) error {
- if !reflect.DeepEqual(p.From, test.udp.ourEndpoint) {
- t.Errorf("got ping.From %v, want %v", p.From, test.udp.ourEndpoint)
+ if !reflect.DeepEqual(p.From, test.udp.ourEndpoint()) {
+ t.Errorf("got ping.From %#v, want %#v", p.From, test.udp.ourEndpoint())
}
wantTo := rpcEndpoint{
// The mirrored UDP address is the UDP packet sender.
- IP: test.remoteaddr.IP, UDP: uint16(test.remoteaddr.Port),
+ IP: test.remoteaddr.IP,
+ UDP: uint16(test.remoteaddr.Port),
TCP: 0,
}
if !reflect.DeepEqual(p.To, wantTo) {
@@ -369,18 +378,18 @@ func TestUDP_successfulPing(t *testing.T) {
// pong packet.
select {
case n := <-added:
- rid := PubkeyID(&test.remotekey.PublicKey)
- if n.ID != rid {
- t.Errorf("node has wrong ID: got %v, want %v", n.ID, rid)
+ rid := encodePubkey(&test.remotekey.PublicKey).id()
+ if n.ID() != rid {
+ t.Errorf("node has wrong ID: got %v, want %v", n.ID(), rid)
}
- if !n.IP.Equal(test.remoteaddr.IP) {
- t.Errorf("node has wrong IP: got %v, want: %v", n.IP, test.remoteaddr.IP)
+ if !n.IP().Equal(test.remoteaddr.IP) {
+ t.Errorf("node has wrong IP: got %v, want: %v", n.IP(), test.remoteaddr.IP)
}
- if int(n.UDP) != test.remoteaddr.Port {
- t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP, test.remoteaddr.Port)
+ if int(n.UDP()) != test.remoteaddr.Port {
+ t.Errorf("node has wrong UDP port: got %v, want: %v", n.UDP(), test.remoteaddr.Port)
}
- if n.TCP != testRemote.TCP {
- t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP, testRemote.TCP)
+ if n.TCP() != int(testRemote.TCP) {
+ t.Errorf("node has wrong TCP port: got %v, want: %v", n.TCP(), testRemote.TCP)
}
case <-time.After(2 * time.Second):
t.Errorf("node was not added within 2 seconds")
@@ -433,7 +442,7 @@ var testPackets = []struct {
{
input: "c7c44041b9f7c7e41934417ebac9a8e1a4c6298f74553f2fcfdcae6ed6fe53163eb3d2b52e39fe91831b8a927bf4fc222c3902202027e5e9eb812195f95d20061ef5cd31d502e47ecb61183f74a504fe04c51e73df81f25c4d506b26db4517490103f84eb840ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f8443b9a35582999983999999280dc62cc8255c73471e0a61da0c89acdc0e035e260add7fc0c04ad9ebf3919644c91cb247affc82b69bd2ca235c71eab8e49737c937a2c396",
wantPacket: &findnode{
- Target: MustHexID("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"),
+ Target: hexEncPubkey("ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31387574077f301b421bc84df7266c44e9e6d569fc56be00812904767bf5ccd1fc7f"),
Expiration: 1136239445,
Rest: []rlp.RawValue{{0x82, 0x99, 0x99}, {0x83, 0x99, 0x99, 0x99}},
},
@@ -443,25 +452,25 @@ var testPackets = []struct {
wantPacket: &neighbors{
Nodes: []rpcNode{
{
- ID: MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
+ ID: hexEncPubkey("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"),
IP: net.ParseIP("99.33.22.55").To4(),
UDP: 4444,
TCP: 4445,
},
{
- ID: MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
+ ID: hexEncPubkey("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"),
IP: net.ParseIP("1.2.3.4").To4(),
UDP: 1,
TCP: 1,
},
{
- ID: MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
+ ID: hexEncPubkey("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"),
IP: net.ParseIP("2001:db8:3c4d:15::abcd:ef12"),
UDP: 3333,
TCP: 3333,
},
{
- ID: MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
+ ID: hexEncPubkey("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"),
IP: net.ParseIP("2001:db8:85a3:8d3:1319:8a2e:370:7348"),
UDP: 999,
TCP: 1000,
@@ -475,13 +484,14 @@ var testPackets = []struct {
func TestForwardCompatibility(t *testing.T) {
testkey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- wantNodeID := PubkeyID(&testkey.PublicKey)
+ wantNodeKey := encodePubkey(&testkey.PublicKey)
+
for _, test := range testPackets {
input, err := hex.DecodeString(test.input)
if err != nil {
t.Fatalf("invalid hex: %s", test.input)
}
- packet, nodeid, _, err := decodePacket(input)
+ packet, nodekey, _, err := decodePacket(input)
if err != nil {
t.Errorf("did not accept packet %s\n%v", test.input, err)
continue
@@ -489,8 +499,8 @@ func TestForwardCompatibility(t *testing.T) {
if !reflect.DeepEqual(packet, test.wantPacket) {
t.Errorf("got %s\nwant %s", spew.Sdump(packet), spew.Sdump(test.wantPacket))
}
- if nodeid != wantNodeID {
- t.Errorf("got id %v\nwant id %v", nodeid, wantNodeID)
+ if nodekey != wantNodeKey {
+ t.Errorf("got id %v\nwant id %v", nodekey, wantNodeKey)
}
}
}
diff --git a/p2p/discv5/udp.go b/p2p/discv5/udp.go
index 9dc9e557a21a..6b3026d5517b 100644
--- a/p2p/discv5/udp.go
+++ b/p2p/discv5/udp.go
@@ -238,7 +238,8 @@ type udp struct {
}
// ListenUDP returns a new table that listens for UDP packets on laddr.
-func ListenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr, nodeDBPath string, netrestrict *netutil.Netlist) (*Network, error) {
+func ListenUDP(priv *ecdsa.PrivateKey, conn conn, nodeDBPath string, netrestrict *netutil.Netlist) (*Network, error) {
+ realaddr := conn.LocalAddr().(*net.UDPAddr)
transport, err := listenUDP(priv, conn, realaddr)
if err != nil {
return nil, err
diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go
new file mode 100644
index 000000000000..2cef32f646a3
--- /dev/null
+++ b/p2p/enode/idscheme.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+ "io"
+
+ "github.com/XinFinOrg/XDPoSChain/common/math"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/crypto/sha3"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+)
+
+// List of known secure identity schemes.
+var ValidSchemes = enr.SchemeMap{
+ "v4": V4ID{},
+}
+
+var ValidSchemesForTesting = enr.SchemeMap{
+ "v4": V4ID{},
+ "null": NullID{},
+}
+
+// V4ID is the "v4" identity scheme.
+type V4ID struct{}
+
+// SignV4 signs a record using the v4 scheme.
+func SignV4(r *enr.Record, privkey *ecdsa.PrivateKey) error {
+ // Copy r to avoid modifying it if signing fails.
+ cpy := *r
+ cpy.Set(enr.ID("v4"))
+ cpy.Set(Secp256k1(privkey.PublicKey))
+
+ h := sha3.NewKeccak256()
+ rlp.Encode(h, cpy.AppendElements(nil))
+ sig, err := crypto.Sign(h.Sum(nil), privkey)
+ if err != nil {
+ return err
+ }
+ sig = sig[:len(sig)-1] // remove v
+ if err = cpy.SetSig(V4ID{}, sig); err == nil {
+ *r = cpy
+ }
+ return err
+}
+
+func (V4ID) Verify(r *enr.Record, sig []byte) error {
+ var entry s256raw
+ if err := r.Load(&entry); err != nil {
+ return err
+ } else if len(entry) != 33 {
+ return fmt.Errorf("invalid public key")
+ }
+
+ h := sha3.NewKeccak256()
+ rlp.Encode(h, r.AppendElements(nil))
+ if !crypto.VerifySignature(entry, h.Sum(nil), sig) {
+ return enr.ErrInvalidSig
+ }
+ return nil
+}
+
+func (V4ID) NodeAddr(r *enr.Record) []byte {
+ var pubkey Secp256k1
+ err := r.Load(&pubkey)
+ if err != nil {
+ return nil
+ }
+ buf := make([]byte, 64)
+ math.ReadBits(pubkey.X, buf[:32])
+ math.ReadBits(pubkey.Y, buf[32:])
+ return crypto.Keccak256(buf)
+}
+
+// Secp256k1 is the "secp256k1" key, which holds a public key.
+type Secp256k1 ecdsa.PublicKey
+
+func (v Secp256k1) ENRKey() string { return "secp256k1" }
+
+// EncodeRLP implements rlp.Encoder.
+func (v Secp256k1) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, crypto.CompressPubkey((*ecdsa.PublicKey)(&v)))
+}
+
+// DecodeRLP implements rlp.Decoder.
+func (v *Secp256k1) DecodeRLP(s *rlp.Stream) error {
+ buf, err := s.Bytes()
+ if err != nil {
+ return err
+ }
+ pk, err := crypto.DecompressPubkey(buf)
+ if err != nil {
+ return err
+ }
+ *v = (Secp256k1)(*pk)
+ return nil
+}
+
+// s256raw is an unparsed secp256k1 public key entry.
+type s256raw []byte
+
+func (s256raw) ENRKey() string { return "secp256k1" }
+
+// v4CompatID is a weaker and insecure version of the "v4" scheme which only checks for the
+// presence of a secp256k1 public key, but doesn't verify the signature.
+type v4CompatID struct {
+ V4ID
+}
+
+func (v4CompatID) Verify(r *enr.Record, sig []byte) error {
+ var pubkey Secp256k1
+ return r.Load(&pubkey)
+}
+
+func signV4Compat(r *enr.Record, pubkey *ecdsa.PublicKey) {
+ r.Set((*Secp256k1)(pubkey))
+ if err := r.SetSig(v4CompatID{}, []byte{}); err != nil {
+ panic(err)
+ }
+}
+
+// NullID is the "null" ENR identity scheme. This scheme stores the node
+// ID in the record without any signature.
+type NullID struct{}
+
+func (NullID) Verify(r *enr.Record, sig []byte) error {
+ return nil
+}
+
+func (NullID) NodeAddr(r *enr.Record) []byte {
+ var id ID
+ r.Load(enr.WithEntry("nulladdr", &id))
+ return id[:]
+}
+
+func SignNull(r *enr.Record, id ID) *Node {
+ r.Set(enr.ID("null"))
+ r.Set(enr.WithEntry("nulladdr", id))
+ if err := r.SetSig(NullID{}, []byte{}); err != nil {
+ panic(err)
+ }
+ return &Node{r: *r, id: id}
+}
diff --git a/p2p/enode/idscheme_test.go b/p2p/enode/idscheme_test.go
new file mode 100644
index 000000000000..a4d0bb6c25be
--- /dev/null
+++ b/p2p/enode/idscheme_test.go
@@ -0,0 +1,74 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "math/big"
+ "testing"
+
+ "github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ privkey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ pubkey = &privkey.PublicKey
+)
+
+func TestEmptyNodeID(t *testing.T) {
+ var r enr.Record
+ if addr := ValidSchemes.NodeAddr(&r); addr != nil {
+ t.Errorf("wrong address on empty record: got %v, want %v", addr, nil)
+ }
+
+ require.NoError(t, SignV4(&r, privkey))
+ expected := "a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7"
+ assert.Equal(t, expected, hex.EncodeToString(ValidSchemes.NodeAddr(&r)))
+}
+
+// Checks that failure to sign leaves the record unmodified.
+func TestSignError(t *testing.T) {
+ invalidKey := &ecdsa.PrivateKey{D: new(big.Int), PublicKey: *pubkey}
+
+ var r enr.Record
+ emptyEnc, _ := rlp.EncodeToBytes(&r)
+ if err := SignV4(&r, invalidKey); err == nil {
+ t.Fatal("expected error from SignV4")
+ }
+ newEnc, _ := rlp.EncodeToBytes(&r)
+ if !bytes.Equal(newEnc, emptyEnc) {
+ t.Fatal("record modified even though signing failed")
+ }
+}
+
+// TestGetSetSecp256k1 tests encoding/decoding and setting/getting of the Secp256k1 key.
+func TestGetSetSecp256k1(t *testing.T) {
+ var r enr.Record
+ if err := SignV4(&r, privkey); err != nil {
+ t.Fatal(err)
+ }
+
+ var pk Secp256k1
+ require.NoError(t, r.Load(&pk))
+ assert.EqualValues(t, pubkey, &pk)
+}
diff --git a/p2p/enode/localnode.go b/p2p/enode/localnode.go
new file mode 100644
index 000000000000..70c7a253f2a5
--- /dev/null
+++ b/p2p/enode/localnode.go
@@ -0,0 +1,246 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "crypto/ecdsa"
+ "fmt"
+ "net"
+ "reflect"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/XinFinOrg/XDPoSChain/log"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+ "github.com/XinFinOrg/XDPoSChain/p2p/netutil"
+)
+
+const (
+ // IP tracker configuration
+ iptrackMinStatements = 10
+ iptrackWindow = 5 * time.Minute
+ iptrackContactWindow = 10 * time.Minute
+)
+
+// LocalNode produces the signed node record of a local node, i.e. a node run in the
+// current process. Setting ENR entries via the Set method updates the record. A new version
+// of the record is signed on demand when the Node method is called.
+type LocalNode struct {
+ cur atomic.Value // holds a non-nil node pointer while the record is up-to-date.
+ id ID
+ key *ecdsa.PrivateKey
+ db *DB
+
+ // everything below is protected by a lock
+ mu sync.Mutex
+ seq uint64
+ entries map[string]enr.Entry
+ udpTrack *netutil.IPTracker // predicts external UDP endpoint
+ staticIP net.IP
+ fallbackIP net.IP
+ fallbackUDP int
+}
+
+// NewLocalNode creates a local node.
+func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode {
+ ln := &LocalNode{
+ id: PubkeyToIDV4(&key.PublicKey),
+ db: db,
+ key: key,
+ udpTrack: netutil.NewIPTracker(iptrackWindow, iptrackContactWindow, iptrackMinStatements),
+ entries: make(map[string]enr.Entry),
+ }
+ ln.seq = db.localSeq(ln.id)
+ ln.invalidate()
+ return ln
+}
+
+// Database returns the node database associated with the local node.
+func (ln *LocalNode) Database() *DB {
+ return ln.db
+}
+
+// Node returns the current version of the local node record.
+func (ln *LocalNode) Node() *Node {
+ n := ln.cur.Load().(*Node)
+ if n != nil {
+ return n
+ }
+ // Record was invalidated, sign a new copy.
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+ ln.sign()
+ return ln.cur.Load().(*Node)
+}
+
+// ID returns the local node ID.
+func (ln *LocalNode) ID() ID {
+ return ln.id
+}
+
+// Set puts the given entry into the local record, overwriting
+// any existing value.
+func (ln *LocalNode) Set(e enr.Entry) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.set(e)
+}
+
+func (ln *LocalNode) set(e enr.Entry) {
+ val, exists := ln.entries[e.ENRKey()]
+ if !exists || !reflect.DeepEqual(val, e) {
+ ln.entries[e.ENRKey()] = e
+ ln.invalidate()
+ }
+}
+
+// Delete removes the given entry from the local record.
+func (ln *LocalNode) Delete(e enr.Entry) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.delete(e)
+}
+
+func (ln *LocalNode) delete(e enr.Entry) {
+ _, exists := ln.entries[e.ENRKey()]
+ if exists {
+ delete(ln.entries, e.ENRKey())
+ ln.invalidate()
+ }
+}
+
+// SetStaticIP sets the local IP to the given one unconditionally.
+// This disables endpoint prediction.
+func (ln *LocalNode) SetStaticIP(ip net.IP) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.staticIP = ip
+ ln.updateEndpoints()
+}
+
+// SetFallbackIP sets the last-resort IP address. This address is used
+// if no endpoint prediction can be made and no static IP is set.
+func (ln *LocalNode) SetFallbackIP(ip net.IP) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.fallbackIP = ip
+ ln.updateEndpoints()
+}
+
+// SetFallbackUDP sets the last-resort UDP port. This port is used
+// if no endpoint prediction can be made.
+func (ln *LocalNode) SetFallbackUDP(port int) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.fallbackUDP = port
+ ln.updateEndpoints()
+}
+
+// UDPEndpointStatement should be called whenever a statement about the local node's
+// UDP endpoint is received. It feeds the local endpoint predictor.
+func (ln *LocalNode) UDPEndpointStatement(fromaddr, endpoint *net.UDPAddr) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.udpTrack.AddStatement(fromaddr.String(), endpoint.String())
+ ln.updateEndpoints()
+}
+
+// UDPContact should be called whenever the local node has announced itself to another node
+// via UDP. It feeds the local endpoint predictor.
+func (ln *LocalNode) UDPContact(toaddr *net.UDPAddr) {
+ ln.mu.Lock()
+ defer ln.mu.Unlock()
+
+ ln.udpTrack.AddContact(toaddr.String())
+ ln.updateEndpoints()
+}
+
+func (ln *LocalNode) updateEndpoints() {
+ // Determine the endpoints.
+ newIP := ln.fallbackIP
+ newUDP := ln.fallbackUDP
+ if ln.staticIP != nil {
+ newIP = ln.staticIP
+ } else if ip, port := predictAddr(ln.udpTrack); ip != nil {
+ newIP = ip
+ newUDP = port
+ }
+
+ // Update the record.
+ if newIP != nil && !newIP.IsUnspecified() {
+ ln.set(enr.IP(newIP))
+ if newUDP != 0 {
+ ln.set(enr.UDP(newUDP))
+ } else {
+ ln.delete(enr.UDP(0))
+ }
+ } else {
+ ln.delete(enr.IP{})
+ }
+}
+
+// predictAddr wraps IPTracker.PredictEndpoint, converting from its string-based
+// endpoint representation to IP and port types.
+func predictAddr(t *netutil.IPTracker) (net.IP, int) {
+ ep := t.PredictEndpoint()
+ if ep == "" {
+ return nil, 0
+ }
+ ipString, portString, _ := net.SplitHostPort(ep)
+ ip := net.ParseIP(ipString)
+ port, _ := strconv.Atoi(portString)
+ return ip, port
+}
+
+func (ln *LocalNode) invalidate() {
+ ln.cur.Store((*Node)(nil))
+}
+
+func (ln *LocalNode) sign() {
+ if n := ln.cur.Load().(*Node); n != nil {
+ return // no changes
+ }
+
+ var r enr.Record
+ for _, e := range ln.entries {
+ r.Set(e)
+ }
+ ln.bumpSeq()
+ r.SetSeq(ln.seq)
+ if err := SignV4(&r, ln.key); err != nil {
+ panic(fmt.Errorf("enode: can't sign record: %v", err))
+ }
+ n, err := New(ValidSchemes, &r)
+ if err != nil {
+ panic(fmt.Errorf("enode: can't verify local record: %v", err))
+ }
+ ln.cur.Store(n)
+ log.Info("New local node record", "seq", ln.seq, "id", n.ID(), "ip", n.IP(), "udp", n.UDP(), "tcp", n.TCP())
+}
+
+func (ln *LocalNode) bumpSeq() {
+ ln.seq++
+ ln.db.storeLocalSeq(ln.id, ln.seq)
+}
diff --git a/p2p/enode/localnode_test.go b/p2p/enode/localnode_test.go
new file mode 100644
index 000000000000..6bb6f0004dfb
--- /dev/null
+++ b/p2p/enode/localnode_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "testing"
+
+ "github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+)
+
+func newLocalNodeForTesting() (*LocalNode, *DB) {
+ db, _ := OpenDB("")
+ key, _ := crypto.GenerateKey()
+ return NewLocalNode(db, key), db
+}
+
+func TestLocalNode(t *testing.T) {
+ ln, db := newLocalNodeForTesting()
+ defer db.Close()
+
+ if ln.Node().ID() != ln.ID() {
+ t.Fatal("inconsistent ID")
+ }
+
+ ln.Set(enr.WithEntry("x", uint(3)))
+ var x uint
+ if err := ln.Node().Load(enr.WithEntry("x", &x)); err != nil {
+ t.Fatal("can't load entry 'x':", err)
+ } else if x != 3 {
+ t.Fatal("wrong value for entry 'x':", x)
+ }
+}
+
+func TestLocalNodeSeqPersist(t *testing.T) {
+ ln, db := newLocalNodeForTesting()
+ defer db.Close()
+
+ if s := ln.Node().Seq(); s != 1 {
+ t.Fatalf("wrong initial seq %d, want 1", s)
+ }
+ ln.Set(enr.WithEntry("x", uint(1)))
+ if s := ln.Node().Seq(); s != 2 {
+ t.Fatalf("wrong seq %d after set, want 2", s)
+ }
+
+ // Create a new instance, it should reload the sequence number.
+ // The number increases just after that because a new record is
+ // created without the "x" entry.
+ ln2 := NewLocalNode(db, ln.key)
+ if s := ln2.Node().Seq(); s != 3 {
+ t.Fatalf("wrong seq %d on new instance, want 3", s)
+ }
+
+ // Create a new instance with a different node key on the same database.
+ // This should reset the sequence number.
+ key, _ := crypto.GenerateKey()
+ ln3 := NewLocalNode(db, key)
+ if s := ln3.Node().Seq(); s != 1 {
+ t.Fatalf("wrong seq %d on instance with changed key, want 1", s)
+ }
+}
diff --git a/p2p/enode/node.go b/p2p/enode/node.go
new file mode 100644
index 000000000000..304ceb3e71a3
--- /dev/null
+++ b/p2p/enode/node.go
@@ -0,0 +1,255 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "crypto/ecdsa"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math/bits"
+ "math/rand"
+ "net"
+ "strings"
+
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+)
+
+// Node represents a host on the network.
+type Node struct {
+ r enr.Record
+ id ID
+}
+
+// New wraps a node record. The record must be valid according to the given
+// identity scheme.
+func New(validSchemes enr.IdentityScheme, r *enr.Record) (*Node, error) {
+ if err := r.VerifySignature(validSchemes); err != nil {
+ return nil, err
+ }
+ node := &Node{r: *r}
+ if n := copy(node.id[:], validSchemes.NodeAddr(&node.r)); n != len(ID{}) {
+ return nil, fmt.Errorf("invalid node ID length %d, need %d", n, len(ID{}))
+ }
+ return node, nil
+}
+
+// ID returns the node identifier.
+func (n *Node) ID() ID {
+ return n.id
+}
+
+// Seq returns the sequence number of the underlying record.
+func (n *Node) Seq() uint64 {
+ return n.r.Seq()
+}
+
+// Incomplete returns true for nodes with no IP address.
+func (n *Node) Incomplete() bool {
+ return n.IP() == nil
+}
+
+// Load retrieves an entry from the underlying record.
+func (n *Node) Load(k enr.Entry) error {
+ return n.r.Load(k)
+}
+
+// IP returns the IP address of the node.
+func (n *Node) IP() net.IP {
+ var ip net.IP
+ n.Load((*enr.IP)(&ip))
+ return ip
+}
+
+// UDP returns the UDP port of the node.
+func (n *Node) UDP() int {
+ var port enr.UDP
+ n.Load(&port)
+ return int(port)
+}
+
+// TCP returns the TCP port of the node.
+func (n *Node) TCP() int {
+ var port enr.TCP
+ n.Load(&port)
+ return int(port)
+}
+
+// Pubkey returns the secp256k1 public key of the node, if present.
+func (n *Node) Pubkey() *ecdsa.PublicKey {
+ var key ecdsa.PublicKey
+ if n.Load((*Secp256k1)(&key)) != nil {
+ return nil
+ }
+ return &key
+}
+
+// Record returns the node's record. The return value is a copy and may
+// be modified by the caller.
+func (n *Node) Record() *enr.Record {
+ cpy := n.r
+ return &cpy
+}
+
+// ValidateComplete checks whether n is a valid complete node.
+func (n *Node) ValidateComplete() error {
+ if n.Incomplete() {
+ return errors.New("incomplete node")
+ }
+ if n.UDP() == 0 {
+ return errors.New("missing UDP port")
+ }
+ ip := n.IP()
+ if ip.IsMulticast() || ip.IsUnspecified() {
+ return errors.New("invalid IP (multicast/unspecified)")
+ }
+ // Validate the node key (on curve, etc.).
+ var key Secp256k1
+ return n.Load(&key)
+}
+
+// The string representation of a Node is a URL.
+// Please see ParseNode for a description of the format.
+func (n *Node) String() string {
+ return n.v4URL()
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (n *Node) MarshalText() ([]byte, error) {
+ return []byte(n.v4URL()), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (n *Node) UnmarshalText(text []byte) error {
+ dec, err := ParseV4(string(text))
+ if err == nil {
+ *n = *dec
+ }
+ return err
+}
+
+// ID is a unique identifier for each node.
+type ID [32]byte
+
+// Bytes returns a byte slice representation of the ID
+func (n ID) Bytes() []byte {
+ return n[:]
+}
+
+// ID prints as a long hexadecimal number.
+func (n ID) String() string {
+ return fmt.Sprintf("%x", n[:])
+}
+
+// The Go syntax representation of a ID is a call to HexID.
+func (n ID) GoString() string {
+ return fmt.Sprintf("enode.HexID(\"%x\")", n[:])
+}
+
+// TerminalString returns a shortened hex string for terminal logging.
+func (n ID) TerminalString() string {
+ return hex.EncodeToString(n[:8])
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (n ID) MarshalText() ([]byte, error) {
+ return []byte(hex.EncodeToString(n[:])), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (n *ID) UnmarshalText(text []byte) error {
+ id, err := parseID(string(text))
+ if err != nil {
+ return err
+ }
+ *n = id
+ return nil
+}
+
+// HexID converts a hex string to an ID.
+// The string may be prefixed with 0x.
+// It panics if the string is not a valid ID.
+func HexID(in string) ID {
+ id, err := parseID(in)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+func parseID(in string) (ID, error) {
+ var id ID
+ b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
+ if err != nil {
+ return id, err
+ } else if len(b) != len(id) {
+ return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2)
+ }
+ copy(id[:], b)
+ return id, nil
+}
+
+// DistCmp compares the distances a->target and b->target.
+// Returns -1 if a is closer to target, 1 if b is closer to target
+// and 0 if they are equal.
+func DistCmp(target, a, b ID) int {
+ for i := range target {
+ da := a[i] ^ target[i]
+ db := b[i] ^ target[i]
+ if da > db {
+ return 1
+ } else if da < db {
+ return -1
+ }
+ }
+ return 0
+}
+
+// LogDist returns the logarithmic distance between a and b, log2(a ^ b).
+func LogDist(a, b ID) int {
+ lz := 0
+ for i := range a {
+ x := a[i] ^ b[i]
+ if x == 0 {
+ lz += 8
+ } else {
+ lz += bits.LeadingZeros8(x)
+ break
+ }
+ }
+ return len(a)*8 - lz
+}
+
+// RandomID returns a random ID b such that logdist(a, b) == n.
+func RandomID(a ID, n int) (b ID) {
+ if n == 0 {
+ return a
+ }
+ // flip bit at position n, fill the rest with random bits
+ b = a
+ pos := len(a) - n/8 - 1
+ bit := byte(0x01) << (byte(n%8) - 1)
+ if bit == 0 {
+ pos++
+ bit = 0x80
+ }
+ b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
+ for i := pos + 1; i < len(a); i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
diff --git a/p2p/enode/node_test.go b/p2p/enode/node_test.go
new file mode 100644
index 000000000000..d23bf0500541
--- /dev/null
+++ b/p2p/enode/node_test.go
@@ -0,0 +1,62 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "encoding/hex"
+ "fmt"
+ "testing"
+
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
+ "github.com/stretchr/testify/assert"
+)
+
+var pyRecord, _ = hex.DecodeString("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f")
+
+// TestPythonInterop checks that we can decode and verify a record produced by the Python
+// implementation.
+func TestPythonInterop(t *testing.T) {
+ var r enr.Record
+ if err := rlp.DecodeBytes(pyRecord, &r); err != nil {
+ t.Fatalf("can't decode: %v", err)
+ }
+ n, err := New(ValidSchemes, &r)
+ if err != nil {
+ t.Fatalf("can't verify record: %v", err)
+ }
+
+ var (
+ wantID = HexID("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7")
+ wantSeq = uint64(1)
+ wantIP = enr.IP{127, 0, 0, 1}
+ wantUDP = enr.UDP(30303)
+ )
+ if n.Seq() != wantSeq {
+ t.Errorf("wrong seq: got %d, want %d", n.Seq(), wantSeq)
+ }
+ if n.ID() != wantID {
+ t.Errorf("wrong id: got %x, want %x", n.ID(), wantID)
+ }
+ want := map[enr.Entry]interface{}{new(enr.IP): &wantIP, new(enr.UDP): &wantUDP}
+ for k, v := range want {
+ desc := fmt.Sprintf("loading key %q", k.ENRKey())
+ if assert.NoError(t, n.Load(k), desc) {
+ assert.Equal(t, k, v, desc)
+ }
+ }
+}
diff --git a/p2p/discover/database.go b/p2p/enode/nodedb.go
similarity index 50%
rename from p2p/discover/database.go
rename to p2p/enode/nodedb.go
index 1f5d80f6445e..6e5361fabf63 100644
--- a/p2p/discover/database.go
+++ b/p2p/enode/nodedb.go
@@ -14,20 +14,17 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// Contains the node database, storing previously seen nodes and any collected
-// metadata about them for QoS purposes.
-
-package discover
+package enode
import (
"bytes"
"crypto/rand"
"encoding/binary"
+ "fmt"
"os"
"sync"
"time"
- "github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/rlp"
"github.com/syndtr/goleveldb/leveldb"
@@ -38,58 +35,55 @@ import (
"github.com/syndtr/goleveldb/leveldb/util"
)
+// Keys in the node database.
+const (
+ dbVersionKey = "version" // Version of the database to flush if changes
+ dbItemPrefix = "n:" // Identifier to prefix node entries with
+
+ dbDiscoverRoot = ":discover"
+ dbDiscoverSeq = dbDiscoverRoot + ":seq"
+ dbDiscoverPing = dbDiscoverRoot + ":lastping"
+ dbDiscoverPong = dbDiscoverRoot + ":lastpong"
+ dbDiscoverFindFails = dbDiscoverRoot + ":findfail"
+ dbLocalRoot = ":local"
+ dbLocalSeq = dbLocalRoot + ":seq"
+)
+
var (
- nodeDBNilNodeID = NodeID{} // Special node ID to use as a nil element.
- nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
- nodeDBCleanupCycle = time.Hour // Time period for running the expiration task.
+ dbNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped.
+ dbCleanupCycle = time.Hour // Time period for running the expiration task.
+ dbVersion = 7
)
-// nodeDB stores all nodes we know about.
-type nodeDB struct {
+// DB is the node database, storing previously seen nodes and any collected metadata about
+// them for QoS purposes.
+type DB struct {
lvl *leveldb.DB // Interface to the database itself
- self NodeID // Own node id to prevent adding it into the database
runner sync.Once // Ensures we can start at most one expirer
quit chan struct{} // Channel to signal the expiring thread to stop
}
-// Schema layout for the node database
-var (
- nodeDBVersionKey = []byte("version") // Version of the database to flush if changes
- nodeDBItemPrefix = []byte("n:") // Identifier to prefix node entries with
-
- nodeDBDiscoverRoot = ":discover"
- nodeDBDiscoverPing = nodeDBDiscoverRoot + ":lastping"
- nodeDBDiscoverPong = nodeDBDiscoverRoot + ":lastpong"
- nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail"
-)
-
-// newNodeDB creates a new node database for storing and retrieving infos about
-// known peers in the network. If no path is given, an in-memory, temporary
-// database is constructed.
-func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
+// OpenDB opens a node database for storing and retrieving infos about known peers in the
+// network. If no path is given an in-memory, temporary database is constructed.
+func OpenDB(path string) (*DB, error) {
if path == "" {
- return newMemoryNodeDB(self)
+ return newMemoryDB()
}
- return newPersistentNodeDB(path, version, self)
+ return newPersistentDB(path)
}
-// newMemoryNodeDB creates a new in-memory node database without a persistent
-// backend.
-func newMemoryNodeDB(self NodeID) (*nodeDB, error) {
+// newMemoryDB creates a new in-memory node database without a persistent backend.
+func newMemoryDB() (*DB, error) {
db, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
return nil, err
}
- return &nodeDB{
- lvl: db,
- self: self,
- quit: make(chan struct{}),
- }, nil
+ return &DB{lvl: db, quit: make(chan struct{})}, nil
}
// newPersistentNodeDB creates/opens a leveldb backed persistent node database,
// also flushing its contents in case of a version mismatch.
-func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) {
+func newPersistentDB(path string) (*DB, error) {
opts := &opt.Options{OpenFilesCacheCapacity: 5}
db, err := leveldb.OpenFile(path, opts)
if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted {
@@ -101,13 +95,13 @@ func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error)
// The nodes contained in the cache correspond to a certain protocol version.
// Flush all nodes if the version doesn't match.
currentVer := make([]byte, binary.MaxVarintLen64)
- currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))]
+ currentVer = currentVer[:binary.PutVarint(currentVer, int64(dbVersion))]
- blob, err := db.Get(nodeDBVersionKey, nil)
+ blob, err := db.Get([]byte(dbVersionKey), nil)
switch err {
case leveldb.ErrNotFound:
// Version not found (i.e. empty cache), insert it
- if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil {
+ if err := db.Put([]byte(dbVersionKey), currentVer, nil); err != nil {
db.Close()
return nil, err
}
@@ -119,42 +113,37 @@ func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error)
if err = os.RemoveAll(path); err != nil {
return nil, err
}
- return newPersistentNodeDB(path, version, self)
+ return newPersistentDB(path)
}
}
- return &nodeDB{
- lvl: db,
- self: self,
- quit: make(chan struct{}),
- }, nil
+ return &DB{lvl: db, quit: make(chan struct{})}, nil
}
// makeKey generates the leveldb key-blob from a node id and its particular
// field of interest.
-func makeKey(id NodeID, field string) []byte {
- if bytes.Equal(id[:], nodeDBNilNodeID[:]) {
+func makeKey(id ID, field string) []byte {
+ if (id == ID{}) {
return []byte(field)
}
- return append(nodeDBItemPrefix, append(id[:], field...)...)
+ return append([]byte(dbItemPrefix), append(id[:], field...)...)
}
// splitKey tries to split a database key into a node id and a field part.
-func splitKey(key []byte) (id NodeID, field string) {
+func splitKey(key []byte) (id ID, field string) {
// If the key is not of a node, return it plainly
- if !bytes.HasPrefix(key, nodeDBItemPrefix) {
- return NodeID{}, string(key)
+ if !bytes.HasPrefix(key, []byte(dbItemPrefix)) {
+ return ID{}, string(key)
}
// Otherwise split the id and field
- item := key[len(nodeDBItemPrefix):]
+ item := key[len(dbItemPrefix):]
copy(id[:], item[:len(id)])
field = string(item[len(id):])
return id, field
}
-// fetchInt64 retrieves an integer instance associated with a particular
-// database key.
-func (db *nodeDB) fetchInt64(key []byte) int64 {
+// fetchInt64 retrieves an integer associated with a particular key.
+func (db *DB) fetchInt64(key []byte) int64 {
blob, err := db.lvl.Get(key, nil)
if err != nil {
return 0
@@ -166,41 +155,80 @@ func (db *nodeDB) fetchInt64(key []byte) int64 {
return val
}
-// storeInt64 update a specific database entry to the current time instance as a
-// unix timestamp.
-func (db *nodeDB) storeInt64(key []byte, n int64) error {
+// storeInt64 stores an integer in the given key.
+func (db *DB) storeInt64(key []byte, n int64) error {
blob := make([]byte, binary.MaxVarintLen64)
blob = blob[:binary.PutVarint(blob, n)]
+ return db.lvl.Put(key, blob, nil)
+}
+// fetchUint64 retrieves an integer associated with a particular key.
+func (db *DB) fetchUint64(key []byte) uint64 {
+ blob, err := db.lvl.Get(key, nil)
+ if err != nil {
+ return 0
+ }
+ val, _ := binary.Uvarint(blob)
+ return val
+}
+
+// storeUint64 stores an integer in the given key.
+func (db *DB) storeUint64(key []byte, n uint64) error {
+ blob := make([]byte, binary.MaxVarintLen64)
+ blob = blob[:binary.PutUvarint(blob, n)]
return db.lvl.Put(key, blob, nil)
}
-// node retrieves a node with a given id from the database.
-func (db *nodeDB) node(id NodeID) *Node {
- blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
+// Node retrieves a node with a given id from the database.
+func (db *DB) Node(id ID) *Node {
+ blob, err := db.lvl.Get(makeKey(id, dbDiscoverRoot), nil)
if err != nil {
return nil
}
+ return mustDecodeNode(id[:], blob)
+}
+
+func mustDecodeNode(id, data []byte) *Node {
node := new(Node)
- if err := rlp.DecodeBytes(blob, node); err != nil {
- log.Error("Failed to decode node RLP", "err", err)
- return nil
+ if err := rlp.DecodeBytes(data, &node.r); err != nil {
+ panic(fmt.Errorf("p2p/enode: can't decode node %x in DB: %v", id, err))
}
- node.sha = crypto.Keccak256Hash(node.ID[:])
+ // Restore node id cache.
+ copy(node.id[:], id)
return node
}
-// updateNode inserts - potentially overwriting - a node into the peer database.
-func (db *nodeDB) updateNode(node *Node) error {
- blob, err := rlp.EncodeToBytes(node)
+// UpdateNode inserts - potentially overwriting - a node into the peer database.
+func (db *DB) UpdateNode(node *Node) error {
+ if node.Seq() < db.NodeSeq(node.ID()) {
+ return nil
+ }
+ blob, err := rlp.EncodeToBytes(&node.r)
if err != nil {
return err
}
- return db.lvl.Put(makeKey(node.ID, nodeDBDiscoverRoot), blob, nil)
+ if err := db.lvl.Put(makeKey(node.ID(), dbDiscoverRoot), blob, nil); err != nil {
+ return err
+ }
+ return db.storeUint64(makeKey(node.ID(), dbDiscoverSeq), node.Seq())
+}
+
+// NodeSeq returns the stored record sequence number of the given node.
+func (db *DB) NodeSeq(id ID) uint64 {
+ return db.fetchUint64(makeKey(id, dbDiscoverSeq))
+}
+
+// Resolve returns the stored record of the node if it has a larger sequence
+// number than n.
+func (db *DB) Resolve(n *Node) *Node {
+ if n.Seq() > db.NodeSeq(n.ID()) {
+ return n
+ }
+ return db.Node(n.ID())
}
-// deleteNode deletes all information/keys associated with a node.
-func (db *nodeDB) deleteNode(id NodeID) error {
+// DeleteNode deletes all information/keys associated with a node.
+func (db *DB) DeleteNode(id ID) error {
deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil)
for deleter.Next() {
if err := db.lvl.Delete(deleter.Key(), nil); err != nil {
@@ -219,14 +247,14 @@ func (db *nodeDB) deleteNode(id NodeID) error {
// it would require significant overhead to exactly trace the first successful
// convergence, it's simpler to "ensure" the correct state when an appropriate
// condition occurs (i.e. a successful bonding), and discard further events.
-func (db *nodeDB) ensureExpirer() {
+func (db *DB) ensureExpirer() {
db.runner.Do(func() { go db.expirer() })
}
// expirer should be started in a go routine, and is responsible for looping ad
// infinitum and dropping stale data from the database.
-func (db *nodeDB) expirer() {
- tick := time.NewTicker(nodeDBCleanupCycle)
+func (db *DB) expirer() {
+ tick := time.NewTicker(dbCleanupCycle)
defer tick.Stop()
for {
select {
@@ -242,8 +270,8 @@ func (db *nodeDB) expirer() {
// expireNodes iterates over the database and deletes all nodes that have not
// been seen (i.e. received a pong from) for some allotted time.
-func (db *nodeDB) expireNodes() error {
- threshold := time.Now().Add(-nodeDBNodeExpiration)
+func (db *DB) expireNodes() error {
+ threshold := time.Now().Add(-dbNodeExpiration)
// Find discovered nodes that are older than the allowance
it := db.lvl.NewIterator(nil, nil)
@@ -252,65 +280,70 @@ func (db *nodeDB) expireNodes() error {
for it.Next() {
// Skip the item if not a discovery node
id, field := splitKey(it.Key())
- if field != nodeDBDiscoverRoot {
+ if field != dbDiscoverRoot {
continue
}
// Skip the node if not expired yet (and not self)
- if !bytes.Equal(id[:], db.self[:]) {
- if seen := db.bondTime(id); seen.After(threshold) {
- continue
- }
+ if seen := db.LastPongReceived(id); seen.After(threshold) {
+ continue
}
// Otherwise delete all associated information
- db.deleteNode(id)
+ db.DeleteNode(id)
}
return nil
}
-// lastPing retrieves the time of the last ping packet send to a remote node,
-// requesting binding.
-func (db *nodeDB) lastPing(id NodeID) time.Time {
- return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0)
+// LastPingReceived retrieves the time of the last ping packet received from
+// a remote node.
+func (db *DB) LastPingReceived(id ID) time.Time {
+ return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPing)), 0)
+}
+
+// UpdateLastPingReceived updates the last time we received a ping packet from a remote node.
+func (db *DB) UpdateLastPingReceived(id ID, instance time.Time) error {
+ return db.storeInt64(makeKey(id, dbDiscoverPing), instance.Unix())
}
-// updateLastPing updates the last time we tried contacting a remote node.
-func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error {
- return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix())
+// LastPongReceived retrieves the time of the last successful pong from remote node.
+func (db *DB) LastPongReceived(id ID) time.Time {
+ // Launch expirer
+ db.ensureExpirer()
+ return time.Unix(db.fetchInt64(makeKey(id, dbDiscoverPong)), 0)
}
-// bondTime retrieves the time of the last successful pong from remote node.
-func (db *nodeDB) bondTime(id NodeID) time.Time {
- return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0)
+// UpdateLastPongReceived updates the last pong time of a node.
+func (db *DB) UpdateLastPongReceived(id ID, instance time.Time) error {
+ return db.storeInt64(makeKey(id, dbDiscoverPong), instance.Unix())
}
-// hasBond reports whether the given node is considered bonded.
-func (db *nodeDB) hasBond(id NodeID) bool {
- return time.Since(db.bondTime(id)) < nodeDBNodeExpiration
+// FindFails retrieves the number of findnode failures since bonding.
+func (db *DB) FindFails(id ID) int {
+ return int(db.fetchInt64(makeKey(id, dbDiscoverFindFails)))
}
-// updateBondTime updates the last pong time of a node.
-func (db *nodeDB) updateBondTime(id NodeID, instance time.Time) error {
- return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix())
+// UpdateFindFails updates the number of findnode failures since bonding.
+func (db *DB) UpdateFindFails(id ID, fails int) error {
+ return db.storeInt64(makeKey(id, dbDiscoverFindFails), int64(fails))
}
-// findFails retrieves the number of findnode failures since bonding.
-func (db *nodeDB) findFails(id NodeID) int {
- return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails)))
+// localSeq retrieves the local record sequence counter.
+func (db *DB) localSeq(id ID) uint64 {
+ return db.fetchUint64(makeKey(id, dbLocalSeq))
}
-// updateFindFails updates the number of findnode failures since bonding.
-func (db *nodeDB) updateFindFails(id NodeID, fails int) error {
- return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails))
+// storeLocalSeq stores the local record sequence counter.
+func (db *DB) storeLocalSeq(id ID, n uint64) {
+ db.storeUint64(makeKey(id, dbLocalSeq), n)
}
-// querySeeds retrieves random nodes to be used as potential seed nodes
+// QuerySeeds retrieves random nodes to be used as potential seed nodes
// for bootstrapping.
-func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node {
+func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node {
var (
now = time.Now()
nodes = make([]*Node, 0, n)
it = db.lvl.NewIterator(nil, nil)
- id NodeID
+ id ID
)
defer it.Release()
@@ -322,21 +355,18 @@ seek:
ctr := id[0]
rand.Read(id[:])
id[0] = ctr + id[0]%16
- it.Seek(makeKey(id, nodeDBDiscoverRoot))
+ it.Seek(makeKey(id, dbDiscoverRoot))
n := nextNode(it)
if n == nil {
id[0] = 0
continue seek // iterator exhausted
}
- if n.ID == db.self {
- continue seek
- }
- if now.Sub(db.bondTime(n.ID)) > maxAge {
+ if now.Sub(db.LastPongReceived(n.ID())) > maxAge {
continue seek
}
for i := range nodes {
- if nodes[i].ID == n.ID {
+ if nodes[i].ID() == n.ID() {
continue seek // duplicate
}
}
@@ -350,21 +380,16 @@ seek:
func nextNode(it iterator.Iterator) *Node {
for end := false; !end; end = !it.Next() {
id, field := splitKey(it.Key())
- if field != nodeDBDiscoverRoot {
- continue
- }
- var n Node
- if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
- log.Warn("Failed to decode node RLP", "id", id, "err", err)
+ if field != dbDiscoverRoot {
continue
}
- return &n
+ return mustDecodeNode(id[:], it.Value())
}
return nil
}
// close flushes and closes the database files.
-func (db *nodeDB) close() {
+func (db *DB) Close() {
close(db.quit)
db.lvl.Close()
}
diff --git a/p2p/discover/database_test.go b/p2p/enode/nodedb_test.go
similarity index 54%
rename from p2p/discover/database_test.go
rename to p2p/enode/nodedb_test.go
index 6f452a060ceb..59e9533fbeb4 100644
--- a/p2p/discover/database_test.go
+++ b/p2p/enode/nodedb_test.go
@@ -14,10 +14,12 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package discover
+package enode
import (
"bytes"
+ "fmt"
+ "io/ioutil"
"net"
"os"
"path/filepath"
@@ -27,24 +29,21 @@ import (
)
var nodeDBKeyTests = []struct {
- id NodeID
+ id ID
field string
key []byte
}{
{
- id: NodeID{},
+ id: ID{},
field: "version",
key: []byte{0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e}, // field
},
{
- id: MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ id: HexID("51232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
field: ":discover",
- key: []byte{0x6e, 0x3a, // prefix
- 0x1d, 0xd9, 0xd6, 0x5c, 0x45, 0x52, 0xb5, 0xeb, // node id
- 0x43, 0xd5, 0xad, 0x55, 0xa2, 0xee, 0x3f, 0x56, //
- 0xc6, 0xcb, 0xc1, 0xc6, 0x4a, 0x5c, 0x8d, 0x65, //
- 0x9f, 0x51, 0xfc, 0xd5, 0x1b, 0xac, 0xe2, 0x43, //
- 0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, //
+ key: []byte{
+ 0x6e, 0x3a, // prefix
+ 0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // node id
0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, //
0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, //
0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, //
@@ -53,7 +52,7 @@ var nodeDBKeyTests = []struct {
},
}
-func TestNodeDBKeys(t *testing.T) {
+func TestDBKeys(t *testing.T) {
for i, tt := range nodeDBKeyTests {
if key := makeKey(tt.id, tt.field); !bytes.Equal(key, tt.key) {
t.Errorf("make test %d: key mismatch: have 0x%x, want 0x%x", i, key, tt.key)
@@ -77,9 +76,9 @@ var nodeDBInt64Tests = []struct {
{key: []byte{0x03}, value: 3},
}
-func TestNodeDBInt64(t *testing.T) {
- db, _ := newNodeDB("", Version, NodeID{})
- defer db.close()
+func TestDBInt64(t *testing.T) {
+ db, _ := OpenDB("")
+ defer db.Close()
tests := nodeDBInt64Tests
for i := 0; i < len(tests); i++ {
@@ -100,9 +99,9 @@ func TestNodeDBInt64(t *testing.T) {
}
}
-func TestNodeDBFetchStore(t *testing.T) {
- node := NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+func TestDBFetchStore(t *testing.T) {
+ node := NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{192, 168, 0, 1},
30303,
30303,
@@ -110,47 +109,47 @@ func TestNodeDBFetchStore(t *testing.T) {
inst := time.Now()
num := 314
- db, _ := newNodeDB("", Version, NodeID{})
- defer db.close()
+ db, _ := OpenDB("")
+ defer db.Close()
// Check fetch/store operations on a node ping object
- if stored := db.lastPing(node.ID); stored.Unix() != 0 {
+ if stored := db.LastPingReceived(node.ID()); stored.Unix() != 0 {
t.Errorf("ping: non-existing object: %v", stored)
}
- if err := db.updateLastPing(node.ID, inst); err != nil {
+ if err := db.UpdateLastPingReceived(node.ID(), inst); err != nil {
t.Errorf("ping: failed to update: %v", err)
}
- if stored := db.lastPing(node.ID); stored.Unix() != inst.Unix() {
+ if stored := db.LastPingReceived(node.ID()); stored.Unix() != inst.Unix() {
t.Errorf("ping: value mismatch: have %v, want %v", stored, inst)
}
// Check fetch/store operations on a node pong object
- if stored := db.bondTime(node.ID); stored.Unix() != 0 {
+ if stored := db.LastPongReceived(node.ID()); stored.Unix() != 0 {
t.Errorf("pong: non-existing object: %v", stored)
}
- if err := db.updateBondTime(node.ID, inst); err != nil {
+ if err := db.UpdateLastPongReceived(node.ID(), inst); err != nil {
t.Errorf("pong: failed to update: %v", err)
}
- if stored := db.bondTime(node.ID); stored.Unix() != inst.Unix() {
+ if stored := db.LastPongReceived(node.ID()); stored.Unix() != inst.Unix() {
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst)
}
// Check fetch/store operations on a node findnode-failure object
- if stored := db.findFails(node.ID); stored != 0 {
+ if stored := db.FindFails(node.ID()); stored != 0 {
t.Errorf("find-node fails: non-existing object: %v", stored)
}
- if err := db.updateFindFails(node.ID, num); err != nil {
+ if err := db.UpdateFindFails(node.ID(), num); err != nil {
t.Errorf("find-node fails: failed to update: %v", err)
}
- if stored := db.findFails(node.ID); stored != num {
+ if stored := db.FindFails(node.ID()); stored != num {
t.Errorf("find-node fails: value mismatch: have %v, want %v", stored, num)
}
// Check fetch/store operations on an actual node object
- if stored := db.node(node.ID); stored != nil {
+ if stored := db.Node(node.ID()); stored != nil {
t.Errorf("node: non-existing object: %v", stored)
}
- if err := db.updateNode(node); err != nil {
+ if err := db.UpdateNode(node); err != nil {
t.Errorf("node: failed to update: %v", err)
}
- if stored := db.node(node.ID); stored == nil {
+ if stored := db.Node(node.ID()); stored == nil {
t.Errorf("node: not found")
} else if !reflect.DeepEqual(stored, node) {
t.Errorf("node: data mismatch: have %v, want %v", stored, node)
@@ -164,8 +163,8 @@ var nodeDBSeedQueryNodes = []struct {
// This one should not be in the result set because its last
// pong time is too far in the past.
{
- node: NewNode(
- MustHexID("0x84d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{127, 0, 0, 3},
30303,
30303,
@@ -175,8 +174,8 @@ var nodeDBSeedQueryNodes = []struct {
// This one shouldn't be in in the result set because its
// nodeID is the local node's ID.
{
- node: NewNode(
- MustHexID("0x57d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("ff93ff820abacd4351b0f14e47b324bc82ff014c226f3f66a53535734a3c150e7e38ca03ef0964ba55acddc768f5e99cd59dea95ddd4defbab1339c92fa319b2"),
net.IP{127, 0, 0, 3},
30303,
30303,
@@ -186,8 +185,8 @@ var nodeDBSeedQueryNodes = []struct {
// These should be in the result set.
{
- node: NewNode(
- MustHexID("0x22d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("c2b5eb3f5dde05f815b63777809ee3e7e0cbb20035a6b00ce327191e6eaa8f26a8d461c9112b7ab94698e7361fa19fd647e603e73239002946d76085b6f928d6"),
net.IP{127, 0, 0, 1},
30303,
30303,
@@ -195,8 +194,8 @@ var nodeDBSeedQueryNodes = []struct {
pong: time.Now().Add(-2 * time.Second),
},
{
- node: NewNode(
- MustHexID("0x44d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("6ca1d400c8ddf8acc94bcb0dd254911ad71a57bed5e0ae5aa205beed59b28c2339908e97990c493499613cff8ecf6c3dc7112a8ead220cdcd00d8847ca3db755"),
net.IP{127, 0, 0, 2},
30303,
30303,
@@ -204,57 +203,92 @@ var nodeDBSeedQueryNodes = []struct {
pong: time.Now().Add(-3 * time.Second),
},
{
- node: NewNode(
- MustHexID("0xe2d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("234dc63fe4d131212b38236c4c3411288d7bec61cbf7b120ff12c43dc60c96182882f4291d209db66f8a38e986c9c010ff59231a67f9515c7d1668b86b221a47"),
net.IP{127, 0, 0, 3},
30303,
30303,
),
pong: time.Now().Add(-1 * time.Second),
},
+ {
+ node: NewV4(
+ hexPubkey("c013a50b4d1ebce5c377d8af8cb7114fd933ffc9627f96ad56d90fef5b7253ec736fd07ef9a81dc2955a997e54b7bf50afd0aa9f110595e2bec5bb7ce1657004"),
+ net.IP{127, 0, 0, 3},
+ 30303,
+ 30303,
+ ),
+ pong: time.Now().Add(-2 * time.Second),
+ },
+ {
+ node: NewV4(
+ hexPubkey("f141087e3e08af1aeec261ff75f48b5b1637f594ea9ad670e50051646b0416daa3b134c28788cbe98af26992a47652889cd8577ccc108ac02c6a664db2dc1283"),
+ net.IP{127, 0, 0, 3},
+ 30303,
+ 30303,
+ ),
+ pong: time.Now().Add(-2 * time.Second),
+ },
+}
+
+func TestDBSeedQuery(t *testing.T) {
+ // Querying seeds uses seeks and might not find all nodes
+ // every time when the database is small. Run the test multiple
+ // times to avoid flakes.
+ const attempts = 15
+ var err error
+ for i := 0; i < attempts; i++ {
+ if err = testSeedQuery(); err == nil {
+ return
+ }
+ }
+ if err != nil {
+ t.Errorf("no successful run in %d attempts: %v", attempts, err)
+ }
}
-func TestNodeDBSeedQuery(t *testing.T) {
- db, _ := newNodeDB("", Version, nodeDBSeedQueryNodes[1].node.ID)
- defer db.close()
+func testSeedQuery() error {
+ db, _ := OpenDB("")
+ defer db.Close()
// Insert a batch of nodes for querying
for i, seed := range nodeDBSeedQueryNodes {
- if err := db.updateNode(seed.node); err != nil {
- t.Fatalf("node %d: failed to insert: %v", i, err)
+ if err := db.UpdateNode(seed.node); err != nil {
+ return fmt.Errorf("node %d: failed to insert: %v", i, err)
}
- if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
- t.Fatalf("node %d: failed to insert bondTime: %v", i, err)
+ if err := db.UpdateLastPongReceived(seed.node.ID(), seed.pong); err != nil {
+ return fmt.Errorf("node %d: failed to insert bondTime: %v", i, err)
}
}
// Retrieve the entire batch and check for duplicates
- seeds := db.querySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
- have := make(map[NodeID]struct{})
+ seeds := db.QuerySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour)
+ have := make(map[ID]struct{})
for _, seed := range seeds {
- have[seed.ID] = struct{}{}
+ have[seed.ID()] = struct{}{}
}
- want := make(map[NodeID]struct{})
- for _, seed := range nodeDBSeedQueryNodes[2:] {
- want[seed.node.ID] = struct{}{}
+ want := make(map[ID]struct{})
+ for _, seed := range nodeDBSeedQueryNodes[1:] {
+ want[seed.node.ID()] = struct{}{}
}
if len(seeds) != len(want) {
- t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
+ return fmt.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want))
}
for id := range have {
if _, ok := want[id]; !ok {
- t.Errorf("extra seed: %v", id)
+ return fmt.Errorf("extra seed: %v", id)
}
}
for id := range want {
if _, ok := have[id]; !ok {
- t.Errorf("missing seed: %v", id)
+ return fmt.Errorf("missing seed: %v", id)
}
}
+ return nil
}
-func TestNodeDBPersistency(t *testing.T) {
- root, err := os.MkdirTemp("", "nodedb-")
+func TestDBPersistency(t *testing.T) {
+ root, err := ioutil.TempDir("", "nodedb-")
if err != nil {
t.Fatalf("failed to create temporary data folder: %v", err)
}
@@ -266,34 +300,24 @@ func TestNodeDBPersistency(t *testing.T) {
)
// Create a persistent database and store some values
- db, err := newNodeDB(filepath.Join(root, "database"), Version, NodeID{})
+ db, err := OpenDB(filepath.Join(root, "database"))
if err != nil {
t.Fatalf("failed to create persistent database: %v", err)
}
if err := db.storeInt64(testKey, testInt); err != nil {
t.Fatalf("failed to store value: %v.", err)
}
- db.close()
+ db.Close()
// Reopen the database and check the value
- db, err = newNodeDB(filepath.Join(root, "database"), Version, NodeID{})
+ db, err = OpenDB(filepath.Join(root, "database"))
if err != nil {
t.Fatalf("failed to open persistent database: %v", err)
}
if val := db.fetchInt64(testKey); val != testInt {
t.Fatalf("value mismatch: have %v, want %v", val, testInt)
}
- db.close()
-
- // Change the database version and check flush
- db, err = newNodeDB(filepath.Join(root, "database"), Version+1, NodeID{})
- if err != nil {
- t.Fatalf("failed to open persistent database: %v", err)
- }
- if val := db.fetchInt64(testKey); val != 0 {
- t.Fatalf("value mismatch: have %v, want %v", val, 0)
- }
- db.close()
+ db.Close()
}
var nodeDBExpirationNodes = []struct {
@@ -302,36 +326,36 @@ var nodeDBExpirationNodes = []struct {
exp bool
}{
{
- node: NewNode(
- MustHexID("0x01d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("8d110e2ed4b446d9b5fb50f117e5f37fb7597af455e1dab0e6f045a6eeaa786a6781141659020d38bdc5e698ed3d4d2bafa8b5061810dfa63e8ac038db2e9b67"),
net.IP{127, 0, 0, 1},
30303,
30303,
),
- pong: time.Now().Add(-nodeDBNodeExpiration + time.Minute),
+ pong: time.Now().Add(-dbNodeExpiration + time.Minute),
exp: false,
}, {
- node: NewNode(
- MustHexID("0x02d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ node: NewV4(
+ hexPubkey("913a205579c32425b220dfba999d215066e5bdbf900226b11da1907eae5e93eb40616d47412cf819664e9eacbdfcca6b0c6e07e09847a38472d4be46ab0c3672"),
net.IP{127, 0, 0, 2},
30303,
30303,
),
- pong: time.Now().Add(-nodeDBNodeExpiration - time.Minute),
+ pong: time.Now().Add(-dbNodeExpiration - time.Minute),
exp: true,
},
}
-func TestNodeDBExpiration(t *testing.T) {
- db, _ := newNodeDB("", Version, NodeID{})
- defer db.close()
+func TestDBExpiration(t *testing.T) {
+ db, _ := OpenDB("")
+ defer db.Close()
// Add all the test nodes and set their last pong time
for i, seed := range nodeDBExpirationNodes {
- if err := db.updateNode(seed.node); err != nil {
+ if err := db.UpdateNode(seed.node); err != nil {
t.Fatalf("node %d: failed to insert: %v", i, err)
}
- if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
+ if err := db.UpdateLastPongReceived(seed.node.ID(), seed.pong); err != nil {
t.Fatalf("node %d: failed to update bondTime: %v", i, err)
}
}
@@ -340,40 +364,9 @@ func TestNodeDBExpiration(t *testing.T) {
t.Fatalf("failed to expire nodes: %v", err)
}
for i, seed := range nodeDBExpirationNodes {
- node := db.node(seed.node.ID)
+ node := db.Node(seed.node.ID())
if (node == nil && !seed.exp) || (node != nil && seed.exp) {
t.Errorf("node %d: expiration mismatch: have %v, want %v", i, node, seed.exp)
}
}
}
-
-func TestNodeDBSelfExpiration(t *testing.T) {
- // Find a node in the tests that shouldn't expire, and assign it as self
- var self NodeID
- for _, node := range nodeDBExpirationNodes {
- if !node.exp {
- self = node.node.ID
- break
- }
- }
- db, _ := newNodeDB("", Version, self)
- defer db.close()
-
- // Add all the test nodes and set their last pong time
- for i, seed := range nodeDBExpirationNodes {
- if err := db.updateNode(seed.node); err != nil {
- t.Fatalf("node %d: failed to insert: %v", i, err)
- }
- if err := db.updateBondTime(seed.node.ID, seed.pong); err != nil {
- t.Fatalf("node %d: failed to update bondTime: %v", i, err)
- }
- }
- // Expire the nodes and make sure self has been evacuated too
- if err := db.expireNodes(); err != nil {
- t.Fatalf("failed to expire nodes: %v", err)
- }
- node := db.node(self)
- if node != nil {
- t.Errorf("self not evacuated")
- }
-}
diff --git a/p2p/enode/urlv4.go b/p2p/enode/urlv4.go
new file mode 100644
index 000000000000..659b9f3e0ea4
--- /dev/null
+++ b/p2p/enode/urlv4.go
@@ -0,0 +1,194 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package enode
+
+import (
+ "crypto/ecdsa"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "regexp"
+ "strconv"
+
+ "github.com/XinFinOrg/XDPoSChain/common/math"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
+)
+
+var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$")
+
+// MustParseV4 parses a node URL. It panics if the URL is not valid.
+func MustParseV4(rawurl string) *Node {
+ n, err := ParseV4(rawurl)
+ if err != nil {
+ panic("invalid node URL: " + err.Error())
+ }
+ return n
+}
+
+// ParseV4 parses a node URL.
+//
+// There are two basic forms of node URLs:
+//
+// - incomplete nodes, which only have the public key (node ID)
+// - complete nodes, which contain the public key and IP/Port information
+//
+// For incomplete nodes, the designator must look like one of these
+//
+// enode://<hex node id>
+// <hex node id>
+//
+// For complete nodes, the node ID is encoded in the username portion
+// of the URL, separated from the host by an @ sign. The hostname can
+// only be given as an IP address, DNS domain names are not allowed.
+// The port in the host name section is the TCP listening port. If the
+// TCP and UDP (discovery) ports differ, the UDP port is specified as
+// query parameter "discport".
+//
+// In the following example, the node URL describes
+// a node with IP address 10.3.58.6, TCP listening port 30303
+// and UDP discovery port 30301.
+//
+// enode://<hex node id>@10.3.58.6:30303?discport=30301
+func ParseV4(rawurl string) (*Node, error) {
+ if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil {
+ id, err := parsePubkey(m[1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid node ID (%v)", err)
+ }
+ return NewV4(id, nil, 0, 0), nil
+ }
+ return parseComplete(rawurl)
+}
+
+// NewV4 creates a node from discovery v4 node information. The record
+// contained in the node has a zero-length signature.
+func NewV4(pubkey *ecdsa.PublicKey, ip net.IP, tcp, udp int) *Node {
+ var r enr.Record
+ if ip != nil {
+ r.Set(enr.IP(ip))
+ }
+ if udp != 0 {
+ r.Set(enr.UDP(udp))
+ }
+ if tcp != 0 {
+ r.Set(enr.TCP(tcp))
+ }
+ signV4Compat(&r, pubkey)
+ n, err := New(v4CompatID{}, &r)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func parseComplete(rawurl string) (*Node, error) {
+ var (
+ id *ecdsa.PublicKey
+ ip net.IP
+ tcpPort, udpPort uint64
+ )
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ return nil, err
+ }
+ if u.Scheme != "enode" {
+ return nil, errors.New("invalid URL scheme, want \"enode\"")
+ }
+ // Parse the Node ID from the user portion.
+ if u.User == nil {
+ return nil, errors.New("does not contain node ID")
+ }
+ if id, err = parsePubkey(u.User.String()); err != nil {
+ return nil, fmt.Errorf("invalid node ID (%v)", err)
+ }
+ // Parse the IP address.
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ return nil, fmt.Errorf("invalid host: %v", err)
+ }
+ if ip = net.ParseIP(host); ip == nil {
+ return nil, errors.New("invalid IP address")
+ }
+ // Ensure the IP is 4 bytes long for IPv4 addresses.
+ if ipv4 := ip.To4(); ipv4 != nil {
+ ip = ipv4
+ }
+ // Parse the port numbers.
+ if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil {
+ return nil, errors.New("invalid port")
+ }
+ udpPort = tcpPort
+ qv := u.Query()
+ if qv.Get("discport") != "" {
+ udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16)
+ if err != nil {
+ return nil, errors.New("invalid discport in query")
+ }
+ }
+ return NewV4(id, ip, int(tcpPort), int(udpPort)), nil
+}
+
+// parsePubkey parses a hex-encoded secp256k1 public key.
+func parsePubkey(in string) (*ecdsa.PublicKey, error) {
+ b, err := hex.DecodeString(in)
+ if err != nil {
+ return nil, err
+ } else if len(b) != 64 {
+ return nil, fmt.Errorf("wrong length, want %d hex chars", 128)
+ }
+ b = append([]byte{0x4}, b...)
+ return crypto.UnmarshalPubkey(b)
+}
+
+func (n *Node) v4URL() string {
+ var (
+ scheme enr.ID
+ nodeid string
+ key ecdsa.PublicKey
+ )
+ n.Load(&scheme)
+ n.Load((*Secp256k1)(&key))
+ switch {
+ case scheme == "v4" || key != ecdsa.PublicKey{}:
+ nodeid = fmt.Sprintf("%x", crypto.FromECDSAPub(&key)[1:])
+ default:
+ nodeid = fmt.Sprintf("%s.%x", scheme, n.id[:])
+ }
+ u := url.URL{Scheme: "enode"}
+ if n.Incomplete() {
+ u.Host = nodeid
+ } else {
+ addr := net.TCPAddr{IP: n.IP(), Port: n.TCP()}
+ u.User = url.User(nodeid)
+ u.Host = addr.String()
+ if n.UDP() != n.TCP() {
+ u.RawQuery = "discport=" + strconv.Itoa(n.UDP())
+ }
+ }
+ return u.String()
+}
+
+// PubkeyToIDV4 derives the v4 node address from the given public key.
+func PubkeyToIDV4(key *ecdsa.PublicKey) ID {
+ e := make([]byte, 64)
+ math.ReadBits(key.X, e[:len(e)/2])
+ math.ReadBits(key.Y, e[len(e)/2:])
+ return ID(crypto.Keccak256Hash(e))
+}
diff --git a/p2p/discover/node_test.go b/p2p/enode/urlv4_test.go
similarity index 51%
rename from p2p/discover/node_test.go
rename to p2p/enode/urlv4_test.go
index 846fca179ed0..163306027a08 100644
--- a/p2p/discover/node_test.go
+++ b/p2p/enode/urlv4_test.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The go-ethereum Authors
+// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,45 +14,19 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package discover
+package enode
import (
"bytes"
- "fmt"
+ "crypto/ecdsa"
"math/big"
- "math/rand"
"net"
"reflect"
"strings"
"testing"
"testing/quick"
- "time"
-
- "github.com/XinFinOrg/XDPoSChain/common"
- "github.com/XinFinOrg/XDPoSChain/crypto"
)
-func ExampleNewNode() {
- id := MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
-
- // Complete nodes contain UDP and TCP endpoints:
- n1 := NewNode(id, net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), 52150, 30303)
- fmt.Println("n1:", n1)
- fmt.Println("n1.Incomplete() ->", n1.Incomplete())
-
- // An incomplete node can be created by passing zero values
- // for all parameters except id.
- n2 := NewNode(id, nil, 0, 0)
- fmt.Println("n2:", n2)
- fmt.Println("n2.Incomplete() ->", n2.Incomplete())
-
- // Output:
- // n1: enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:30303?discport=52150
- // n1.Incomplete() -> false
- // n2: enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439
- // n2.Incomplete() -> true
-}
-
var parseNodeTests = []struct {
rawurl string
wantError string
@@ -81,8 +55,8 @@ var parseNodeTests = []struct {
},
{
rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150",
- wantResult: NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ wantResult: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{0x7f, 0x0, 0x0, 0x1},
52150,
52150,
@@ -90,8 +64,8 @@ var parseNodeTests = []struct {
},
{
rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[::]:52150",
- wantResult: NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ wantResult: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.ParseIP("::"),
52150,
52150,
@@ -99,8 +73,8 @@ var parseNodeTests = []struct {
},
{
rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150",
- wantResult: NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ wantResult: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.ParseIP("2001:db8:3c4d:15::abcd:ef12"),
52150,
52150,
@@ -108,25 +82,25 @@ var parseNodeTests = []struct {
},
{
rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150?discport=22334",
- wantResult: NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ wantResult: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
net.IP{0x7f, 0x0, 0x0, 0x1},
- 22334,
52150,
+ 22334,
),
},
// Incomplete nodes with no address.
{
rawurl: "1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439",
- wantResult: NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ wantResult: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
nil, 0, 0,
),
},
{
rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439",
- wantResult: NewNode(
- MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
+ wantResult: NewV4(
+ hexPubkey("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
nil, 0, 0,
),
},
@@ -146,9 +120,17 @@ var parseNodeTests = []struct {
},
}
+func hexPubkey(h string) *ecdsa.PublicKey {
+ k, err := parsePubkey(h)
+ if err != nil {
+ panic(err)
+ }
+ return k
+}
+
func TestParseNode(t *testing.T) {
for _, test := range parseNodeTests {
- n, err := ParseNode(test.rawurl)
+ n, err := ParseV4(test.rawurl)
if test.wantError != "" {
if err == nil {
t.Errorf("test %q:\n got nil error, expected %#q", test.rawurl, test.wantError)
@@ -163,7 +145,7 @@ func TestParseNode(t *testing.T) {
continue
}
if !reflect.DeepEqual(n, test.wantResult) {
- t.Errorf("test %q:\n result mismatch:\ngot: %#v, want: %#v", test.rawurl, n, test.wantResult)
+ t.Errorf("test %q:\n result mismatch:\ngot: %#v\nwant: %#v", test.rawurl, n, test.wantResult)
}
}
}
@@ -181,9 +163,9 @@ func TestNodeString(t *testing.T) {
}
func TestHexID(t *testing.T) {
- ref := NodeID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188}
- id1 := MustHexID("0x000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
- id2 := MustHexID("000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
+ ref := ID{0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188}
+ id1 := HexID("0x00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
+ id2 := HexID("00000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
if id1 != ref {
t.Errorf("wrong id1\ngot %v\nwant %v", id1[:], ref[:])
@@ -193,17 +175,14 @@ func TestHexID(t *testing.T) {
}
}
-func TestNodeID_textEncoding(t *testing.T) {
- ref := NodeID{
+func TestID_textEncoding(t *testing.T) {
+ ref := ID{
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30,
- 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40,
- 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x50,
- 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x60,
- 0x61, 0x62, 0x63, 0x64,
+ 0x31, 0x32,
}
- hex := "01020304050607080910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364"
+ hex := "0102030405060708091011121314151617181920212223242526272829303132"
text, err := ref.MarshalText()
if err != nil {
@@ -213,7 +192,7 @@ func TestNodeID_textEncoding(t *testing.T) {
t.Fatalf("text encoding did not match\nexpected: %s\ngot: %s", hex, text)
}
- id := new(NodeID)
+ id := new(ID)
if err := id.UnmarshalText(text); err != nil {
t.Fatal(err)
}
@@ -222,114 +201,43 @@ func TestNodeID_textEncoding(t *testing.T) {
}
}
-func TestNodeID_recover(t *testing.T) {
- prv := newkey()
- hash := make([]byte, 32)
- sig, err := crypto.Sign(hash, prv)
- if err != nil {
- t.Fatalf("signing error: %v", err)
- }
-
- pub := PubkeyID(&prv.PublicKey)
- recpub, err := recoverNodeID(hash, sig)
- if err != nil {
- t.Fatalf("recovery error: %v", err)
- }
- if pub != recpub {
- t.Errorf("recovered wrong pubkey:\ngot: %v\nwant: %v", recpub, pub)
- }
-
- ecdsa, err := pub.Pubkey()
- if err != nil {
- t.Errorf("Pubkey error: %v", err)
- }
- if !reflect.DeepEqual(ecdsa, &prv.PublicKey) {
- t.Errorf("Pubkey mismatch:\n got: %#v\n want: %#v", ecdsa, &prv.PublicKey)
- }
-}
-
-func TestNodeID_pubkeyBad(t *testing.T) {
- ecdsa, err := NodeID{}.Pubkey()
- if err == nil {
- t.Error("expected error for zero ID")
- }
- if ecdsa != nil {
- t.Error("expected nil result")
- }
-}
-
func TestNodeID_distcmp(t *testing.T) {
- distcmpBig := func(target, a, b common.Hash) int {
+ distcmpBig := func(target, a, b ID) int {
tbig := new(big.Int).SetBytes(target[:])
abig := new(big.Int).SetBytes(a[:])
bbig := new(big.Int).SetBytes(b[:])
return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig))
}
- if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg()); err != nil {
+ if err := quick.CheckEqual(DistCmp, distcmpBig, nil); err != nil {
t.Error(err)
}
}
-// the random tests is likely to miss the case where they're equal.
+// The random test is likely to miss the case where a and b are equal,
+// this test checks it explicitly.
func TestNodeID_distcmpEqual(t *testing.T) {
- base := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
- x := common.Hash{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
- if distcmp(base, x, x) != 0 {
- t.Errorf("distcmp(base, x, x) != 0")
+ base := ID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ x := ID{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
+ if DistCmp(base, x, x) != 0 {
+ t.Errorf("DistCmp(base, x, x) != 0")
}
}
func TestNodeID_logdist(t *testing.T) {
- logdistBig := func(a, b common.Hash) int {
+ logdistBig := func(a, b ID) int {
abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
return new(big.Int).Xor(abig, bbig).BitLen()
}
- if err := quick.CheckEqual(logdist, logdistBig, quickcfg()); err != nil {
+ if err := quick.CheckEqual(LogDist, logdistBig, nil); err != nil {
t.Error(err)
}
}
-// the random tests is likely to miss the case where they're equal.
+// The random test is likely to miss the case where a and b are equal,
+// this test checks it explicitly.
func TestNodeID_logdistEqual(t *testing.T) {
- x := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
- if logdist(x, x) != 0 {
- t.Errorf("logdist(x, x) != 0")
- }
-}
-
-func TestNodeID_hashAtDistance(t *testing.T) {
- // we don't use quick.Check here because its output isn't
- // very helpful when the test fails.
- cfg := quickcfg()
- for i := 0; i < cfg.MaxCount; i++ {
- a := gen(common.Hash{}, cfg.Rand).(common.Hash)
- dist := cfg.Rand.Intn(len(common.Hash{}) * 8)
- result := hashAtDistance(a, dist)
- actualdist := logdist(result, a)
-
- if dist != actualdist {
- t.Log("a: ", a)
- t.Log("result:", result)
- t.Fatalf("#%d: distance of result is %d, want %d", i, actualdist, dist)
- }
- }
-}
-
-func quickcfg() *quick.Config {
- return &quick.Config{
- MaxCount: 5000,
- Rand: rand.New(rand.NewSource(time.Now().Unix())),
- }
-}
-
-// TODO: The Generate method can be dropped when we require Go >= 1.5
-// because testing/quick learned to generate arrays in 1.5.
-
-func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value {
- var id NodeID
- m := rand.Intn(len(id))
- for i := len(id) - 1; i > m; i-- {
- id[i] = byte(rand.Uint32())
+ x := ID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ if LogDist(x, x) != 0 {
+ t.Errorf("LogDist(x, x) != 0")
}
- return reflect.ValueOf(id)
}
diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go
index 5aca3ab25a6c..27d9d4bbac9f 100644
--- a/p2p/enr/enr.go
+++ b/p2p/enr/enr.go
@@ -15,39 +15,42 @@
// along with the go-ethereum library. If not, see .
// Package enr implements Ethereum Node Records as defined in EIP-778. A node record holds
-// arbitrary information about a node on the peer-to-peer network.
-//
-// Records contain named keys. To store and retrieve key/values in a record, use the Entry
+// arbitrary information about a node on the peer-to-peer network. Node information is
+// stored in key/value pairs. To store and retrieve key/values in a record, use the Entry
// interface.
//
-// Records must be signed before transmitting them to another node. Decoding a record verifies
-// its signature. When creating a record, set the entries you want, then call Sign to add the
-// signature. Modifying a record invalidates the signature.
+// # Signature Handling
+//
+// Records must be signed before transmitting them to another node.
+//
+// Decoding a record doesn't check its signature. Code working with records from an
+// untrusted source must always verify two things: that the record uses an identity scheme
+// deemed secure, and that the signature is valid according to the declared scheme.
+//
+// When creating a record, set the entries you want and use a signing function provided by
+// the identity scheme to add the signature. Modifying a record invalidates the signature.
//
// Package enr supports the "secp256k1-keccak" identity scheme.
package enr
import (
"bytes"
- "crypto/ecdsa"
"errors"
"fmt"
"io"
"sort"
- "github.com/XinFinOrg/XDPoSChain/crypto"
- "github.com/XinFinOrg/XDPoSChain/crypto/sha3"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
const SizeLimit = 300 // maximum encoded size of a node record in bytes
-const ID_SECP256k1_KECCAK = ID("secp256k1-keccak") // the default identity scheme
-
var (
- errNoID = errors.New("unknown or unspecified identity scheme")
- errInvalidSigsize = errors.New("invalid signature size")
- errInvalidSig = errors.New("invalid signature")
+ // TODO: check whether the commented-out error variables below are still needed after the merge
+ // errNoID = errors.New("unknown or unspecified identity scheme")
+ // errInvalidSigsize = errors.New("invalid signature size")
+ // errInvalidSig = errors.New("invalid signature")
+ ErrInvalidSig = errors.New("invalid signature on node record")
errNotSorted = errors.New("record key/value pairs are not sorted by key")
errDuplicateKey = errors.New("record contains duplicate key")
errIncompletePair = errors.New("record contains incomplete k/v pair")
@@ -56,6 +59,32 @@ var (
errNotFound = errors.New("no such key in record")
)
+// An IdentityScheme is capable of verifying record signatures and
+// deriving node addresses.
+type IdentityScheme interface {
+ Verify(r *Record, sig []byte) error
+ NodeAddr(r *Record) []byte
+}
+
+// SchemeMap is a registry of named identity schemes.
+type SchemeMap map[string]IdentityScheme
+
+func (m SchemeMap) Verify(r *Record, sig []byte) error {
+ s := m[r.IdentityScheme()]
+ if s == nil {
+ return ErrInvalidSig
+ }
+ return s.Verify(r, sig)
+}
+
+func (m SchemeMap) NodeAddr(r *Record) []byte {
+ s := m[r.IdentityScheme()]
+ if s == nil {
+ return nil
+ }
+ return s.NodeAddr(r)
+}
+
// Record represents a node record. The zero value is an empty record.
type Record struct {
seq uint64 // sequence number
@@ -70,19 +99,14 @@ type pair struct {
v rlp.RawValue
}
-// Signed reports whether the record has a valid signature.
-func (r *Record) Signed() bool {
- return r.signature != nil
-}
-
// Seq returns the sequence number.
func (r *Record) Seq() uint64 {
return r.seq
}
// SetSeq updates the record sequence number. This invalidates any signature on the record.
-// Calling SetSeq is usually not required because signing the redord increments the
-// sequence number.
+// Calling SetSeq is usually not required because setting any key in a signed record
+// increments the sequence number.
func (r *Record) SetSeq(s uint64) {
r.signature = nil
r.raw = nil
@@ -105,39 +129,48 @@ func (r *Record) Load(e Entry) error {
return &KeyError{Key: e.ENRKey(), Err: errNotFound}
}
-// Set adds or updates the given entry in the record.
-// It panics if the value can't be encoded.
+// Set adds or updates the given entry in the record. It panics if the value can't be
+// encoded. If the record is signed, Set increments the sequence number and invalidates
+// the sequence number.
func (r *Record) Set(e Entry) {
- r.signature = nil
- r.raw = nil
blob, err := rlp.EncodeToBytes(e)
if err != nil {
panic(fmt.Errorf("enr: can't encode %s: %v", e.ENRKey(), err))
}
+ r.invalidate()
- i := sort.Search(len(r.pairs), func(i int) bool { return r.pairs[i].k >= e.ENRKey() })
-
- if i < len(r.pairs) && r.pairs[i].k == e.ENRKey() {
+ pairs := make([]pair, len(r.pairs))
+ copy(pairs, r.pairs)
+ i := sort.Search(len(pairs), func(i int) bool { return pairs[i].k >= e.ENRKey() })
+ switch {
+ case i < len(pairs) && pairs[i].k == e.ENRKey():
// element is present at r.pairs[i]
- r.pairs[i].v = blob
- return
- } else if i < len(r.pairs) {
+ pairs[i].v = blob
+ case i < len(r.pairs):
// insert pair before i-th elem
el := pair{e.ENRKey(), blob}
- r.pairs = append(r.pairs, pair{})
- copy(r.pairs[i+1:], r.pairs[i:])
- r.pairs[i] = el
- return
+ pairs = append(pairs, pair{})
+ copy(pairs[i+1:], pairs[i:])
+ pairs[i] = el
+ default:
+ // element should be placed at the end of r.pairs
+ pairs = append(pairs, pair{e.ENRKey(), blob})
}
+ r.pairs = pairs
+}
- // element should be placed at the end of r.pairs
- r.pairs = append(r.pairs, pair{e.ENRKey(), blob})
+func (r *Record) invalidate() {
+ if r.signature != nil {
+ r.seq++
+ }
+ r.signature = nil
+ r.raw = nil
}
// EncodeRLP implements rlp.Encoder. Encoding fails if
// the record is unsigned.
func (r Record) EncodeRLP(w io.Writer) error {
- if !r.Signed() {
+ if r.signature == nil {
return errEncodeUnsigned
}
_, err := w.Write(r.raw)
@@ -146,25 +179,34 @@ func (r Record) EncodeRLP(w io.Writer) error {
// DecodeRLP implements rlp.Decoder. Decoding verifies the signature.
func (r *Record) DecodeRLP(s *rlp.Stream) error {
- raw, err := s.Raw()
+ dec, raw, err := decodeRecord(s)
if err != nil {
return err
}
+ *r = dec
+ r.raw = raw
+ return nil
+}
+
+func decodeRecord(s *rlp.Stream) (dec Record, raw []byte, err error) {
+ raw, err = s.Raw()
+ if err != nil {
+ return dec, raw, err
+ }
if len(raw) > SizeLimit {
- return errTooBig
+ return dec, raw, errTooBig
}
// Decode the RLP container.
- dec := Record{raw: raw}
s = rlp.NewStream(bytes.NewReader(raw), 0)
if _, err := s.List(); err != nil {
- return err
+ return dec, raw, err
}
if err = s.Decode(&dec.signature); err != nil {
- return err
+ return dec, raw, err
}
if err = s.Decode(&dec.seq); err != nil {
- return err
+ return dec, raw, err
}
// The rest of the record contains sorted k/v pairs.
var prevkey string
@@ -174,62 +216,73 @@ func (r *Record) DecodeRLP(s *rlp.Stream) error {
if err == rlp.EOL {
break
}
- return err
+ return dec, raw, err
}
if err := s.Decode(&kv.v); err != nil {
if err == rlp.EOL {
- return errIncompletePair
+ return dec, raw, errIncompletePair
}
- return err
+ return dec, raw, err
}
if i > 0 {
if kv.k == prevkey {
- return errDuplicateKey
+ return dec, raw, errDuplicateKey
}
if kv.k < prevkey {
- return errNotSorted
+ return dec, raw, errNotSorted
}
}
dec.pairs = append(dec.pairs, kv)
prevkey = kv.k
}
- if err := s.ListEnd(); err != nil {
- return err
- }
-
- // Verify signature.
- if err = dec.verifySignature(); err != nil {
- return err
- }
- *r = dec
- return nil
+ return dec, raw, s.ListEnd()
}
-type s256raw []byte
-
-func (s256raw) ENRKey() string { return "secp256k1" }
+// IdentityScheme returns the name of the identity scheme in the record.
+func (r *Record) IdentityScheme() string {
+ var id ID
+ r.Load(&id)
+ return string(id)
+}
-// NodeAddr returns the node address. The return value will be nil if the record is
-// unsigned.
-func (r *Record) NodeAddr() []byte {
- var entry s256raw
- if r.Load(&entry) != nil {
- return nil
- }
- return crypto.Keccak256(entry)
+// VerifySignature checks whether the record is signed using the given identity scheme.
+func (r *Record) VerifySignature(s IdentityScheme) error {
+ return s.Verify(r, r.signature)
}
-// Sign signs the record with the given private key. It updates the record's identity
-// scheme, public key and increments the sequence number. Sign returns an error if the
-// encoded record is larger than the size limit.
-func (r *Record) Sign(privkey *ecdsa.PrivateKey) error {
- r.seq = r.seq + 1
- r.Set(ID_SECP256k1_KECCAK)
- r.Set(Secp256k1(privkey.PublicKey))
- return r.signAndEncode(privkey)
+// SetSig sets the record signature. It returns an error if the encoded record is larger
+// than the size limit or if the signature is invalid according to the passed scheme.
+//
+// You can also use SetSig to remove the signature explicitly by passing a nil scheme
+// and signature.
+//
+// SetSig panics when either the scheme or the signature (but not both) are nil.
+func (r *Record) SetSig(s IdentityScheme, sig []byte) error {
+ switch {
+ // Prevent storing invalid data.
+ case s == nil && sig != nil:
+ panic("enr: invalid call to SetSig with non-nil signature but nil scheme")
+ case s != nil && sig == nil:
+ panic("enr: invalid call to SetSig with nil signature but non-nil scheme")
+ // Verify if we have a scheme.
+ case s != nil:
+ if err := s.Verify(r, sig); err != nil {
+ return err
+ }
+ raw, err := r.encode(sig)
+ if err != nil {
+ return err
+ }
+ r.signature, r.raw = sig, raw
+ // Reset otherwise.
+ default:
+ r.signature, r.raw = nil, nil
+ }
+ return nil
}
-func (r *Record) appendPairs(list []interface{}) []interface{} {
+// AppendElements appends the sequence number and entries to the given slice.
+func (r *Record) AppendElements(list []interface{}) []interface{} {
list = append(list, r.seq)
for _, p := range r.pairs {
list = append(list, p.k, p.v)
@@ -237,54 +290,15 @@ func (r *Record) appendPairs(list []interface{}) []interface{} {
return list
}
-func (r *Record) signAndEncode(privkey *ecdsa.PrivateKey) error {
- // Put record elements into a flat list. Leave room for the signature.
- list := make([]interface{}, 1, len(r.pairs)*2+2)
- list = r.appendPairs(list)
-
- // Sign the tail of the list.
- h := sha3.NewKeccak256()
- rlp.Encode(h, list[1:])
- sig, err := crypto.Sign(h.Sum(nil), privkey)
- if err != nil {
- return err
- }
- sig = sig[:len(sig)-1] // remove v
-
- // Put signature in front.
- r.signature, list[0] = sig, sig
- r.raw, err = rlp.EncodeToBytes(list)
- if err != nil {
- return err
- }
- if len(r.raw) > SizeLimit {
- return errTooBig
+func (r *Record) encode(sig []byte) (raw []byte, err error) {
+ list := make([]interface{}, 1, 2*len(r.pairs)+1)
+ list[0] = sig
+ list = r.AppendElements(list)
+ if raw, err = rlp.EncodeToBytes(list); err != nil {
+ return nil, err
}
- return nil
-}
-
-func (r *Record) verifySignature() error {
- // Get identity scheme, public key, signature.
- var id ID
- var entry s256raw
- if err := r.Load(&id); err != nil {
- return err
- } else if id != ID_SECP256k1_KECCAK {
- return errNoID
- }
- if err := r.Load(&entry); err != nil {
- return err
- } else if len(entry) != 33 {
- return errors.New("invalid public key")
- }
-
- // Verify the signature.
- list := make([]interface{}, 0, len(r.pairs)*2+1)
- list = r.appendPairs(list)
- h := sha3.NewKeccak256()
- rlp.Encode(h, list)
- if !crypto.VerifySignature(entry, h.Sum(nil), r.signature) {
- return errInvalidSig
+ if len(raw) > SizeLimit {
+ return nil, errTooBig
}
- return nil
+ return raw, nil
}
diff --git a/p2p/enr/enr_test.go b/p2p/enr/enr_test.go
index 41671c8b9363..12a72d1d1957 100644
--- a/p2p/enr/enr_test.go
+++ b/p2p/enr/enr_test.go
@@ -18,23 +18,17 @@ package enr
import (
"bytes"
- "encoding/hex"
+ "encoding/binary"
"fmt"
"math/rand"
"testing"
"time"
- "github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/rlp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-var (
- privkey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- pubkey = &privkey.PublicKey
-)
-
var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
func randomString(strlen int) string {
@@ -54,63 +48,51 @@ func TestGetSetID(t *testing.T) {
assert.Equal(t, id, id2)
}
-// TestGetSetIP4 tests encoding/decoding and setting/getting of the IP4 key.
+// TestGetSetIP4 tests encoding/decoding and setting/getting of the IP key.
func TestGetSetIP4(t *testing.T) {
- ip := IP4{192, 168, 0, 3}
+ ip := IP{192, 168, 0, 3}
var r Record
r.Set(ip)
- var ip2 IP4
+ var ip2 IP
require.NoError(t, r.Load(&ip2))
assert.Equal(t, ip, ip2)
}
-// TestGetSetIP6 tests encoding/decoding and setting/getting of the IP6 key.
+// TestGetSetIP6 tests encoding/decoding and setting/getting of the IP key.
func TestGetSetIP6(t *testing.T) {
- ip := IP6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}
+ ip := IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}
var r Record
r.Set(ip)
- var ip2 IP6
+ var ip2 IP
require.NoError(t, r.Load(&ip2))
assert.Equal(t, ip, ip2)
}
-// TestGetSetDiscPort tests encoding/decoding and setting/getting of the DiscPort key.
-func TestGetSetDiscPort(t *testing.T) {
- port := DiscPort(30309)
+// TestGetSetUDP tests encoding/decoding and setting/getting of the UDP key.
+func TestGetSetUDP(t *testing.T) {
+ port := UDP(30309)
var r Record
r.Set(port)
- var port2 DiscPort
+ var port2 UDP
require.NoError(t, r.Load(&port2))
assert.Equal(t, port, port2)
}
-// TestGetSetSecp256k1 tests encoding/decoding and setting/getting of the Secp256k1 key.
-func TestGetSetSecp256k1(t *testing.T) {
- var r Record
- if err := r.Sign(privkey); err != nil {
- t.Fatal(err)
- }
-
- var pk Secp256k1
- require.NoError(t, r.Load(&pk))
- assert.EqualValues(t, pubkey, &pk)
-}
-
func TestLoadErrors(t *testing.T) {
var r Record
- ip4 := IP4{127, 0, 0, 1}
+ ip4 := IP{127, 0, 0, 1}
r.Set(ip4)
// Check error for missing keys.
- var ip6 IP6
- err := r.Load(&ip6)
+ var udp UDP
+ err := r.Load(&udp)
if !IsNotFound(err) {
t.Error("IsNotFound should return true for missing key")
}
- assert.Equal(t, &KeyError{Key: ip6.ENRKey(), Err: errNotFound}, err)
+ assert.Equal(t, &KeyError{Key: udp.ENRKey(), Err: errNotFound}, err)
// Check error for invalid keys.
var list []uint
@@ -167,40 +149,49 @@ func TestSortedGetAndSet(t *testing.T) {
func TestDirty(t *testing.T) {
var r Record
- if r.Signed() {
- t.Error("Signed returned true for zero record")
- }
if _, err := rlp.EncodeToBytes(r); err != errEncodeUnsigned {
t.Errorf("expected errEncodeUnsigned, got %#v", err)
}
- require.NoError(t, r.Sign(privkey))
- if !r.Signed() {
- t.Error("Signed return false for signed record")
+ require.NoError(t, signTest([]byte{5}, &r))
+ if len(r.signature) == 0 {
+ t.Error("record is not signed")
}
_, err := rlp.EncodeToBytes(r)
assert.NoError(t, err)
r.SetSeq(3)
- if r.Signed() {
- t.Error("Signed returned true for modified record")
+ if len(r.signature) != 0 {
+ t.Error("signature still set after modification")
}
if _, err := rlp.EncodeToBytes(r); err != errEncodeUnsigned {
t.Errorf("expected errEncodeUnsigned, got %#v", err)
}
}
+func TestSeq(t *testing.T) {
+ var r Record
+
+ assert.Equal(t, uint64(0), r.Seq())
+ r.Set(UDP(1))
+ assert.Equal(t, uint64(0), r.Seq())
+ signTest([]byte{5}, &r)
+ assert.Equal(t, uint64(0), r.Seq())
+ r.Set(UDP(2))
+ assert.Equal(t, uint64(1), r.Seq())
+}
+
// TestGetSetOverwrite tests value overwrite when setting a new value with an existing key in record.
func TestGetSetOverwrite(t *testing.T) {
var r Record
- ip := IP4{192, 168, 0, 3}
+ ip := IP{192, 168, 0, 3}
r.Set(ip)
- ip2 := IP4{192, 168, 0, 4}
+ ip2 := IP{192, 168, 0, 4}
r.Set(ip2)
- var ip3 IP4
+ var ip3 IP
require.NoError(t, r.Load(&ip3))
assert.Equal(t, ip2, ip3)
}
@@ -208,9 +199,9 @@ func TestGetSetOverwrite(t *testing.T) {
// TestSignEncodeAndDecode tests signing, RLP encoding and RLP decoding of a record.
func TestSignEncodeAndDecode(t *testing.T) {
var r Record
- r.Set(DiscPort(30303))
- r.Set(IP4{127, 0, 0, 1})
- require.NoError(t, r.Sign(privkey))
+ r.Set(UDP(30303))
+ r.Set(IP{127, 0, 0, 1})
+ require.NoError(t, signTest([]byte{5}, &r))
blob, err := rlp.EncodeToBytes(r)
require.NoError(t, err)
@@ -224,62 +215,20 @@ func TestSignEncodeAndDecode(t *testing.T) {
assert.Equal(t, blob, blob2)
}
-func TestNodeAddr(t *testing.T) {
- var r Record
- if addr := r.NodeAddr(); addr != nil {
- t.Errorf("wrong address on empty record: got %v, want %v", addr, nil)
- }
-
- require.NoError(t, r.Sign(privkey))
- expected := "caaa1485d83b18b32ed9ad666026151bf0cae8a0a88c857ae2d4c5be2daa6726"
- assert.Equal(t, expected, hex.EncodeToString(r.NodeAddr()))
-}
-
-var pyRecord, _ = hex.DecodeString("f896b840954dc36583c1f4b69ab59b1375f362f06ee99f3723cd77e64b6de6d211c27d7870642a79d4516997f94091325d2a7ca6215376971455fb221d34f35b277149a1018664697363763582765f82696490736563703235366b312d6b656363616b83697034847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138")
-
-// TestPythonInterop checks that we can decode and verify a record produced by the Python
-// implementation.
-func TestPythonInterop(t *testing.T) {
- var r Record
- if err := rlp.DecodeBytes(pyRecord, &r); err != nil {
- t.Fatalf("can't decode: %v", err)
- }
-
- var (
- wantAddr, _ = hex.DecodeString("caaa1485d83b18b32ed9ad666026151bf0cae8a0a88c857ae2d4c5be2daa6726")
- wantSeq = uint64(1)
- wantIP = IP4{127, 0, 0, 1}
- wantDiscport = DiscPort(30303)
- )
- if r.Seq() != wantSeq {
- t.Errorf("wrong seq: got %d, want %d", r.Seq(), wantSeq)
- }
- if addr := r.NodeAddr(); !bytes.Equal(addr, wantAddr) {
- t.Errorf("wrong addr: got %x, want %x", addr, wantAddr)
- }
- want := map[Entry]interface{}{new(IP4): &wantIP, new(DiscPort): &wantDiscport}
- for k, v := range want {
- desc := fmt.Sprintf("loading key %q", k.ENRKey())
- if assert.NoError(t, r.Load(k), desc) {
- assert.Equal(t, k, v, desc)
- }
- }
-}
-
// TestRecordTooBig tests that records bigger than SizeLimit bytes cannot be signed.
func TestRecordTooBig(t *testing.T) {
var r Record
key := randomString(10)
// set a big value for random key, expect error
- r.Set(WithEntry(key, randomString(300)))
- if err := r.Sign(privkey); err != errTooBig {
+ r.Set(WithEntry(key, randomString(SizeLimit)))
+ if err := signTest([]byte{5}, &r); err != errTooBig {
t.Fatalf("expected to get errTooBig, got %#v", err)
}
// set an acceptable value for random key, expect no error
r.Set(WithEntry(key, randomString(100)))
- require.NoError(t, r.Sign(privkey))
+ require.NoError(t, signTest([]byte{5}, &r))
}
// TestSignEncodeAndDecodeRandom tests encoding/decoding of records containing random key/value pairs.
@@ -295,7 +244,7 @@ func TestSignEncodeAndDecodeRandom(t *testing.T) {
r.Set(WithEntry(key, &value))
}
- require.NoError(t, r.Sign(privkey))
+ require.NoError(t, signTest([]byte{5}, &r))
_, err := rlp.EncodeToBytes(r)
require.NoError(t, err)
@@ -308,11 +257,40 @@ func TestSignEncodeAndDecodeRandom(t *testing.T) {
}
}
-func BenchmarkDecode(b *testing.B) {
- var r Record
- for i := 0; i < b.N; i++ {
- rlp.DecodeBytes(pyRecord, &r)
+type testSig struct{}
+
+type testID []byte
+
+func (id testID) ENRKey() string { return "testid" }
+
+func signTest(id []byte, r *Record) error {
+ r.Set(ID("test"))
+ r.Set(testID(id))
+ return r.SetSig(testSig{}, makeTestSig(id, r.Seq()))
+}
+
+func makeTestSig(id []byte, seq uint64) []byte {
+ sig := make([]byte, 8, len(id)+8)
+ binary.BigEndian.PutUint64(sig[:8], seq)
+ sig = append(sig, id...)
+ return sig
+}
+
+func (testSig) Verify(r *Record, sig []byte) error {
+ var id []byte
+ if err := r.Load((*testID)(&id)); err != nil {
+ return err
+ }
+ if !bytes.Equal(sig, makeTestSig(id, r.Seq())) {
+ return ErrInvalidSig
+ }
+ return nil
+}
+
+func (testSig) NodeAddr(r *Record) []byte {
+ var id []byte
+ if err := r.Load((*testID)(&id)); err != nil {
+ return nil
}
- b.StopTimer()
- r.NodeAddr()
+ return id
}
diff --git a/p2p/enr/entries.go b/p2p/enr/entries.go
index 2d31bece09bb..22f839d836ec 100644
--- a/p2p/enr/entries.go
+++ b/p2p/enr/entries.go
@@ -17,12 +17,10 @@
package enr
import (
- "crypto/ecdsa"
"fmt"
"io"
"net"
- "github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -57,84 +55,44 @@ func WithEntry(k string, v interface{}) Entry {
return &generic{key: k, value: v}
}
-// DiscPort is the "discv5" key, which holds the UDP port for discovery v5.
-type DiscPort uint16
+// TCP is the "tcp" key, which holds the TCP port of the node.
+type TCP uint16
-func (v DiscPort) ENRKey() string { return "discv5" }
+func (v TCP) ENRKey() string { return "tcp" }
+
+// UDP is the "udp" key, which holds the UDP port of the node.
+type UDP uint16
+
+func (v UDP) ENRKey() string { return "udp" }
// ID is the "id" key, which holds the name of the identity scheme.
type ID string
+const IDv4 = ID("v4") // the default identity scheme
+
func (v ID) ENRKey() string { return "id" }
-// IP4 is the "ip4" key, which holds a 4-byte IPv4 address.
-type IP4 net.IP
+// IP is the "ip" key, which holds the IP address of the node.
+type IP net.IP
-func (v IP4) ENRKey() string { return "ip4" }
+func (v IP) ENRKey() string { return "ip" }
// EncodeRLP implements rlp.Encoder.
-func (v IP4) EncodeRLP(w io.Writer) error {
- ip4 := net.IP(v).To4()
- if ip4 == nil {
- return fmt.Errorf("invalid IPv4 address: %v", v)
+func (v IP) EncodeRLP(w io.Writer) error {
+ if ip4 := net.IP(v).To4(); ip4 != nil {
+ return rlp.Encode(w, ip4)
}
- return rlp.Encode(w, ip4)
+ return rlp.Encode(w, net.IP(v))
}
// DecodeRLP implements rlp.Decoder.
-func (v *IP4) DecodeRLP(s *rlp.Stream) error {
+func (v *IP) DecodeRLP(s *rlp.Stream) error {
if err := s.Decode((*net.IP)(v)); err != nil {
return err
}
- if len(*v) != 4 {
- return fmt.Errorf("invalid IPv4 address, want 4 bytes: %v", *v)
- }
- return nil
-}
-
-// IP6 is the "ip6" key, which holds a 16-byte IPv6 address.
-type IP6 net.IP
-
-func (v IP6) ENRKey() string { return "ip6" }
-
-// EncodeRLP implements rlp.Encoder.
-func (v IP6) EncodeRLP(w io.Writer) error {
- ip6 := net.IP(v)
- return rlp.Encode(w, ip6)
-}
-
-// DecodeRLP implements rlp.Decoder.
-func (v *IP6) DecodeRLP(s *rlp.Stream) error {
- if err := s.Decode((*net.IP)(v)); err != nil {
- return err
- }
- if len(*v) != 16 {
- return fmt.Errorf("invalid IPv6 address, want 16 bytes: %v", *v)
- }
- return nil
-}
-
-// Secp256k1 is the "secp256k1" key, which holds a public key.
-type Secp256k1 ecdsa.PublicKey
-
-func (v Secp256k1) ENRKey() string { return "secp256k1" }
-
-// EncodeRLP implements rlp.Encoder.
-func (v Secp256k1) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, crypto.CompressPubkey((*ecdsa.PublicKey)(&v)))
-}
-
-// DecodeRLP implements rlp.Decoder.
-func (v *Secp256k1) DecodeRLP(s *rlp.Stream) error {
- buf, err := s.Bytes()
- if err != nil {
- return err
- }
- pk, err := crypto.DecompressPubkey(buf)
- if err != nil {
- return err
+ if len(*v) != 4 && len(*v) != 16 {
+ return fmt.Errorf("invalid IP address, want 4 or 16 bytes: %v", *v)
}
- *v = (Secp256k1)(*pk)
return nil
}
diff --git a/p2p/message.go b/p2p/message.go
index d39bcb31f6b6..a517303622b3 100644
--- a/p2p/message.go
+++ b/p2p/message.go
@@ -25,7 +25,7 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/event"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
@@ -252,13 +252,13 @@ type msgEventer struct {
MsgReadWriter
feed *event.Feed
- peerID discover.NodeID
+ peerID enode.ID
Protocol string
}
// newMsgEventer returns a msgEventer which sends message events to the given
// feed
-func newMsgEventer(rw MsgReadWriter, feed *event.Feed, peerID discover.NodeID, proto string) *msgEventer {
+func newMsgEventer(rw MsgReadWriter, feed *event.Feed, peerID enode.ID, proto string) *msgEventer {
return &msgEventer{
MsgReadWriter: rw,
feed: feed,
diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go
index 48117bfab4ed..db3470bffe24 100644
--- a/p2p/nat/nat.go
+++ b/p2p/nat/nat.go
@@ -129,21 +129,15 @@ func Map(m Interface, c chan struct{}, protocol string, extport, intport int, na
// ExtIP assumes that the local machine is reachable on the given
// external IP address, and that any required ports were mapped manually.
// Mapping operations will not return an error but won't actually do anything.
-func ExtIP(ip net.IP) Interface {
- if ip == nil {
- panic("IP must not be nil")
- }
- return extIP(ip)
-}
+type ExtIP net.IP
-type extIP net.IP
-
-func (n extIP) ExternalIP() (net.IP, error) { return net.IP(n), nil }
-func (n extIP) String() string { return fmt.Sprintf("ExtIP(%v)", net.IP(n)) }
+func (n ExtIP) ExternalIP() (net.IP, error) { return net.IP(n), nil }
+func (n ExtIP) String() string { return fmt.Sprintf("ExtIP(%v)", net.IP(n)) }
// These do nothing.
-func (extIP) AddMapping(string, int, int, string, time.Duration) error { return nil }
-func (extIP) DeleteMapping(string, int, int) error { return nil }
+
+func (ExtIP) AddMapping(string, int, int, string, time.Duration) error { return nil }
+func (ExtIP) DeleteMapping(string, int, int) error { return nil }
// Any returns a port mapper that tries to discover any supported
// mechanism on the local network.
diff --git a/p2p/nat/nat_test.go b/p2p/nat/nat_test.go
index 469101e997e9..814e6d9e14c8 100644
--- a/p2p/nat/nat_test.go
+++ b/p2p/nat/nat_test.go
@@ -28,7 +28,7 @@ import (
func TestAutoDiscRace(t *testing.T) {
ad := startautodisc("thing", func() Interface {
time.Sleep(500 * time.Millisecond)
- return extIP{33, 44, 55, 66}
+ return ExtIP{33, 44, 55, 66}
})
// Spawn a few concurrent calls to ad.ExternalIP.
diff --git a/p2p/netutil/iptrack.go b/p2p/netutil/iptrack.go
new file mode 100644
index 000000000000..1a34ec81f5c9
--- /dev/null
+++ b/p2p/netutil/iptrack.go
@@ -0,0 +1,130 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package netutil
+
+import (
+ "time"
+
+ "github.com/XinFinOrg/XDPoSChain/common/mclock"
+)
+
+// IPTracker predicts the external endpoint, i.e. IP address and port, of the local host
+// based on statements made by other hosts.
+type IPTracker struct {
+ window time.Duration
+ contactWindow time.Duration
+ minStatements int
+ clock mclock.Clock
+ statements map[string]ipStatement
+ contact map[string]mclock.AbsTime
+ lastStatementGC mclock.AbsTime
+ lastContactGC mclock.AbsTime
+}
+
+type ipStatement struct {
+ endpoint string
+ time mclock.AbsTime
+}
+
+// NewIPTracker creates an IP tracker.
+//
+// The window parameters configure the amount of past network events which are kept. The
+// minStatements parameter enforces a minimum number of statements which must be recorded
+// before any prediction is made. Higher values for these parameters decrease 'flapping' of
+// predictions as network conditions change. Window duration values should typically be in
+// the range of minutes.
+func NewIPTracker(window, contactWindow time.Duration, minStatements int) *IPTracker {
+ return &IPTracker{
+ window: window,
+ contactWindow: contactWindow,
+ statements: make(map[string]ipStatement),
+ minStatements: minStatements,
+ contact: make(map[string]mclock.AbsTime),
+ clock: mclock.System{},
+ }
+}
+
+// PredictFullConeNAT checks whether the local host is behind full cone NAT. It predicts by
+// checking whether any statement has been received from a node we didn't contact before
+// the statement was made.
+func (it *IPTracker) PredictFullConeNAT() bool {
+ now := it.clock.Now()
+ it.gcContact(now)
+ it.gcStatements(now)
+ for host, st := range it.statements {
+ if c, ok := it.contact[host]; !ok || c > st.time {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictEndpoint returns the current prediction of the external endpoint.
+func (it *IPTracker) PredictEndpoint() string {
+ it.gcStatements(it.clock.Now())
+
+ // The current strategy is simple: find the endpoint with most statements.
+ counts := make(map[string]int)
+ maxcount, max := 0, ""
+ for _, s := range it.statements {
+ c := counts[s.endpoint] + 1
+ counts[s.endpoint] = c
+ if c > maxcount && c >= it.minStatements {
+ maxcount, max = c, s.endpoint
+ }
+ }
+ return max
+}
+
+// AddStatement records that a certain host thinks our external endpoint is the one given.
+func (it *IPTracker) AddStatement(host, endpoint string) {
+ now := it.clock.Now()
+ it.statements[host] = ipStatement{endpoint, now}
+ if time.Duration(now-it.lastStatementGC) >= it.window {
+ it.gcStatements(now)
+ }
+}
+
+// AddContact records that a packet containing our endpoint information has been sent to a
+// certain host.
+func (it *IPTracker) AddContact(host string) {
+ now := it.clock.Now()
+ it.contact[host] = now
+ if time.Duration(now-it.lastContactGC) >= it.contactWindow {
+ it.gcContact(now)
+ }
+}
+
+func (it *IPTracker) gcStatements(now mclock.AbsTime) {
+ it.lastStatementGC = now
+ cutoff := now.Add(-it.window)
+ for host, s := range it.statements {
+ if s.time < cutoff {
+ delete(it.statements, host)
+ }
+ }
+}
+
+func (it *IPTracker) gcContact(now mclock.AbsTime) {
+ it.lastContactGC = now
+ cutoff := now.Add(-it.contactWindow)
+ for host, ct := range it.contact {
+ if ct < cutoff {
+ delete(it.contact, host)
+ }
+ }
+}
diff --git a/p2p/netutil/iptrack_test.go b/p2p/netutil/iptrack_test.go
new file mode 100644
index 000000000000..1d70b8ab83c3
--- /dev/null
+++ b/p2p/netutil/iptrack_test.go
@@ -0,0 +1,138 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package netutil
+
+import (
+ "fmt"
+ mrand "math/rand"
+ "testing"
+ "time"
+
+ "github.com/XinFinOrg/XDPoSChain/common/mclock"
+)
+
+const (
+ opStatement = iota
+ opContact
+ opPredict
+ opCheckFullCone
+)
+
+type iptrackTestEvent struct {
+ op int
+ time int // absolute, in milliseconds
+ ip, from string
+}
+
+func TestIPTracker(t *testing.T) {
+ tests := map[string][]iptrackTestEvent{
+ "minStatements": {
+ {opPredict, 0, "", ""},
+ {opStatement, 0, "127.0.0.1", "127.0.0.2"},
+ {opPredict, 1000, "", ""},
+ {opStatement, 1000, "127.0.0.1", "127.0.0.3"},
+ {opPredict, 1000, "", ""},
+ {opStatement, 1000, "127.0.0.1", "127.0.0.4"},
+ {opPredict, 1000, "127.0.0.1", ""},
+ },
+ "window": {
+ {opStatement, 0, "127.0.0.1", "127.0.0.2"},
+ {opStatement, 2000, "127.0.0.1", "127.0.0.3"},
+ {opStatement, 3000, "127.0.0.1", "127.0.0.4"},
+ {opPredict, 10000, "127.0.0.1", ""},
+ {opPredict, 10001, "", ""}, // first statement expired
+ {opStatement, 10100, "127.0.0.1", "127.0.0.2"},
+ {opPredict, 10200, "127.0.0.1", ""},
+ },
+ "fullcone": {
+ {opContact, 0, "", "127.0.0.2"},
+ {opStatement, 10, "127.0.0.1", "127.0.0.2"},
+ {opContact, 2000, "", "127.0.0.3"},
+ {opStatement, 2010, "127.0.0.1", "127.0.0.3"},
+ {opContact, 3000, "", "127.0.0.4"},
+ {opStatement, 3010, "127.0.0.1", "127.0.0.4"},
+ {opCheckFullCone, 3500, "false", ""},
+ },
+ "fullcone_2": {
+ {opContact, 0, "", "127.0.0.2"},
+ {opStatement, 10, "127.0.0.1", "127.0.0.2"},
+ {opContact, 2000, "", "127.0.0.3"},
+ {opStatement, 2010, "127.0.0.1", "127.0.0.3"},
+ {opStatement, 3000, "127.0.0.1", "127.0.0.4"},
+ {opContact, 3010, "", "127.0.0.4"},
+ {opCheckFullCone, 3500, "true", ""},
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) { runIPTrackerTest(t, test) })
+ }
+}
+
+func runIPTrackerTest(t *testing.T, evs []iptrackTestEvent) {
+ var (
+ clock mclock.Simulated
+ it = NewIPTracker(10*time.Second, 10*time.Second, 3)
+ )
+ it.clock = &clock
+ for i, ev := range evs {
+ evtime := time.Duration(ev.time) * time.Millisecond
+ clock.Run(evtime - time.Duration(clock.Now()))
+ switch ev.op {
+ case opStatement:
+ it.AddStatement(ev.from, ev.ip)
+ case opContact:
+ it.AddContact(ev.from)
+ case opPredict:
+ if pred := it.PredictEndpoint(); pred != ev.ip {
+ t.Errorf("op %d: wrong prediction %q, want %q", i, pred, ev.ip)
+ }
+ case opCheckFullCone:
+ pred := fmt.Sprintf("%t", it.PredictFullConeNAT())
+ if pred != ev.ip {
+ t.Errorf("op %d: wrong prediction %s, want %s", i, pred, ev.ip)
+ }
+ }
+ }
+}
+
+// This checks that old statements and contacts are GCed even if Predict* isn't called.
+func TestIPTrackerForceGC(t *testing.T) {
+ var (
+ clock mclock.Simulated
+ window = 10 * time.Second
+ rate = 50 * time.Millisecond
+ max = int(window/rate) + 1
+ it = NewIPTracker(window, window, 3)
+ )
+ it.clock = &clock
+
+ for i := 0; i < 5*max; i++ {
+ e1 := make([]byte, 4)
+ e2 := make([]byte, 4)
+ mrand.Read(e1)
+ mrand.Read(e2)
+ it.AddStatement(string(e1), string(e2))
+ it.AddContact(string(e1))
+ clock.Run(rate)
+ }
+ if len(it.contact) > 2*max {
+ t.Errorf("contacts not GCed, have %d", len(it.contact))
+ }
+ if len(it.statements) > 2*max {
+ t.Errorf("statements not GCed, have %d", len(it.statements))
+ }
+}
diff --git a/p2p/peer.go b/p2p/peer.go
index 1d1cfc8906f6..523780d13200 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -28,10 +28,15 @@ import (
"github.com/XinFinOrg/XDPoSChain/common/mclock"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
"github.com/XinFinOrg/XDPoSChain/rlp"
)
+var (
+ ErrShuttingDown = errors.New("shutting down")
+)
+
const (
baseProtocolVersion = 5
baseProtocolLength = uint64(16)
@@ -58,7 +63,7 @@ type protoHandshake struct {
Name string
Caps []Cap
ListenPort uint64
- ID discover.NodeID
+ ID []byte // secp256k1 public key
// Ignore additional fields (for forward compatibility).
Rest []rlp.RawValue `rlp:"tail"`
@@ -88,12 +93,12 @@ const (
// PeerEvent is an event emitted when peers are either added or dropped from
// a p2p.Server or when a message is sent or received on a peer connection
type PeerEvent struct {
- Type PeerEventType `json:"type"`
- Peer discover.NodeID `json:"peer"`
- Error string `json:"error,omitempty"`
- Protocol string `json:"protocol,omitempty"`
- MsgCode *uint64 `json:"msg_code,omitempty"`
- MsgSize *uint32 `json:"msg_size,omitempty"`
+ Type PeerEventType `json:"type"`
+ Peer enode.ID `json:"peer"`
+ Error string `json:"error,omitempty"`
+ Protocol string `json:"protocol,omitempty"`
+ MsgCode *uint64 `json:"msg_code,omitempty"`
+ MsgSize *uint32 `json:"msg_size,omitempty"`
}
// Peer represents a connected remote node.
@@ -115,17 +120,23 @@ type Peer struct {
}
// NewPeer returns a peer for testing purposes.
-func NewPeer(id discover.NodeID, name string, caps []Cap) *Peer {
+func NewPeer(id enode.ID, name string, caps []Cap) *Peer {
pipe, _ := net.Pipe()
- conn := &conn{fd: pipe, transport: nil, id: id, caps: caps, name: name}
+ node := enode.SignNull(new(enr.Record), id)
+ conn := &conn{fd: pipe, transport: nil, node: node, caps: caps, name: name}
peer := newPeer(conn, nil)
close(peer.closed) // ensures Disconnect doesn't block
return peer
}
// ID returns the node's public key.
-func (p *Peer) ID() discover.NodeID {
- return p.rw.id
+func (p *Peer) ID() enode.ID {
+ return p.rw.node.ID()
+}
+
+// Node returns the peer's node descriptor.
+func (p *Peer) Node() *enode.Node {
+ return p.rw.node
}
// Name returns the node name that the remote node advertised.
@@ -160,12 +171,13 @@ func (p *Peer) Disconnect(reason DiscReason) {
// String implements fmt.Stringer.
func (p *Peer) String() string {
- return fmt.Sprintf("Peer %x %v ", p.rw.id[:8], p.RemoteAddr())
+ id := p.ID()
+ return fmt.Sprintf("Peer %x %v", id[:8], p.RemoteAddr())
}
// Inbound returns true if the peer is an inbound connection
func (p *Peer) Inbound() bool {
- return p.rw.flags&inboundConn != 0
+ return p.rw.is(inboundConn)
}
func newPeer(conn *conn, protocols []Protocol) *Peer {
@@ -178,7 +190,7 @@ func newPeer(conn *conn, protocols []Protocol) *Peer {
protoErr: make(chan error, len(protomap)+1), // protocols + pingLoop
closed: make(chan struct{}),
pingRecv: make(chan struct{}, 16),
- log: log.New("id", conn.id, "conn", conn.flags),
+ log: log.New("id", conn.node.ID(), "conn", conn.flags),
}
return p
}
@@ -429,9 +441,11 @@ func (rw *protoRW) ReadMsg() (Msg, error) {
// peer. Sub-protocol independent fields are contained and initialized here, with
// protocol specifics delegated to all connected sub-protocols.
type PeerInfo struct {
- ID string `json:"id"` // Unique node identifier (also the encryption key)
- Name string `json:"name"` // Name of the node, including client type, version, OS, custom data
- Caps []string `json:"caps"` // Sum-protocols advertised by this particular peer
+ ENR string `json:"enr,omitempty"` // Ethereum Node Record
+ Enode string `json:"enode"` // Node URL
+ ID string `json:"id"` // Unique node identifier
+ Name string `json:"name"` // Name of the node, including client type, version, OS, custom data
+ Caps []string `json:"caps"` // Protocols advertised by this peer
Network struct {
LocalAddress string `json:"localAddress"` // Local endpoint of the TCP data connection
RemoteAddress string `json:"remoteAddress"` // Remote endpoint of the TCP data connection
@@ -451,11 +465,15 @@ func (p *Peer) Info() *PeerInfo {
}
// Assemble the generic peer metadata
info := &PeerInfo{
+ Enode: p.Node().String(),
ID: p.ID().String(),
Name: p.Name(),
Caps: caps,
Protocols: make(map[string]interface{}),
}
+ if p.Node().Seq() > 0 {
+ info.ENR = p.Node().String()
+ }
info.Network.LocalAddress = p.LocalAddr().String()
info.Network.RemoteAddress = p.RemoteAddr().String()
info.Network.Inbound = p.rw.is(inboundConn)
diff --git a/p2p/peer_test.go b/p2p/peer_test.go
index a3e1c74fd876..5aa64a32e190 100644
--- a/p2p/peer_test.go
+++ b/p2p/peer_test.go
@@ -45,8 +45,8 @@ var discard = Protocol{
func testPeer(protos []Protocol) (func(), *conn, *Peer, <-chan error) {
fd1, fd2 := net.Pipe()
- c1 := &conn{fd: fd1, transport: newTestTransport(randomID(), fd1)}
- c2 := &conn{fd: fd2, transport: newTestTransport(randomID(), fd2)}
+ c1 := &conn{fd: fd1, node: newNode(randomID(), nil), transport: newTestTransport(&newkey().PublicKey, fd1)}
+ c2 := &conn{fd: fd2, node: newNode(randomID(), nil), transport: newTestTransport(&newkey().PublicKey, fd2)}
for _, p := range protos {
c1.caps = append(c1.caps, p.cap())
c2.caps = append(c2.caps, p.cap())
diff --git a/p2p/protocol.go b/p2p/protocol.go
index deb86f985c34..2db3bc91a49b 100644
--- a/p2p/protocol.go
+++ b/p2p/protocol.go
@@ -19,7 +19,8 @@ package p2p
import (
"fmt"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
)
// Protocol represents a P2P subprotocol implementation.
@@ -51,7 +52,10 @@ type Protocol struct {
// PeerInfo is an optional helper method to retrieve protocol specific metadata
// about a certain peer in the network. If an info retrieval function is set,
// but returns nil, it is assumed that the protocol handshake is still running.
- PeerInfo func(id discover.NodeID) interface{}
+ PeerInfo func(id enode.ID) interface{}
+
+ // Attributes contains protocol specific information for the node record.
+ Attributes []enr.Entry
}
func (p Protocol) cap() Cap {
@@ -64,10 +68,6 @@ type Cap struct {
Version uint
}
-func (cap Cap) RlpData() interface{} {
- return []interface{}{cap.Name, cap.Version}
-}
-
func (cap Cap) String() string {
return fmt.Sprintf("%s/%d", cap.Name, cap.Version)
}
@@ -79,3 +79,5 @@ func (cs capsByNameAndVersion) Swap(i, j int) { cs[i], cs[j] = cs[j], cs[i] }
func (cs capsByNameAndVersion) Less(i, j int) bool {
return cs[i].Name < cs[j].Name || (cs[i].Name == cs[j].Name && cs[i].Version < cs[j].Version)
}
+
+func (capsByNameAndVersion) ENRKey() string { return "cap" }
diff --git a/p2p/protocols/protocol_test.go b/p2p/protocols/protocol_test.go
index bcd3186f6ce3..e13b85e96576 100644
--- a/p2p/protocols/protocol_test.go
+++ b/p2p/protocols/protocol_test.go
@@ -24,7 +24,7 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
p2ptest "github.com/XinFinOrg/XDPoSChain/p2p/testing"
)
@@ -36,7 +36,7 @@ type hs0 struct {
// message to kill/drop the peer with nodeID
type kill struct {
- C discover.NodeID
+ C enode.ID
}
// message to drop connection
@@ -144,7 +144,7 @@ func protocolTester(t *testing.T, pp *p2ptest.TestPeerPool) *p2ptest.ProtocolTes
return p2ptest.NewProtocolTester(t, conf.ID, 2, newProtocol(pp))
}
-func protoHandshakeExchange(id discover.NodeID, proto *protoHandshake) []p2ptest.Exchange {
+func protoHandshakeExchange(id enode.ID, proto *protoHandshake) []p2ptest.Exchange {
return []p2ptest.Exchange{
{
@@ -172,13 +172,13 @@ func runProtoHandshake(t *testing.T, proto *protoHandshake, errs ...error) {
pp := p2ptest.NewTestPeerPool()
s := protocolTester(t, pp)
// TODO: make this more than one handshake
- id := s.IDs[0]
- if err := s.TestExchanges(protoHandshakeExchange(id, proto)...); err != nil {
+ node := s.Nodes[0]
+ if err := s.TestExchanges(protoHandshakeExchange(node.ID(), proto)...); err != nil {
t.Fatal(err)
}
var disconnects []*p2ptest.Disconnect
for i, err := range errs {
- disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
+ disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.Nodes[i].ID(), Error: err})
}
if err := s.TestDisconnected(disconnects...); err != nil {
t.Fatal(err)
@@ -197,7 +197,7 @@ func TestProtoHandshakeSuccess(t *testing.T) {
runProtoHandshake(t, &protoHandshake{42, "420"})
}
-func moduleHandshakeExchange(id discover.NodeID, resp uint) []p2ptest.Exchange {
+func moduleHandshakeExchange(id enode.ID, resp uint) []p2ptest.Exchange {
return []p2ptest.Exchange{
{
@@ -224,16 +224,16 @@ func moduleHandshakeExchange(id discover.NodeID, resp uint) []p2ptest.Exchange {
func runModuleHandshake(t *testing.T, resp uint, errs ...error) {
pp := p2ptest.NewTestPeerPool()
s := protocolTester(t, pp)
- id := s.IDs[0]
- if err := s.TestExchanges(protoHandshakeExchange(id, &protoHandshake{42, "420"})...); err != nil {
+ node := s.Nodes[0]
+ if err := s.TestExchanges(protoHandshakeExchange(node.ID(), &protoHandshake{42, "420"})...); err != nil {
t.Fatal(err)
}
- if err := s.TestExchanges(moduleHandshakeExchange(id, resp)...); err != nil {
+ if err := s.TestExchanges(moduleHandshakeExchange(node.ID(), resp)...); err != nil {
t.Fatal(err)
}
var disconnects []*p2ptest.Disconnect
for i, err := range errs {
- disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
+ disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.Nodes[i].ID(), Error: err})
}
if err := s.TestDisconnected(disconnects...); err != nil {
t.Fatal(err)
@@ -249,7 +249,7 @@ func TestModuleHandshakeSuccess(t *testing.T) {
}
// testing complex interactions over multiple peers, relaying, dropping
-func testMultiPeerSetup(a, b discover.NodeID) []p2ptest.Exchange {
+func testMultiPeerSetup(a, b enode.ID) []p2ptest.Exchange {
return []p2ptest.Exchange{
{
@@ -305,7 +305,7 @@ func runMultiplePeers(t *testing.T, peer int, errs ...error) {
pp := p2ptest.NewTestPeerPool()
s := protocolTester(t, pp)
- if err := s.TestExchanges(testMultiPeerSetup(s.IDs[0], s.IDs[1])...); err != nil {
+ if err := s.TestExchanges(testMultiPeerSetup(s.Nodes[0].ID(), s.Nodes[1].ID())...); err != nil {
t.Fatal(err)
}
// after some exchanges of messages, we can test state changes
@@ -318,15 +318,15 @@ WAIT:
for {
select {
case <-tick.C:
- if pp.Has(s.IDs[0]) {
+ if pp.Has(s.Nodes[0].ID()) {
break WAIT
}
case <-timeout.C:
t.Fatal("timeout")
}
}
- if !pp.Has(s.IDs[1]) {
- t.Fatalf("missing peer test-1: %v (%v)", pp, s.IDs)
+ if !pp.Has(s.Nodes[1].ID()) {
+ t.Fatalf("missing peer test-1: %v (%v)", pp, s.Nodes)
}
// peer 0 sends kill request for peer with index
@@ -334,8 +334,8 @@ WAIT:
Triggers: []p2ptest.Trigger{
{
Code: 2,
- Msg: &kill{s.IDs[peer]},
- Peer: s.IDs[0],
+ Msg: &kill{s.Nodes[peer].ID()},
+ Peer: s.Nodes[0].ID(),
},
},
})
@@ -350,7 +350,7 @@ WAIT:
{
Code: 3,
Msg: &drop{},
- Peer: s.IDs[(peer+1)%2],
+ Peer: s.Nodes[(peer+1)%2].ID(),
},
},
})
@@ -362,14 +362,14 @@ WAIT:
// check the actual discconnect errors on the individual peers
var disconnects []*p2ptest.Disconnect
for i, err := range errs {
- disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.IDs[i], Error: err})
+ disconnects = append(disconnects, &p2ptest.Disconnect{Peer: s.Nodes[i].ID(), Error: err})
}
if err := s.TestDisconnected(disconnects...); err != nil {
t.Fatal(err)
}
// test if disconnected peers have been removed from peerPool
- if pp.Has(s.IDs[peer]) {
- t.Fatalf("peer test-%v not dropped: %v (%v)", peer, pp, s.IDs)
+ if pp.Has(s.Nodes[peer].ID()) {
+ t.Fatalf("peer test-%v not dropped: %v (%v)", peer, pp, s.Nodes)
}
}
diff --git a/p2p/rlpx.go b/p2p/rlpx.go
index 5ceb897eae09..3244f294c766 100644
--- a/p2p/rlpx.go
+++ b/p2p/rlpx.go
@@ -34,11 +34,11 @@ import (
"sync"
"time"
+ "github.com/XinFinOrg/XDPoSChain/common/bitutil"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/crypto/ecies"
"github.com/XinFinOrg/XDPoSChain/crypto/secp256k1"
"github.com/XinFinOrg/XDPoSChain/crypto/sha3"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/rlp"
"github.com/golang/snappy"
)
@@ -165,7 +165,7 @@ func readProtocolHandshake(rw MsgReader, our *protoHandshake) (*protoHandshake,
if err := msg.Decode(&hs); err != nil {
return nil, err
}
- if (hs.ID == discover.NodeID{}) {
+ if len(hs.ID) != 64 || !bitutil.TestBytes(hs.ID) {
return nil, DiscInvalidIdentity
}
return &hs, nil
@@ -175,7 +175,7 @@ func readProtocolHandshake(rw MsgReader, our *protoHandshake) (*protoHandshake,
// messages. the protocol handshake is the first authenticated message
// and also verifies whether the encryption handshake 'worked' and the
// remote side actually provided the right public key.
-func (t *rlpx) doEncHandshake(prv *ecdsa.PrivateKey, dial *discover.Node) (discover.NodeID, error) {
+func (t *rlpx) doEncHandshake(prv *ecdsa.PrivateKey, dial *ecdsa.PublicKey) (*ecdsa.PublicKey, error) {
var (
sec secrets
err error
@@ -183,23 +183,21 @@ func (t *rlpx) doEncHandshake(prv *ecdsa.PrivateKey, dial *discover.Node) (disco
if dial == nil {
sec, err = receiverEncHandshake(t.fd, prv, nil)
} else {
- sec, err = initiatorEncHandshake(t.fd, prv, dial.ID, nil)
+ sec, err = initiatorEncHandshake(t.fd, prv, dial)
}
if err != nil {
- return discover.NodeID{}, err
+ return nil, err
}
t.wmu.Lock()
t.rw = newRLPXFrameRW(t.fd, sec)
t.wmu.Unlock()
- return sec.RemoteID, nil
+ return sec.Remote.ExportECDSA(), nil
}
// encHandshake contains the state of the encryption handshake.
type encHandshake struct {
- initiator bool
- remoteID discover.NodeID
-
- remotePub *ecies.PublicKey // remote-pubk
+ initiator bool
+ remote *ecies.PublicKey // remote-pubk
initNonce, respNonce []byte // nonce
randomPrivKey *ecies.PrivateKey // ecdhe-random
remoteRandomPub *ecies.PublicKey // ecdhe-random-pubk
@@ -208,7 +206,7 @@ type encHandshake struct {
// secrets represents the connection secrets
// which are negotiated during the encryption handshake.
type secrets struct {
- RemoteID discover.NodeID
+ Remote *ecies.PublicKey
AES, MAC []byte
EgressMAC, IngressMAC hash.Hash
Token []byte
@@ -249,9 +247,9 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
sharedSecret := crypto.Keccak256(ecdheSecret, crypto.Keccak256(h.respNonce, h.initNonce))
aesSecret := crypto.Keccak256(ecdheSecret, sharedSecret)
s := secrets{
- RemoteID: h.remoteID,
- AES: aesSecret,
- MAC: crypto.Keccak256(ecdheSecret, aesSecret),
+ Remote: h.remote,
+ AES: aesSecret,
+ MAC: crypto.Keccak256(ecdheSecret, aesSecret),
}
// setup sha3 instances for the MACs
@@ -273,16 +271,16 @@ func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
// staticSharedSecret returns the static shared secret, the result
// of key agreement between the local and remote static node key.
func (h *encHandshake) staticSharedSecret(prv *ecdsa.PrivateKey) ([]byte, error) {
- return ecies.ImportECDSA(prv).GenerateShared(h.remotePub, sskLen, sskLen)
+ return ecies.ImportECDSA(prv).GenerateShared(h.remote, sskLen, sskLen)
}
// initiatorEncHandshake negotiates a session token on conn.
// it should be called on the dialing side of the connection.
//
// prv is the local client's private key.
-func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID discover.NodeID, token []byte) (s secrets, err error) {
- h := &encHandshake{initiator: true, remoteID: remoteID}
- authMsg, err := h.makeAuthMsg(prv, token)
+func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remote *ecdsa.PublicKey) (s secrets, err error) {
+ h := &encHandshake{initiator: true, remote: ecies.ImportECDSAPublic(remote)}
+ authMsg, err := h.makeAuthMsg(prv)
if err != nil {
return s, err
}
@@ -306,15 +304,11 @@ func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID d
}
// makeAuthMsg creates the initiator handshake message.
-func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey, token []byte) (*authMsgV4, error) {
- rpub, err := h.remoteID.Pubkey()
- if err != nil {
- return nil, fmt.Errorf("bad remoteID: %v", err)
- }
- h.remotePub = ecies.ImportECDSAPublic(rpub)
+func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey) (*authMsgV4, error) {
// Generate random initiator nonce.
h.initNonce = make([]byte, shaLen)
- if _, err := rand.Read(h.initNonce); err != nil {
+ _, err := rand.Read(h.initNonce)
+ if err != nil {
return nil, err
}
// Generate random keypair to for ECDH.
@@ -324,7 +318,7 @@ func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey, token []byte) (*authMs
}
// Sign known message: static-shared-secret ^ nonce
- token, err = h.staticSharedSecret(prv)
+ token, err := h.staticSharedSecret(prv)
if err != nil {
return nil, err
}
@@ -385,13 +379,12 @@ func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, token []byt
func (h *encHandshake) handleAuthMsg(msg *authMsgV4, prv *ecdsa.PrivateKey) error {
// Import the remote identity.
- h.initNonce = msg.Nonce[:]
- h.remoteID = msg.InitiatorPubkey
- rpub, err := h.remoteID.Pubkey()
+ rpub, err := importPublicKey(msg.InitiatorPubkey[:])
if err != nil {
- return fmt.Errorf("bad remoteID: %#v", err)
+ return err
}
- h.remotePub = ecies.ImportECDSAPublic(rpub)
+ h.initNonce = msg.Nonce[:]
+ h.remote = rpub
// Generate random keypair for ECDH.
// If a private key is already set, use it instead of generating one (for testing).
@@ -437,7 +430,7 @@ func (msg *authMsgV4) sealPlain(h *encHandshake) ([]byte, error) {
n += copy(buf[n:], msg.InitiatorPubkey[:])
n += copy(buf[n:], msg.Nonce[:])
buf[n] = 0 // token-flag
- return ecies.Encrypt(rand.Reader, h.remotePub, buf, nil, nil)
+ return ecies.Encrypt(rand.Reader, h.remote, buf, nil, nil)
}
func (msg *authMsgV4) decodePlain(input []byte) {
@@ -453,7 +446,7 @@ func (msg *authRespV4) sealPlain(hs *encHandshake) ([]byte, error) {
buf := make([]byte, authRespLen)
n := copy(buf, msg.RandomPubkey[:])
copy(buf[n:], msg.Nonce[:])
- return ecies.Encrypt(rand.Reader, hs.remotePub, buf, nil, nil)
+ return ecies.Encrypt(rand.Reader, hs.remote, buf, nil, nil)
}
func (msg *authRespV4) decodePlain(input []byte) {
@@ -476,7 +469,7 @@ func sealEIP8(msg interface{}, h *encHandshake) ([]byte, error) {
prefix := make([]byte, 2)
binary.BigEndian.PutUint16(prefix, uint16(buf.Len()+eciesOverhead))
- enc, err := ecies.Encrypt(rand.Reader, h.remotePub, buf.Bytes(), nil, prefix)
+ enc, err := ecies.Encrypt(rand.Reader, h.remote, buf.Bytes(), nil, prefix)
return append(prefix, enc...), err
}
diff --git a/p2p/rlpx_test.go b/p2p/rlpx_test.go
index 903a91c7699e..7191747cc8c6 100644
--- a/p2p/rlpx_test.go
+++ b/p2p/rlpx_test.go
@@ -18,6 +18,7 @@ package p2p
import (
"bytes"
+ "crypto/ecdsa"
"crypto/rand"
"errors"
"fmt"
@@ -32,7 +33,6 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/crypto/ecies"
"github.com/XinFinOrg/XDPoSChain/crypto/sha3"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/rlp"
"github.com/davecgh/go-spew/spew"
)
@@ -78,9 +78,9 @@ func TestEncHandshake(t *testing.T) {
func testEncHandshake(token []byte) error {
type result struct {
- side string
- id discover.NodeID
- err error
+ side string
+ pubkey *ecdsa.PublicKey
+ err error
}
var (
prv0, _ = crypto.GenerateKey()
@@ -95,14 +95,12 @@ func testEncHandshake(token []byte) error {
defer func() { output <- r }()
defer fd0.Close()
- dest := &discover.Node{ID: discover.PubkeyID(&prv1.PublicKey)}
- r.id, r.err = c0.doEncHandshake(prv0, dest)
+ r.pubkey, r.err = c0.doEncHandshake(prv0, &prv1.PublicKey)
if r.err != nil {
return
}
- id1 := discover.PubkeyID(&prv1.PublicKey)
- if r.id != id1 {
- r.err = fmt.Errorf("remote ID mismatch: got %v, want: %v", r.id, id1)
+ if !reflect.DeepEqual(r.pubkey, &prv1.PublicKey) {
+ r.err = fmt.Errorf("remote pubkey mismatch: got %v, want: %v", r.pubkey, &prv1.PublicKey)
}
}()
go func() {
@@ -110,13 +108,12 @@ func testEncHandshake(token []byte) error {
defer func() { output <- r }()
defer fd1.Close()
- r.id, r.err = c1.doEncHandshake(prv1, nil)
+ r.pubkey, r.err = c1.doEncHandshake(prv1, nil)
if r.err != nil {
return
}
- id0 := discover.PubkeyID(&prv0.PublicKey)
- if r.id != id0 {
- r.err = fmt.Errorf("remote ID mismatch: got %v, want: %v", r.id, id0)
+ if !reflect.DeepEqual(r.pubkey, &prv0.PublicKey) {
+ r.err = fmt.Errorf("remote ID mismatch: got %v, want: %v", r.pubkey, &prv0.PublicKey)
}
}()
@@ -148,12 +145,12 @@ func testEncHandshake(token []byte) error {
func TestProtocolHandshake(t *testing.T) {
var (
prv0, _ = crypto.GenerateKey()
- node0 = &discover.Node{ID: discover.PubkeyID(&prv0.PublicKey), IP: net.IP{1, 2, 3, 4}, TCP: 33}
- hs0 = &protoHandshake{Version: 3, ID: node0.ID, Caps: []Cap{{"a", 0}, {"b", 2}}}
+ pub0 = crypto.FromECDSAPub(&prv0.PublicKey)[1:]
+ hs0 = &protoHandshake{Version: 3, ID: pub0, Caps: []Cap{{"a", 0}, {"b", 2}}}
prv1, _ = crypto.GenerateKey()
- node1 = &discover.Node{ID: discover.PubkeyID(&prv1.PublicKey), IP: net.IP{5, 6, 7, 8}, TCP: 44}
- hs1 = &protoHandshake{Version: 3, ID: node1.ID, Caps: []Cap{{"c", 1}, {"d", 3}}}
+ pub1 = crypto.FromECDSAPub(&prv1.PublicKey)[1:]
+ hs1 = &protoHandshake{Version: 3, ID: pub1, Caps: []Cap{{"c", 1}, {"d", 3}}}
wg sync.WaitGroup
)
@@ -168,13 +165,13 @@ func TestProtocolHandshake(t *testing.T) {
defer wg.Done()
defer fd0.Close()
rlpx := newRLPX(fd0)
- remid, err := rlpx.doEncHandshake(prv0, node1)
+ rpubkey, err := rlpx.doEncHandshake(prv0, &prv1.PublicKey)
if err != nil {
t.Errorf("dial side enc handshake failed: %v", err)
return
}
- if remid != node1.ID {
- t.Errorf("dial side remote id mismatch: got %v, want %v", remid, node1.ID)
+ if !reflect.DeepEqual(rpubkey, &prv1.PublicKey) {
+ t.Errorf("dial side remote pubkey mismatch: got %v, want %v", rpubkey, &prv1.PublicKey)
return
}
@@ -194,13 +191,13 @@ func TestProtocolHandshake(t *testing.T) {
defer wg.Done()
defer fd1.Close()
rlpx := newRLPX(fd1)
- remid, err := rlpx.doEncHandshake(prv1, nil)
+ rpubkey, err := rlpx.doEncHandshake(prv1, nil)
if err != nil {
t.Errorf("listen side enc handshake failed: %v", err)
return
}
- if remid != node0.ID {
- t.Errorf("listen side remote id mismatch: got %v, want %v", remid, node0.ID)
+ if !reflect.DeepEqual(rpubkey, &prv0.PublicKey) {
+ t.Errorf("listen side remote pubkey mismatch: got %v, want %v", rpubkey, &prv0.PublicKey)
return
}
diff --git a/p2p/server.go b/p2p/server.go
index 2ccb4cf17a84..917c7186a9f4 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -18,20 +18,29 @@
package p2p
import (
+ "bytes"
"crypto/ecdsa"
+ "encoding/hex"
"errors"
+ "fmt"
"net"
+ "sort"
"sync"
+ "sync/atomic"
"time"
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/mclock"
+ "github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p/discover"
"github.com/XinFinOrg/XDPoSChain/p2p/discv5"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
"github.com/XinFinOrg/XDPoSChain/p2p/netutil"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
)
const (
@@ -85,7 +94,7 @@ type Config struct {
// BootstrapNodes are used to establish connectivity
// with the rest of the network.
- BootstrapNodes []*discover.Node
+ BootstrapNodes []*enode.Node
// BootstrapNodesV5 are used to establish connectivity
// with the rest of the network using the V5 discovery
@@ -94,11 +103,11 @@ type Config struct {
// Static nodes are used as pre-configured connections which are always
// maintained and re-connected on disconnects.
- StaticNodes []*discover.Node
+ StaticNodes []*enode.Node
// Trusted nodes are used as pre-configured connections which are always
// allowed to connect, even above the peer limit.
- TrustedNodes []*discover.Node
+ TrustedNodes []*enode.Node
// Connectivity can be restricted to certain IP networks.
// If this option is set to a non-nil value, only hosts which match one of the
@@ -155,6 +164,8 @@ type Server struct {
lock sync.Mutex // protects running
running bool
+ nodedb *enode.DB
+ localnode *enode.LocalNode
ntab discoverTable
listener net.Listener
ourHandshake *protoHandshake
@@ -166,8 +177,10 @@ type Server struct {
peerOpDone chan struct{}
quit chan struct{}
- addstatic chan *discover.Node
- removestatic chan *discover.Node
+ addstatic chan *enode.Node
+ removestatic chan *enode.Node
+ addtrusted chan *enode.Node
+ removetrusted chan *enode.Node
posthandshake chan *conn
addpeer chan *conn
delpeer chan peerDrop
@@ -176,7 +189,7 @@ type Server struct {
log log.Logger
}
-type peerOpFunc func(map[discover.NodeID]*Peer)
+type peerOpFunc func(map[enode.ID]*Peer)
type peerDrop struct {
*Peer
@@ -184,7 +197,7 @@ type peerDrop struct {
requested bool // true if signaled by the peer
}
-type connFlag int
+type connFlag int32
const (
dynDialedConn connFlag = 1 << iota
@@ -198,16 +211,16 @@ const (
type conn struct {
fd net.Conn
transport
+ node *enode.Node
flags connFlag
- cont chan error // The run loop uses cont to signal errors to SetupConn.
- id discover.NodeID // valid after the encryption handshake
- caps []Cap // valid after the protocol handshake
- name string // valid after the protocol handshake
+ cont chan error // The run loop uses cont to signal errors to SetupConn.
+ caps []Cap // valid after the protocol handshake
+ name string // valid after the protocol handshake
}
type transport interface {
// The two handshakes.
- doEncHandshake(prv *ecdsa.PrivateKey, dialDest *discover.Node) (discover.NodeID, error)
+ doEncHandshake(prv *ecdsa.PrivateKey, dialDest *ecdsa.PublicKey) (*ecdsa.PublicKey, error)
doProtoHandshake(our *protoHandshake) (*protoHandshake, error)
// The MsgReadWriter can only be used after the encryption
// handshake has completed. The code uses conn.id to track this
@@ -221,8 +234,8 @@ type transport interface {
func (c *conn) String() string {
s := c.flags.String()
- if (c.id != discover.NodeID{}) {
- s += " " + c.id.String()
+ if (c.node.ID() != enode.ID{}) {
+ s += " " + c.node.ID().String()
}
s += " " + c.fd.RemoteAddr().String()
return s
@@ -249,7 +262,23 @@ func (f connFlag) String() string {
}
func (c *conn) is(f connFlag) bool {
- return c.flags&f != 0
+ flags := connFlag(atomic.LoadInt32((*int32)(&c.flags)))
+ return flags&f != 0
+}
+
+func (c *conn) set(f connFlag, val bool) {
+ flags := connFlag(atomic.LoadInt32((*int32)(&c.flags)))
+ if val {
+ flags |= f
+ } else {
+ flags &= ^f
+ }
+ atomic.StoreInt32((*int32)(&c.flags), int32(flags))
+}
+
+// LocalNode returns the local node record.
+func (srv *Server) LocalNode() *enode.LocalNode {
+ return srv.localnode
}
// Peers returns all connected peers.
@@ -259,7 +288,7 @@ func (srv *Server) Peers() []*Peer {
// Note: We'd love to put this function into a variable but
// that seems to cause a weird compiler error in some
// environments.
- case srv.peerOp <- func(peers map[discover.NodeID]*Peer) {
+ case srv.peerOp <- func(peers map[enode.ID]*Peer) {
for _, p := range peers {
ps = append(ps, p)
}
@@ -274,7 +303,7 @@ func (srv *Server) Peers() []*Peer {
func (srv *Server) PeerCount() int {
var count int
select {
- case srv.peerOp <- func(ps map[discover.NodeID]*Peer) { count = len(ps) }:
+ case srv.peerOp <- func(ps map[enode.ID]*Peer) { count = len(ps) }:
<-srv.peerOpDone
case <-srv.quit:
}
@@ -284,8 +313,7 @@ func (srv *Server) PeerCount() int {
// AddPeer connects to the given node and maintains the connection until the
// server is shut down. If the connection fails for any reason, the server will
// attempt to reconnect the peer.
-func (srv *Server) AddPeer(node *discover.Node) {
-
+func (srv *Server) AddPeer(node *enode.Node) {
select {
case srv.addstatic <- node:
case <-srv.quit:
@@ -293,47 +321,45 @@ func (srv *Server) AddPeer(node *discover.Node) {
}
// RemovePeer disconnects from the given node
-func (srv *Server) RemovePeer(node *discover.Node) {
+func (srv *Server) RemovePeer(node *enode.Node) {
select {
case srv.removestatic <- node:
case <-srv.quit:
}
}
+// AddTrustedPeer adds the given node to a reserved whitelist which allows the
+// node to always connect, even if the slots are full.
+func (srv *Server) AddTrustedPeer(node *enode.Node) {
+ select {
+ case srv.addtrusted <- node:
+ case <-srv.quit:
+ }
+}
+
+// RemoveTrustedPeer removes the given node from the trusted peer set.
+func (srv *Server) RemoveTrustedPeer(node *enode.Node) {
+ select {
+ case srv.removetrusted <- node:
+ case <-srv.quit:
+ }
+}
+
// SubscribePeers subscribes the given channel to peer events
func (srv *Server) SubscribeEvents(ch chan *PeerEvent) event.Subscription {
return srv.peerFeed.Subscribe(ch)
}
// Self returns the local node's endpoint information.
-func (srv *Server) Self() *discover.Node {
+func (srv *Server) Self() *enode.Node {
srv.lock.Lock()
- defer srv.lock.Unlock()
-
- if !srv.running {
- return &discover.Node{IP: net.ParseIP("0.0.0.0")}
- }
- return srv.makeSelf(srv.listener, srv.ntab)
-}
+ ln := srv.localnode
+ srv.lock.Unlock()
-func (srv *Server) makeSelf(listener net.Listener, ntab discoverTable) *discover.Node {
- // If the server's not running, return an empty node.
- // If the node is running but discovery is off, manually assemble the node infos.
- if ntab == nil {
- // Inbound connections disabled, use zero address.
- if listener == nil {
- return &discover.Node{IP: net.ParseIP("0.0.0.0"), ID: discover.PubkeyID(&srv.PrivateKey.PublicKey)}
- }
- // Otherwise inject the listener address too
- addr := listener.Addr().(*net.TCPAddr)
- return &discover.Node{
- ID: discover.PubkeyID(&srv.PrivateKey.PublicKey),
- IP: addr.IP,
- TCP: uint16(addr.Port),
- }
+ if ln == nil {
+ return enode.NewV4(&srv.PrivateKey.PublicKey, net.ParseIP("0.0.0.0"), 0, 0)
}
- // Otherwise return the discovery node.
- return ntab.Self()
+ return ln.Node()
}
// Stop terminates the server and all active peer connections.
@@ -392,7 +418,9 @@ func (srv *Server) Start() (err error) {
if srv.log == nil {
srv.log = log.New()
}
- srv.log.Info("Starting P2P networking")
+ if srv.NoDial && srv.ListenAddr == "" {
+ srv.log.Warn("P2P server will be useless, neither dialing nor listening")
+ }
// static fields
if srv.PrivateKey == nil {
@@ -408,70 +436,127 @@ func (srv *Server) Start() (err error) {
srv.addpeer = make(chan *conn)
srv.delpeer = make(chan peerDrop)
srv.posthandshake = make(chan *conn)
- srv.addstatic = make(chan *discover.Node)
- srv.removestatic = make(chan *discover.Node)
+ srv.addstatic = make(chan *enode.Node)
+ srv.removestatic = make(chan *enode.Node)
+ srv.addtrusted = make(chan *enode.Node)
+ srv.removetrusted = make(chan *enode.Node)
srv.peerOp = make(chan peerOpFunc)
srv.peerOpDone = make(chan struct{})
- var (
- conn *net.UDPConn
- sconn *sharedUDPConn
- realaddr *net.UDPAddr
- unhandled chan discover.ReadPacket
- )
-
- if !srv.NoDiscovery || srv.DiscoveryV5 {
- addr, err := net.ResolveUDPAddr("udp", srv.ListenAddr)
- if err != nil {
+ if err := srv.setupLocalNode(); err != nil {
+ return err
+ }
+ if srv.ListenAddr != "" {
+ if err := srv.setupListening(); err != nil {
return err
}
- conn, err = net.ListenUDP("udp", addr)
- if err != nil {
- return err
+ }
+ if err := srv.setupDiscovery(); err != nil {
+ return err
+ }
+
+ dynPeers := srv.maxDialedConns()
+ dialer := newDialState(srv.localnode.ID(), srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict)
+ srv.loopWG.Add(1)
+ go srv.run(dialer)
+ return nil
+}
+
+func (srv *Server) setupLocalNode() error {
+ // Create the devp2p handshake.
+ pubkey := crypto.FromECDSAPub(&srv.PrivateKey.PublicKey)
+ srv.ourHandshake = &protoHandshake{Version: baseProtocolVersion, Name: srv.Name, ID: pubkey[1:]}
+ for _, p := range srv.Protocols {
+ srv.ourHandshake.Caps = append(srv.ourHandshake.Caps, p.cap())
+ }
+ sort.Sort(capsByNameAndVersion(srv.ourHandshake.Caps))
+
+ // Create the local node.
+ db, err := enode.OpenDB(srv.Config.NodeDatabase)
+ if err != nil {
+ return err
+ }
+ srv.nodedb = db
+ srv.localnode = enode.NewLocalNode(db, srv.PrivateKey)
+ srv.localnode.SetFallbackIP(net.IP{127, 0, 0, 1})
+ srv.localnode.Set(capsByNameAndVersion(srv.ourHandshake.Caps))
+ // TODO: check conflicts
+ for _, p := range srv.Protocols {
+ for _, e := range p.Attributes {
+ srv.localnode.Set(e)
}
- realaddr = conn.LocalAddr().(*net.UDPAddr)
- if srv.NAT != nil {
- if !realaddr.IP.IsLoopback() {
- go nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
- }
- // TODO: react to external IP changes over time.
- if ext, err := srv.NAT.ExternalIP(); err == nil {
- realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
+ }
+ switch srv.NAT.(type) {
+ case nil:
+ // No NAT interface, do nothing.
+ case nat.ExtIP:
+ // ExtIP doesn't block, set the IP right away.
+ ip, _ := srv.NAT.ExternalIP()
+ srv.localnode.SetStaticIP(ip)
+ default:
+ // Ask the router about the IP. This takes a while and blocks startup,
+ // do it in the background.
+ srv.loopWG.Add(1)
+ go func() {
+ defer srv.loopWG.Done()
+ if ip, err := srv.NAT.ExternalIP(); err == nil {
+ srv.localnode.SetStaticIP(ip)
}
- }
+ }()
+ }
+ return nil
+}
+
+func (srv *Server) setupDiscovery() error {
+ if srv.NoDiscovery && !srv.DiscoveryV5 {
+ return nil
}
- if !srv.NoDiscovery && srv.DiscoveryV5 {
- unhandled = make(chan discover.ReadPacket, 100)
- sconn = &sharedUDPConn{conn, unhandled}
+ addr, err := net.ResolveUDPAddr("udp", srv.ListenAddr)
+ if err != nil {
+ return err
+ }
+ conn, err := net.ListenUDP("udp", addr)
+ if err != nil {
+ return err
+ }
+ realaddr := conn.LocalAddr().(*net.UDPAddr)
+ srv.log.Debug("UDP listener up", "addr", realaddr)
+ if srv.NAT != nil {
+ if !realaddr.IP.IsLoopback() {
+ go nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
+ }
}
+ srv.localnode.SetFallbackUDP(realaddr.Port)
- // node table
+ // Discovery V4
+ var unhandled chan discover.ReadPacket
+ var sconn *sharedUDPConn
if !srv.NoDiscovery {
+ if srv.DiscoveryV5 {
+ unhandled = make(chan discover.ReadPacket, 100)
+ sconn = &sharedUDPConn{conn, unhandled}
+ }
cfg := discover.Config{
- PrivateKey: srv.PrivateKey,
- AnnounceAddr: realaddr,
- NodeDBPath: srv.NodeDatabase,
- NetRestrict: srv.NetRestrict,
- Bootnodes: srv.BootstrapNodes,
- Unhandled: unhandled,
+ PrivateKey: srv.PrivateKey,
+ NetRestrict: srv.NetRestrict,
+ Bootnodes: srv.BootstrapNodes,
+ Unhandled: unhandled,
}
- ntab, err := discover.ListenUDP(conn, cfg)
+ ntab, err := discover.ListenUDP(conn, srv.localnode, cfg)
if err != nil {
return err
}
srv.ntab = ntab
}
-
+ // Discovery V5
if srv.DiscoveryV5 {
- var (
- ntab *discv5.Network
- err error
- )
+ var ntab *discv5.Network
+ var err error
if sconn != nil {
- ntab, err = discv5.ListenUDP(srv.PrivateKey, sconn, realaddr, "", srv.NetRestrict) //srv.NodeDatabase)
+ ntab, err = discv5.ListenUDP(srv.PrivateKey, sconn, "", srv.NetRestrict)
} else {
- ntab, err = discv5.ListenUDP(srv.PrivateKey, conn, realaddr, "", srv.NetRestrict) //srv.NodeDatabase)
+ ntab, err = discv5.ListenUDP(srv.PrivateKey, conn, "", srv.NetRestrict)
}
if err != nil {
return err
@@ -481,32 +566,10 @@ func (srv *Server) Start() (err error) {
}
srv.DiscV5 = ntab
}
-
- dynPeers := srv.maxDialedConns()
- dialer := newDialState(srv.StaticNodes, srv.BootstrapNodes, srv.ntab, dynPeers, srv.NetRestrict)
-
- // handshake
- srv.ourHandshake = &protoHandshake{Version: baseProtocolVersion, Name: srv.Name, ID: discover.PubkeyID(&srv.PrivateKey.PublicKey)}
- for _, p := range srv.Protocols {
- srv.ourHandshake.Caps = append(srv.ourHandshake.Caps, p.cap())
- }
- // listen/dial
- if srv.ListenAddr != "" {
- if err := srv.startListening(); err != nil {
- return err
- }
- }
- if srv.NoDial && srv.ListenAddr == "" {
- srv.log.Warn("P2P server will be useless, neither dialing nor listening")
- }
-
- srv.loopWG.Add(1)
- go srv.run(dialer)
- srv.running = true
return nil
}
-func (srv *Server) startListening() error {
+func (srv *Server) setupListening() error {
// Launch the TCP listener.
listener, err := net.Listen("tcp", srv.ListenAddr)
if err != nil {
@@ -515,8 +578,11 @@ func (srv *Server) startListening() error {
laddr := listener.Addr().(*net.TCPAddr)
srv.ListenAddr = laddr.String()
srv.listener = listener
+ srv.localnode.Set(enr.TCP(laddr.Port))
+
srv.loopWG.Add(1)
go srv.listenLoop()
+
// Map the TCP listening port if NAT is configured.
if !laddr.IP.IsLoopback() && srv.NAT != nil {
srv.loopWG.Add(1)
@@ -529,27 +595,29 @@ func (srv *Server) startListening() error {
}
type dialer interface {
- newTasks(running int, peers map[discover.NodeID]*Peer, now time.Time) []task
+ newTasks(running int, peers map[enode.ID]*Peer, now time.Time) []task
taskDone(task, time.Time)
- addStatic(*discover.Node)
- removeStatic(*discover.Node)
+ addStatic(*enode.Node)
+ removeStatic(*enode.Node)
}
func (srv *Server) run(dialstate dialer) {
+ srv.log.Info("Started P2P networking", "self", srv.localnode.Node())
defer srv.loopWG.Done()
+ defer srv.nodedb.Close()
+
var (
- peers = make(map[discover.NodeID]*Peer)
+ peers = make(map[enode.ID]*Peer)
inboundCount = 0
- trusted = make(map[discover.NodeID]bool, len(srv.TrustedNodes))
+ trusted = make(map[enode.ID]bool, len(srv.TrustedNodes))
taskdone = make(chan task, maxActiveDialTasks)
runningTasks []task
queuedTasks []task // tasks that can't run yet
)
// Put trusted nodes into a map to speed up checks.
- // Trusted peers are loaded on startup and cannot be
- // modified while the server is running.
+ // Trusted peers are loaded on startup or added via AddTrustedPeer RPC.
for _, n := range srv.TrustedNodes {
- trusted[n.ID] = true
+ trusted[n.ID()] = true
}
// removes t from runningTasks
@@ -599,12 +667,32 @@ running:
case n := <-srv.removestatic:
// This channel is used by RemovePeer to send a
// disconnect request to a peer and begin the
- // stop keeping the node connected
- srv.log.Debug("Removing static node", "node", n)
+ // stop keeping the node connected.
+ srv.log.Trace("Removing static node", "node", n)
dialstate.removeStatic(n)
- if p, ok := peers[n.ID]; ok {
+ if p, ok := peers[n.ID()]; ok {
p.Disconnect(DiscRequested)
}
+ case n := <-srv.addtrusted:
+ // This channel is used by AddTrustedPeer to add an enode
+ // to the trusted node set.
+ srv.log.Trace("Adding trusted node", "node", n)
+ trusted[n.ID()] = true
+ // Mark any already-connected peer as trusted
+ if p, ok := peers[n.ID()]; ok {
+ p.rw.set(trustedConn, true)
+ }
+ case n := <-srv.removetrusted:
+ // This channel is used by RemoveTrustedPeer to remove an enode
+ // from the trusted node set.
+ srv.log.Trace("Removing trusted node", "node", n)
+ if _, ok := trusted[n.ID()]; ok {
+ delete(trusted, n.ID())
+ }
+ // Unmark any already-connected peer as trusted
+ if p, ok := peers[n.ID()]; ok {
+ p.rw.set(trustedConn, false)
+ }
case op := <-srv.peerOp:
// This channel is used by Peers and PeerCount.
op(peers)
@@ -619,7 +707,7 @@ running:
case c := <-srv.posthandshake:
// A connection has passed the encryption handshake so
// the remote identity is known (but hasn't been verified yet).
- if trusted[c.id] {
+ if trusted[c.node.ID()] {
// Ensure that the trusted flag is set before checking against MaxPeers.
c.flags |= trustedConn
}
@@ -641,19 +729,21 @@ running:
if srv.EnableMsgEvents {
p.events = &srv.peerFeed
}
- name := truncateName(c.name)
-
go srv.runPeer(p)
- if peers[c.id] != nil {
- peers[c.id].PairPeer = p
+
+ name := truncateName(c.name)
+ if peers[c.node.ID()] != nil {
+ peers[c.node.ID()].PairPeer = p
srv.log.Debug("Adding p2p pair peer", "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
} else {
- peers[c.id] = p
+ peers[c.node.ID()] = p
srv.log.Debug("Adding p2p peer", "name", name, "addr", c.fd.RemoteAddr(), "peers", len(peers)+1)
}
if p.Inbound() {
inboundCount++
}
+ } else {
+ srv.log.Debug("Error adding p2p peer", "err", err)
}
// The dialer logic relies on the assumption that
// dial tasks complete after the peer has been added or
@@ -697,7 +787,7 @@ running:
}
}
-func (srv *Server) protoHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCount int, c *conn) error {
+func (srv *Server) protoHandshakeChecks(peers map[enode.ID]*Peer, inboundCount int, c *conn) error {
// Drop connections with no matching protocols.
if len(srv.Protocols) > 0 && countMatchingProtocols(srv.Protocols, c.caps) == 0 {
return DiscUselessPeer
@@ -707,19 +797,15 @@ func (srv *Server) protoHandshakeChecks(peers map[discover.NodeID]*Peer, inbound
return srv.encHandshakeChecks(peers, inboundCount, c)
}
-func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCount int, c *conn) error {
+func (srv *Server) encHandshakeChecks(peers map[enode.ID]*Peer, inboundCount int, c *conn) error {
switch {
case !c.is(trustedConn|staticDialedConn) && len(peers) >= srv.MaxPeers:
return DiscTooManyPeers
case !c.is(trustedConn) && c.is(inboundConn) && inboundCount >= srv.maxInboundConns():
return DiscTooManyPeers
- case peers[c.id] != nil:
- exitPeer := peers[c.id]
- if exitPeer.PairPeer != nil {
- return DiscAlreadyConnected
- }
- return nil
- case c.id == srv.Self().ID:
+ case peers[c.node.ID()] != nil:
+ return DiscAlreadyConnected
+ case c.node.ID() == srv.localnode.ID():
return DiscSelf
default:
return nil
@@ -729,7 +815,6 @@ func (srv *Server) encHandshakeChecks(peers map[discover.NodeID]*Peer, inboundCo
func (srv *Server) maxInboundConns() int {
return srv.MaxPeers - srv.maxDialedConns()
}
-
func (srv *Server) maxDialedConns() int {
if srv.NoDiscovery || srv.NoDial {
return 0
@@ -741,15 +826,11 @@ func (srv *Server) maxDialedConns() int {
return srv.MaxPeers / r
}
-type tempError interface {
- Temporary() bool
-}
-
// listenLoop runs in its own goroutine and accepts
// inbound connections.
func (srv *Server) listenLoop() {
defer srv.loopWG.Done()
- srv.log.Info("RLPx listener up", "self", srv.makeSelf(srv.listener, srv.ntab))
+ srv.log.Debug("TCP listener up", "addr", srv.listener.Addr())
tokens := defaultMaxPendingPeers
if srv.MaxPendingPeers > 0 {
@@ -770,7 +851,7 @@ func (srv *Server) listenLoop() {
)
for {
fd, err = srv.listener.Accept()
- if tempErr, ok := err.(tempError); ok && tempErr.Temporary() {
+ if netutil.IsTemporaryError(err) {
srv.log.Debug("Temporary read error", "err", err)
continue
} else if err != nil {
@@ -802,21 +883,17 @@ func (srv *Server) listenLoop() {
// SetupConn runs the handshakes and attempts to add the connection
// as a peer. It returns when the connection has been added as a peer
// or the handshakes have failed.
-func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *discover.Node) error {
- self := srv.Self()
- if self == nil {
- return errors.New("shutdown")
- }
+func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *enode.Node) error {
c := &conn{fd: fd, transport: srv.newTransport(fd), flags: flags, cont: make(chan error)}
err := srv.setupConn(c, flags, dialDest)
if err != nil {
c.close(err)
- srv.log.Trace("Setting up connection failed", "id", c.id, "err", err)
+ srv.log.Trace("Setting up connection failed", "addr", fd.RemoteAddr(), "err", err)
}
return err
}
-func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *discover.Node) error {
+func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) error {
// Prevent leftover pending conns from entering the handshake.
srv.lock.Lock()
running := srv.running
@@ -824,18 +901,30 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *discover.Node) e
if !running {
return errServerStopped
}
+ // If dialing, figure out the remote public key.
+ var dialPubkey *ecdsa.PublicKey
+ if dialDest != nil {
+ dialPubkey = new(ecdsa.PublicKey)
+ if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
+ return fmt.Errorf("dial destination doesn't have a secp256k1 public key")
+ }
+ }
// Run the encryption handshake.
- var err error
- if c.id, err = c.doEncHandshake(srv.PrivateKey, dialDest); err != nil {
+ remotePubkey, err := c.doEncHandshake(srv.PrivateKey, dialPubkey)
+ if err != nil {
srv.log.Trace("Failed RLPx handshake", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
return err
}
- clog := srv.log.New("id", c.id, "addr", c.fd.RemoteAddr(), "conn", c.flags)
- // For dialed connections, check that the remote public key matches.
- if dialDest != nil && c.id != dialDest.ID {
- clog.Trace("Dialed identity mismatch", "want", c, dialDest.ID)
- return DiscUnexpectedIdentity
+ if dialDest != nil {
+ // For dialed connections, check that the remote public key matches.
+ if dialPubkey.X.Cmp(remotePubkey.X) != 0 || dialPubkey.Y.Cmp(remotePubkey.Y) != 0 {
+ return DiscUnexpectedIdentity
+ }
+ c.node = dialDest
+ } else {
+ c.node = nodeFromConn(remotePubkey, c.fd)
}
+ clog := srv.log.New("id", c.node.ID(), "addr", c.fd.RemoteAddr(), "conn", c.flags)
err = srv.checkpoint(c, srv.posthandshake)
if err != nil {
clog.Trace("Rejected peer before protocol handshake", "err", err)
@@ -847,13 +936,14 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *discover.Node) e
clog.Trace("Failed proto handshake", "err", err)
return err
}
- if phs.ID != c.id {
- clog.Trace("Wrong devp2p handshake identity", "err", phs.ID)
+ if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) {
+ clog.Trace("Wrong devp2p handshake identity", "phsid", fmt.Sprintf("%x", phs.ID))
return DiscUnexpectedIdentity
}
c.caps, c.name = phs.Caps, phs.Name
err = srv.checkpoint(c, srv.addpeer)
if err != nil {
+ clog.Debug("Rejected peer", "id", c.node.ID(), "err", err)
clog.Trace("Rejected peer", "err", err)
return err
}
@@ -863,6 +953,16 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *discover.Node) e
return nil
}
+func nodeFromConn(pubkey *ecdsa.PublicKey, conn net.Conn) *enode.Node {
+ var ip net.IP
+ var port int
+ if tcp, ok := conn.RemoteAddr().(*net.TCPAddr); ok {
+ ip = tcp.IP
+ port = tcp.Port
+ }
+ return enode.NewV4(pubkey, ip, port, port)
+}
+
func truncateName(s string) string {
if len(s) > 20 {
return s[:20] + "..."
@@ -920,6 +1020,7 @@ type NodeInfo struct {
ID string `json:"id"` // Unique node identifier (also the encryption key)
Name string `json:"name"` // Name of the node, including client type, version, OS, custom data
Enode string `json:"enode"` // Enode URL for adding this peer from remote peers
+ ENR string `json:"enr"` // Ethereum Node Record
IP string `json:"ip"` // IP address of the node
Ports struct {
Discovery int `json:"discovery"` // UDP listening port for discovery protocol
@@ -931,19 +1032,21 @@ type NodeInfo struct {
// NodeInfo gathers and returns a collection of metadata known about the host.
func (srv *Server) NodeInfo() *NodeInfo {
- node := srv.Self()
-
// Gather and assemble the generic node infos
+ node := srv.Self()
info := &NodeInfo{
Name: srv.Name,
Enode: node.String(),
- ID: node.ID.String(),
- IP: node.IP.String(),
+ ID: node.ID().String(),
+ IP: node.IP().String(),
ListenAddr: srv.ListenAddr,
Protocols: make(map[string]interface{}),
}
- info.Ports.Discovery = int(node.UDP)
- info.Ports.Listener = int(node.TCP)
+ info.Ports.Discovery = node.UDP()
+ info.Ports.Listener = node.TCP()
+ if enc, err := rlp.EncodeToBytes(node.Record()); err == nil {
+ info.ENR = "0x" + hex.EncodeToString(enc)
+ }
// Gather all the running protocol infos (only once per protocol type)
for _, proto := range srv.Protocols {
diff --git a/p2p/server_test.go b/p2p/server_test.go
index ceda0f8d6b54..9738b16e0ff6 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -19,6 +19,7 @@ package p2p
import (
"crypto/ecdsa"
"errors"
+ "fmt"
"math/rand"
"net"
"reflect"
@@ -28,21 +29,22 @@ import (
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/crypto/sha3"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enr"
)
-func init() {
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
-}
+// func init() {
+// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
+// }
type testTransport struct {
- id discover.NodeID
+ rpub *ecdsa.PublicKey
*rlpx
closeErr error
}
-func newTestTransport(id discover.NodeID, fd net.Conn) transport {
+func newTestTransport(rpub *ecdsa.PublicKey, fd net.Conn) transport {
wrapped := newRLPX(fd).(*rlpx)
wrapped.rw = newRLPXFrameRW(fd, secrets{
MAC: zero16,
@@ -50,15 +52,16 @@ func newTestTransport(id discover.NodeID, fd net.Conn) transport {
IngressMAC: sha3.NewKeccak256(),
EgressMAC: sha3.NewKeccak256(),
})
- return &testTransport{id: id, rlpx: wrapped}
+ return &testTransport{rpub: rpub, rlpx: wrapped}
}
-func (c *testTransport) doEncHandshake(prv *ecdsa.PrivateKey, dialDest *discover.Node) (discover.NodeID, error) {
- return c.id, nil
+func (c *testTransport) doEncHandshake(prv *ecdsa.PrivateKey, dialDest *ecdsa.PublicKey) (*ecdsa.PublicKey, error) {
+ return c.rpub, nil
}
func (c *testTransport) doProtoHandshake(our *protoHandshake) (*protoHandshake, error) {
- return &protoHandshake{ID: c.id, Name: "test"}, nil
+ pubkey := crypto.FromECDSAPub(c.rpub)[1:]
+ return &protoHandshake{ID: pubkey, Name: "test"}, nil
}
func (c *testTransport) close(err error) {
@@ -66,7 +69,7 @@ func (c *testTransport) close(err error) {
c.closeErr = err
}
-func startTestServer(t *testing.T, id discover.NodeID, pf func(*Peer)) *Server {
+func startTestServer(t *testing.T, remoteKey *ecdsa.PublicKey, pf func(*Peer)) *Server {
config := Config{
Name: "test",
MaxPeers: 10,
@@ -76,7 +79,7 @@ func startTestServer(t *testing.T, id discover.NodeID, pf func(*Peer)) *Server {
server := &Server{
Config: config,
newPeerHook: pf,
- newTransport: func(fd net.Conn) transport { return newTestTransport(id, fd) },
+ newTransport: func(fd net.Conn) transport { return newTestTransport(remoteKey, fd) },
}
if err := server.Start(); err != nil {
t.Fatalf("Could not start server: %v", err)
@@ -87,14 +90,11 @@ func startTestServer(t *testing.T, id discover.NodeID, pf func(*Peer)) *Server {
func TestServerListen(t *testing.T) {
// start the test server
connected := make(chan *Peer)
- remid := randomID()
+ remid := &newkey().PublicKey
srv := startTestServer(t, remid, func(p *Peer) {
- if p.ID() != remid {
+ if p.ID() != enode.PubkeyToIDV4(remid) {
t.Error("peer func called with wrong node id")
}
- if p == nil {
- t.Error("peer func called with nil conn")
- }
connected <- p
})
defer close(connected)
@@ -141,21 +141,22 @@ func TestServerDial(t *testing.T) {
// start the server
connected := make(chan *Peer)
- remid := randomID()
+ remid := &newkey().PublicKey
srv := startTestServer(t, remid, func(p *Peer) { connected <- p })
defer close(connected)
defer srv.Stop()
// tell the server to connect
tcpAddr := listener.Addr().(*net.TCPAddr)
- srv.AddPeer(&discover.Node{ID: remid, IP: tcpAddr.IP, TCP: uint16(tcpAddr.Port)})
+ node := enode.NewV4(remid, tcpAddr.IP, tcpAddr.Port, 0)
+ srv.AddPeer(node)
select {
case conn := <-accepted:
defer conn.Close()
select {
case peer := <-connected:
- if peer.ID() != remid {
+ if peer.ID() != enode.PubkeyToIDV4(remid) {
t.Errorf("peer has wrong id")
}
if peer.Name() != "test" {
@@ -169,26 +170,35 @@ func TestServerDial(t *testing.T) {
if !reflect.DeepEqual(peers, []*Peer{peer}) {
t.Errorf("Peers mismatch: got %v, want %v", peers, []*Peer{peer})
}
- case <-time.After(1 * time.Second):
- t.Error("server did not launch peer within one second")
- }
- select {
- case peer := <-connected:
- if peer.ID() != remid {
- t.Errorf("peer has wrong id")
- }
- if peer.Name() != "test" {
- t.Errorf("peer has wrong name")
- }
- if peer.RemoteAddr().String() != conn.LocalAddr().String() {
- t.Errorf("peer started with wrong conn: got %v, want %v",
- peer.RemoteAddr(), conn.LocalAddr())
+ // Test AddTrustedPeer/RemoveTrustedPeer and changing Trusted flags
+ // Particularly for race conditions on changing the flag state.
+ if peer := srv.Peers()[0]; peer.Info().Network.Trusted {
+ t.Errorf("peer is trusted prematurely: %v", peer)
}
+ done := make(chan bool)
+ go func() {
+ srv.AddTrustedPeer(node)
+ if peer := srv.Peers()[0]; !peer.Info().Network.Trusted {
+ t.Errorf("peer is not trusted after AddTrustedPeer: %v", peer)
+ }
+ srv.RemoveTrustedPeer(node)
+ if peer := srv.Peers()[0]; peer.Info().Network.Trusted {
+ t.Errorf("peer is trusted after RemoveTrustedPeer: %v", peer)
+ }
+ done <- true
+ }()
+ // Trigger potential race conditions
+ peer = srv.Peers()[0]
+ _ = peer.Inbound()
+ _ = peer.Info()
+ <-done
case <-time.After(1 * time.Second):
t.Error("server did not launch peer within one second")
}
+
case <-time.After(1 * time.Second):
+ fmt.Println("TestServerDial: server did not accept the dialed connection within one second")
t.Error("server did not connect within one second")
}
}
@@ -201,7 +211,7 @@ func TestServerTaskScheduling(t *testing.T) {
quit, returned = make(chan struct{}), make(chan struct{})
tc = 0
tg = taskgen{
- newFunc: func(running int, peers map[discover.NodeID]*Peer) []task {
+ newFunc: func(running int, peers map[enode.ID]*Peer) []task {
tc++
return []task{&testTask{index: tc - 1}}
},
@@ -216,12 +226,15 @@ func TestServerTaskScheduling(t *testing.T) {
// The Server in this test isn't actually running
// because we're only interested in what run does.
+ db, _ := enode.OpenDB("")
srv := &Server{
- Config: Config{MaxPeers: 10},
- quit: make(chan struct{}),
- ntab: fakeTable{},
- running: true,
- log: log.New(),
+ Config: Config{MaxPeers: 10},
+ localnode: enode.NewLocalNode(db, newkey()),
+ nodedb: db,
+ quit: make(chan struct{}),
+ ntab: fakeTable{},
+ running: true,
+ log: log.New(),
}
srv.loopWG.Add(1)
go func() {
@@ -262,11 +275,14 @@ func TestServerManyTasks(t *testing.T) {
}
var (
- srv = &Server{
- quit: make(chan struct{}),
- ntab: fakeTable{},
- running: true,
- log: log.New(),
+ db, _ = enode.OpenDB("")
+ srv = &Server{
+ quit: make(chan struct{}),
+ localnode: enode.NewLocalNode(db, newkey()),
+ nodedb: db,
+ ntab: fakeTable{},
+ running: true,
+ log: log.New(),
}
done = make(chan *testTask)
start, end = 0, 0
@@ -274,7 +290,7 @@ func TestServerManyTasks(t *testing.T) {
defer srv.Stop()
srv.loopWG.Add(1)
go srv.run(taskgen{
- newFunc: func(running int, peers map[discover.NodeID]*Peer) []task {
+ newFunc: func(running int, peers map[enode.ID]*Peer) []task {
start, end = end, end+maxActiveDialTasks+10
if end > len(alltasks) {
end = len(alltasks)
@@ -309,19 +325,19 @@ func TestServerManyTasks(t *testing.T) {
}
type taskgen struct {
- newFunc func(running int, peers map[discover.NodeID]*Peer) []task
+ newFunc func(running int, peers map[enode.ID]*Peer) []task
doneFunc func(task)
}
-func (tg taskgen) newTasks(running int, peers map[discover.NodeID]*Peer, now time.Time) []task {
+func (tg taskgen) newTasks(running int, peers map[enode.ID]*Peer, now time.Time) []task {
return tg.newFunc(running, peers)
}
func (tg taskgen) taskDone(t task, now time.Time) {
tg.doneFunc(t)
}
-func (tg taskgen) addStatic(*discover.Node) {
+func (tg taskgen) addStatic(*enode.Node) {
}
-func (tg taskgen) removeStatic(*discover.Node) {
+func (tg taskgen) removeStatic(*enode.Node) {
}
type testTask struct {
@@ -337,13 +353,14 @@ func (t *testTask) Do(srv *Server) {
// just after the encryption handshake when the server is
// at capacity. Trusted connections should still be accepted.
func TestServerAtCap(t *testing.T) {
- trustedID := randomID()
+ trustedNode := newkey()
+ trustedID := enode.PubkeyToIDV4(&trustedNode.PublicKey)
srv := &Server{
Config: Config{
PrivateKey: newkey(),
MaxPeers: 10,
NoDial: true,
- TrustedNodes: []*discover.Node{{ID: trustedID}},
+ TrustedNodes: []*enode.Node{newNode(trustedID, nil)},
},
}
if err := srv.Start(); err != nil {
@@ -351,10 +368,11 @@ func TestServerAtCap(t *testing.T) {
}
defer srv.Stop()
- newconn := func(id discover.NodeID) *conn {
+ newconn := func(id enode.ID) *conn {
fd, _ := net.Pipe()
- tx := newTestTransport(id, fd)
- return &conn{fd: fd, transport: tx, flags: inboundConn, id: id, cont: make(chan error)}
+ tx := newTestTransport(&trustedNode.PublicKey, fd)
+ node := enode.SignNull(new(enr.Record), id)
+ return &conn{fd: fd, transport: tx, flags: inboundConn, node: node, cont: make(chan error)}
}
// Inject a few connections to fill up the peer set.
@@ -365,7 +383,8 @@ func TestServerAtCap(t *testing.T) {
}
}
// Try inserting a non-trusted connection.
- c := newconn(randomID())
+ anotherID := randomID()
+ c := newconn(anotherID)
if err := srv.checkpoint(c, srv.posthandshake); err != DiscTooManyPeers {
t.Error("wrong error for insert:", err)
}
@@ -378,62 +397,144 @@ func TestServerAtCap(t *testing.T) {
t.Error("Server did not set trusted flag")
}
+ // Remove from trusted set and try again
+ srv.RemoveTrustedPeer(newNode(trustedID, nil))
+ c = newconn(trustedID)
+ if err := srv.checkpoint(c, srv.posthandshake); err != DiscTooManyPeers {
+ t.Error("wrong error for insert:", err)
+ }
+
+ // Add anotherID to trusted set and try again
+ srv.AddTrustedPeer(newNode(anotherID, nil))
+ c = newconn(anotherID)
+ if err := srv.checkpoint(c, srv.posthandshake); err != nil {
+ t.Error("unexpected error for trusted conn @posthandshake:", err)
+ }
+ if !c.is(trustedConn) {
+ t.Error("Server did not set trusted flag")
+ }
+}
+
+func TestServerPeerLimits(t *testing.T) {
+ srvkey := newkey()
+ clientkey := newkey()
+ clientnode := enode.NewV4(&clientkey.PublicKey, nil, 0, 0)
+
+ var tp = &setupTransport{
+ pubkey: &clientkey.PublicKey,
+ phs: protoHandshake{
+ ID: crypto.FromECDSAPub(&clientkey.PublicKey)[1:],
+ // Force "DiscUselessPeer" due to unmatching caps
+ // Caps: []Cap{discard.cap()},
+ },
+ }
+
+ srv := &Server{
+ Config: Config{
+ PrivateKey: srvkey,
+ MaxPeers: 0,
+ NoDial: true,
+ Protocols: []Protocol{discard},
+ },
+ newTransport: func(fd net.Conn) transport { return tp },
+ log: log.New(),
+ }
+ if err := srv.Start(); err != nil {
+ t.Fatalf("couldn't start server: %v", err)
+ }
+ defer srv.Stop()
+
+ // Check that server is full (MaxPeers=0)
+ flags := dynDialedConn
+ dialDest := clientnode
+ conn, _ := net.Pipe()
+ srv.SetupConn(conn, flags, dialDest)
+ if tp.closeErr != DiscTooManyPeers {
+ t.Errorf("unexpected close error: %q", tp.closeErr)
+ }
+ conn.Close()
+
+ srv.AddTrustedPeer(clientnode)
+
+ // Check that server allows a trusted peer despite being full.
+ conn, _ = net.Pipe()
+ srv.SetupConn(conn, flags, dialDest)
+ if tp.closeErr == DiscTooManyPeers {
+ t.Errorf("failed to bypass MaxPeers with trusted node: %q", tp.closeErr)
+ }
+
+ if tp.closeErr != DiscUselessPeer {
+ t.Errorf("unexpected close error: %q", tp.closeErr)
+ }
+ conn.Close()
+
+ srv.RemoveTrustedPeer(clientnode)
+
+ // Check that server is full again.
+ conn, _ = net.Pipe()
+ srv.SetupConn(conn, flags, dialDest)
+ if tp.closeErr != DiscTooManyPeers {
+ t.Errorf("unexpected close error: %q", tp.closeErr)
+ }
+ conn.Close()
}
func TestServerSetupConn(t *testing.T) {
- id := randomID()
- srvkey := newkey()
- srvid := discover.PubkeyID(&srvkey.PublicKey)
+ var (
+ clientkey, srvkey = newkey(), newkey()
+ clientpub = &clientkey.PublicKey
+ srvpub = &srvkey.PublicKey
+ )
tests := []struct {
dontstart bool
tt *setupTransport
flags connFlag
- dialDest *discover.Node
+ dialDest *enode.Node
wantCloseErr error
wantCalls string
}{
{
dontstart: true,
- tt: &setupTransport{id: id},
+ tt: &setupTransport{pubkey: clientpub},
wantCalls: "close,",
wantCloseErr: errServerStopped,
},
{
- tt: &setupTransport{id: id, encHandshakeErr: errors.New("read error")},
+ tt: &setupTransport{pubkey: clientpub, encHandshakeErr: errors.New("read error")},
flags: inboundConn,
wantCalls: "doEncHandshake,close,",
wantCloseErr: errors.New("read error"),
},
{
- tt: &setupTransport{id: id},
- dialDest: &discover.Node{ID: randomID()},
+ tt: &setupTransport{pubkey: clientpub},
+ dialDest: enode.NewV4(&newkey().PublicKey, nil, 0, 0),
flags: dynDialedConn,
wantCalls: "doEncHandshake,close,",
wantCloseErr: DiscUnexpectedIdentity,
},
{
- tt: &setupTransport{id: id, phs: &protoHandshake{ID: randomID()}},
- dialDest: &discover.Node{ID: id},
+ tt: &setupTransport{pubkey: clientpub, phs: protoHandshake{ID: randomID().Bytes()}},
+ dialDest: enode.NewV4(clientpub, nil, 0, 0),
flags: dynDialedConn,
wantCalls: "doEncHandshake,doProtoHandshake,close,",
wantCloseErr: DiscUnexpectedIdentity,
},
{
- tt: &setupTransport{id: id, protoHandshakeErr: errors.New("foo")},
- dialDest: &discover.Node{ID: id},
+ tt: &setupTransport{pubkey: clientpub, protoHandshakeErr: errors.New("foo")},
+ dialDest: enode.NewV4(clientpub, nil, 0, 0),
flags: dynDialedConn,
wantCalls: "doEncHandshake,doProtoHandshake,close,",
wantCloseErr: errors.New("foo"),
},
{
- tt: &setupTransport{id: srvid, phs: &protoHandshake{ID: srvid}},
+ tt: &setupTransport{pubkey: srvpub, phs: protoHandshake{ID: crypto.FromECDSAPub(srvpub)[1:]}},
flags: inboundConn,
wantCalls: "doEncHandshake,close,",
wantCloseErr: DiscSelf,
},
{
- tt: &setupTransport{id: id, phs: &protoHandshake{ID: id}},
+ tt: &setupTransport{pubkey: clientpub, phs: protoHandshake{ID: crypto.FromECDSAPub(clientpub)[1:]}},
flags: inboundConn,
wantCalls: "doEncHandshake,doProtoHandshake,close,",
wantCloseErr: DiscUselessPeer,
@@ -468,26 +569,26 @@ func TestServerSetupConn(t *testing.T) {
}
type setupTransport struct {
- id discover.NodeID
- encHandshakeErr error
-
- phs *protoHandshake
+ pubkey *ecdsa.PublicKey
+ encHandshakeErr error
+ phs protoHandshake
protoHandshakeErr error
calls string
closeErr error
}
-func (c *setupTransport) doEncHandshake(prv *ecdsa.PrivateKey, dialDest *discover.Node) (discover.NodeID, error) {
+func (c *setupTransport) doEncHandshake(prv *ecdsa.PrivateKey, dialDest *ecdsa.PublicKey) (*ecdsa.PublicKey, error) {
c.calls += "doEncHandshake,"
- return c.id, c.encHandshakeErr
+ return c.pubkey, c.encHandshakeErr
}
+
func (c *setupTransport) doProtoHandshake(our *protoHandshake) (*protoHandshake, error) {
c.calls += "doProtoHandshake,"
if c.protoHandshakeErr != nil {
return nil, c.protoHandshakeErr
}
- return c.phs, nil
+ return &c.phs, nil
}
func (c *setupTransport) close(err error) {
c.calls += "close,"
@@ -510,7 +611,7 @@ func newkey() *ecdsa.PrivateKey {
return key
}
-func randomID() (id discover.NodeID) {
+func randomID() (id enode.ID) {
for i := range id {
id[i] = byte(rand.Intn(255))
}
diff --git a/p2p/simulations/adapters/docker.go b/p2p/simulations/adapters/docker.go
index 712ad0d895c6..dad46f34cac3 100644
--- a/p2p/simulations/adapters/docker.go
+++ b/p2p/simulations/adapters/docker.go
@@ -28,10 +28,14 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/node"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/docker/docker/pkg/reexec"
)
+var (
+ ErrLinuxOnly = errors.New("DockerAdapter can only be used on Linux as it uses the current binary (which must be a Linux binary)")
+)
+
// DockerAdapter is a NodeAdapter which runs simulation nodes inside Docker
// containers.
//
@@ -60,7 +64,7 @@ func NewDockerAdapter() (*DockerAdapter, error) {
return &DockerAdapter{
ExecAdapter{
- nodes: make(map[discover.NodeID]*ExecNode),
+ nodes: make(map[enode.ID]*ExecNode),
},
}, nil
}
diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go
index 1ad3961fea8b..762223996cb0 100644
--- a/p2p/simulations/adapters/exec.go
+++ b/p2p/simulations/adapters/exec.go
@@ -38,7 +38,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rpc"
"github.com/docker/docker/pkg/reexec"
"github.com/gorilla/websocket"
@@ -55,7 +55,7 @@ type ExecAdapter struct {
// simulation node are created.
BaseDir string
- nodes map[discover.NodeID]*ExecNode
+ nodes map[enode.ID]*ExecNode
}
// NewExecAdapter returns an ExecAdapter which stores node data in
@@ -63,7 +63,7 @@ type ExecAdapter struct {
func NewExecAdapter(baseDir string) *ExecAdapter {
return &ExecAdapter{
BaseDir: baseDir,
- nodes: make(map[discover.NodeID]*ExecNode),
+ nodes: make(map[enode.ID]*ExecNode),
}
}
@@ -123,7 +123,7 @@ func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) {
// ExecNode starts a simulation node by exec'ing the current binary and
// running the configured services
type ExecNode struct {
- ID discover.NodeID
+ ID enode.ID
Dir string
Config *execNodeConfig
Cmd *exec.Cmd
@@ -504,7 +504,7 @@ type wsRPCDialer struct {
// DialRPC implements the RPCDialer interface by creating a WebSocket RPC
// client of the given node
-func (w *wsRPCDialer) DialRPC(id discover.NodeID) (*rpc.Client, error) {
+func (w *wsRPCDialer) DialRPC(id enode.ID) (*rpc.Client, error) {
addr, ok := w.addrs[id.String()]
if !ok {
return nil, fmt.Errorf("unknown node: %s", id)
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index fce627d90605..585cd0c40354 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -27,7 +27,8 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/simulations/pipes"
"github.com/XinFinOrg/XDPoSChain/rpc"
"github.com/gorilla/websocket"
)
@@ -35,8 +36,9 @@ import (
// SimAdapter is a NodeAdapter which creates in-memory simulation nodes and
// connects them using in-memory net.Pipe connections
type SimAdapter struct {
+ pipe func() (net.Conn, net.Conn, error)
mtx sync.RWMutex
- nodes map[discover.NodeID]*SimNode
+ nodes map[enode.ID]*SimNode
services map[string]ServiceFunc
}
@@ -45,7 +47,16 @@ type SimAdapter struct {
// particular node are passed to the NewNode function in the NodeConfig)
func NewSimAdapter(services map[string]ServiceFunc) *SimAdapter {
return &SimAdapter{
- nodes: make(map[discover.NodeID]*SimNode),
+ pipe: pipes.NetPipe,
+ nodes: make(map[enode.ID]*SimNode),
+ services: services,
+ }
+}
+
+func NewTCPAdapter(services map[string]ServiceFunc) *SimAdapter {
+ return &SimAdapter{
+ pipe: pipes.TCPPipe,
+ nodes: make(map[enode.ID]*SimNode),
services: services,
}
}
@@ -92,40 +103,35 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
}
simNode := &SimNode{
- ID: id,
- config: config,
- node: n,
- adapter: s,
- running: make(map[string]node.Service),
- connected: make(map[discover.NodeID]bool),
+ ID: id,
+ config: config,
+ node: n,
+ adapter: s,
+ running: make(map[string]node.Service),
}
s.nodes[id] = simNode
return simNode, nil
}
// Dial implements the p2p.NodeDialer interface by connecting to the node using
-// an in-memory net.Pipe connection
-func (s *SimAdapter) Dial(dest *discover.Node) (conn net.Conn, err error) {
- node, ok := s.GetNode(dest.ID)
+// an in-memory net.Pipe
+func (s *SimAdapter) Dial(dest *enode.Node) (conn net.Conn, err error) {
+ node, ok := s.GetNode(dest.ID())
if !ok {
- return nil, fmt.Errorf("unknown node: %s", dest.ID)
- }
- if node.connected[dest.ID] {
- return nil, fmt.Errorf("dialed node: %s", dest.ID)
+ return nil, fmt.Errorf("unknown node: %s", dest.ID())
}
srv := node.Server()
if srv == nil {
- return nil, fmt.Errorf("node not running: %s", dest.ID)
+ return nil, fmt.Errorf("node not running: %s", dest.ID())
}
pipe1, pipe2 := net.Pipe()
go srv.SetupConn(pipe1, 0, nil)
- node.connected[dest.ID] = true
return pipe2, nil
}
// DialRPC implements the RPCDialer interface by creating an in-memory RPC
// client of the given node
-func (s *SimAdapter) DialRPC(id discover.NodeID) (*rpc.Client, error) {
+func (s *SimAdapter) DialRPC(id enode.ID) (*rpc.Client, error) {
node, ok := s.GetNode(id)
if !ok {
return nil, fmt.Errorf("unknown node: %s", id)
@@ -138,7 +144,7 @@ func (s *SimAdapter) DialRPC(id discover.NodeID) (*rpc.Client, error) {
}
// GetNode returns the node with the given ID if it exists
-func (s *SimAdapter) GetNode(id discover.NodeID) (*SimNode, bool) {
+func (s *SimAdapter) GetNode(id enode.ID) (*SimNode, bool) {
s.mtx.RLock()
defer s.mtx.RUnlock()
node, ok := s.nodes[id]
@@ -150,14 +156,13 @@ func (s *SimAdapter) GetNode(id discover.NodeID) (*SimNode, bool) {
// protocols directly over that pipe
type SimNode struct {
lock sync.RWMutex
- ID discover.NodeID
+ ID enode.ID
config *NodeConfig
adapter *SimAdapter
node *node.Node
running map[string]node.Service
client *rpc.Client
registerOnce sync.Once
- connected map[discover.NodeID]bool
}
// Addr returns the node's discovery address
@@ -165,9 +170,9 @@ func (self *SimNode) Addr() []byte {
return []byte(self.Node().String())
}
-// Node returns a discover.Node representing the SimNode
-func (self *SimNode) Node() *discover.Node {
- return discover.NewNode(self.ID, net.IP{127, 0, 0, 1}, 30303, 30303)
+// Node returns a node descriptor representing the SimNode
+func (sn *SimNode) Node() *enode.Node {
+ return sn.config.Node()
}
// Client returns an rpc.Client which can be used to communicate with the
diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go
index 0d7cf62ca406..d947d2b07d7e 100644
--- a/p2p/simulations/adapters/types.go
+++ b/p2p/simulations/adapters/types.go
@@ -21,12 +21,14 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
+ "net"
"os"
+ "strconv"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rpc"
"github.com/docker/docker/pkg/reexec"
"github.com/gorilla/websocket"
@@ -38,7 +40,6 @@ import (
// * SimNode - An in-memory node
// * ExecNode - A child process node
// * DockerNode - A Docker container node
-//
type Node interface {
// Addr returns the node's address (e.g. an Enode URL)
Addr() []byte
@@ -77,7 +78,7 @@ type NodeAdapter interface {
type NodeConfig struct {
// ID is the node's ID which is used to identify the node in the
// simulation network
- ID discover.NodeID
+ ID enode.ID
// PrivateKey is the node's private key which is used by the devp2p
// stack to encrypt communications
@@ -96,7 +97,9 @@ type NodeConfig struct {
Services []string
// function to sanction or prevent suggesting a peer
- Reachable func(id discover.NodeID) bool
+ Reachable func(id enode.ID) bool
+
+ Port uint16
}
// nodeConfigJSON is used to encode and decode NodeConfig as JSON by encoding
@@ -131,11 +134,9 @@ func (n *NodeConfig) UnmarshalJSON(data []byte) error {
}
if confJSON.ID != "" {
- nodeID, err := discover.HexID(confJSON.ID)
- if err != nil {
+ if err := n.ID.UnmarshalText([]byte(confJSON.ID)); err != nil {
return err
}
- n.ID = nodeID
}
if confJSON.PrivateKey != "" {
@@ -156,20 +157,49 @@ func (n *NodeConfig) UnmarshalJSON(data []byte) error {
return nil
}
+// Node returns the node descriptor represented by the config.
+func (n *NodeConfig) Node() *enode.Node {
+ return enode.NewV4(&n.PrivateKey.PublicKey, net.IP{127, 0, 0, 1}, int(n.Port), int(n.Port))
+}
+
// RandomNodeConfig returns node configuration with a randomly generated ID and
// PrivateKey
func RandomNodeConfig() *NodeConfig {
- key, err := crypto.GenerateKey()
+ prvkey, err := crypto.GenerateKey()
if err != nil {
panic("unable to generate key")
}
- var id discover.NodeID
- pubkey := crypto.FromECDSAPub(&key.PublicKey)
- copy(id[:], pubkey[1:])
+
+ port, err := assignTCPPort()
+ if err != nil {
+ panic("unable to assign tcp port")
+ }
+
+ enodId := enode.PubkeyToIDV4(&prvkey.PublicKey)
return &NodeConfig{
- ID: id,
- PrivateKey: key,
+ PrivateKey: prvkey,
+ ID: enodId,
+ Name: fmt.Sprintf("node_%s", enodId.String()),
+ Port: port,
+ EnableMsgEvents: true,
+ }
+}
+
+func assignTCPPort() (uint16, error) {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return 0, err
+ }
+ l.Close()
+ _, port, err := net.SplitHostPort(l.Addr().String())
+ if err != nil {
+ return 0, err
+ }
+ p, err := strconv.ParseInt(port, 10, 32)
+ if err != nil {
+ return 0, err
}
+ return uint16(p), nil
}
// ServiceContext is a collection of options and methods which can be utilised
@@ -186,7 +216,7 @@ type ServiceContext struct {
// other nodes in the network (for example a simulated Swarm node which needs
// to connect to a Geth node to resolve ENS names)
type RPCDialer interface {
- DialRPC(id discover.NodeID) (*rpc.Client, error)
+ DialRPC(id enode.ID) (*rpc.Client, error)
}
// Services is a collection of services which can be run in a simulation
diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go
index 488abfb8208d..a15c82be7ddb 100644
--- a/p2p/simulations/examples/ping-pong.go
+++ b/p2p/simulations/examples/ping-pong.go
@@ -28,7 +28,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
"github.com/XinFinOrg/XDPoSChain/rpc"
@@ -96,12 +96,12 @@ func main() {
// sends a ping to all its connected peers every 10s and receives a pong in
// return
type pingPongService struct {
- id discover.NodeID
+ id enode.ID
log log.Logger
received int64
}
-func newPingPongService(id discover.NodeID) *pingPongService {
+func newPingPongService(id enode.ID) *pingPongService {
return &pingPongService{
id: id,
log: log.New("node.id", id),
diff --git a/p2p/simulations/http.go b/p2p/simulations/http.go
index 7e3aa5c96e1a..b27124d8c2eb 100644
--- a/p2p/simulations/http.go
+++ b/p2p/simulations/http.go
@@ -30,7 +30,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
"github.com/XinFinOrg/XDPoSChain/rpc"
"github.com/gorilla/websocket"
@@ -711,8 +711,9 @@ func (s *Server) wrapHandler(handler http.HandlerFunc) httprouter.Handle {
ctx := context.Background()
if id := params.ByName("nodeid"); id != "" {
+ var nodeID enode.ID
var node *Node
- if nodeID, err := discover.HexID(id); err == nil {
+ if nodeID.UnmarshalText([]byte(id)) == nil {
node = s.network.GetNode(nodeID)
} else {
node = s.network.GetNodeByName(id)
@@ -725,8 +726,9 @@ func (s *Server) wrapHandler(handler http.HandlerFunc) httprouter.Handle {
}
if id := params.ByName("peerid"); id != "" {
+ var peerID enode.ID
var peer *Node
- if peerID, err := discover.HexID(id); err == nil {
+ if peerID.UnmarshalText([]byte(id)) == nil {
peer = s.network.GetNode(peerID)
} else {
peer = s.network.GetNodeByName(id)
diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go
index 2bc2e0fecc78..b38cfcd29d38 100644
--- a/p2p/simulations/http_test.go
+++ b/p2p/simulations/http_test.go
@@ -30,7 +30,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
@@ -38,12 +38,12 @@ import (
// testService implements the node.Service interface and provides protocols
// and APIs which are useful for testing nodes in a simulation network
type testService struct {
- id discover.NodeID
+ id enode.ID
// peerCount is incremented once a peer handshake has been performed
peerCount int64
- peers map[discover.NodeID]*testPeer
+ peers map[enode.ID]*testPeer
peersMtx sync.Mutex
// state stores []byte which is used to test creating and loading
@@ -54,7 +54,7 @@ type testService struct {
func newTestService(ctx *adapters.ServiceContext) (node.Service, error) {
svc := &testService{
id: ctx.Config.ID,
- peers: make(map[discover.NodeID]*testPeer),
+ peers: make(map[enode.ID]*testPeer),
}
svc.state.Store(ctx.Snapshot)
return svc, nil
@@ -65,7 +65,7 @@ type testPeer struct {
dumReady chan struct{}
}
-func (t *testService) peer(id discover.NodeID) *testPeer {
+func (t *testService) peer(id enode.ID) *testPeer {
t.peersMtx.Lock()
defer t.peersMtx.Unlock()
if peer, ok := t.peers[id]; ok {
@@ -350,7 +350,8 @@ func startTestNetwork(t *testing.T, client *Client) []string {
nodeCount := 2
nodeIDs := make([]string, nodeCount)
for i := 0; i < nodeCount; i++ {
- node, err := client.CreateNode(nil)
+ config := adapters.RandomNodeConfig()
+ node, err := client.CreateNode(config)
if err != nil {
t.Fatalf("error creating node: %s", err)
}
@@ -411,7 +412,7 @@ func (t *expectEvents) nodeEvent(id string, up bool) *Event {
Type: EventTypeNode,
Node: &Node{
Config: &adapters.NodeConfig{
- ID: discover.MustHexID(id),
+ ID: enode.HexID(id),
},
Up: up,
},
@@ -422,8 +423,8 @@ func (t *expectEvents) connEvent(one, other string, up bool) *Event {
return &Event{
Type: EventTypeConn,
Conn: &Conn{
- One: discover.MustHexID(one),
- Other: discover.MustHexID(other),
+ One: enode.HexID(one),
+ Other: enode.HexID(other),
Up: up,
},
}
@@ -529,7 +530,8 @@ func TestHTTPNodeRPC(t *testing.T) {
// start a node in the network
client := NewClient(s.URL)
- node, err := client.CreateNode(nil)
+ config := adapters.RandomNodeConfig()
+ node, err := client.CreateNode(config)
if err != nil {
t.Fatalf("error creating node: %s", err)
}
@@ -591,7 +593,8 @@ func TestHTTPSnapshot(t *testing.T) {
nodeCount := 2
nodes := make([]*p2p.NodeInfo, nodeCount)
for i := 0; i < nodeCount; i++ {
- node, err := client.CreateNode(nil)
+ config := adapters.RandomNodeConfig()
+ node, err := client.CreateNode(config)
if err != nil {
t.Fatalf("error creating node: %s", err)
}
diff --git a/p2p/simulations/mocker.go b/p2p/simulations/mocker.go
index 24900d083197..6f48a6c70d50 100644
--- a/p2p/simulations/mocker.go
+++ b/p2p/simulations/mocker.go
@@ -25,23 +25,24 @@ import (
"time"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
+ "github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
)
-//a map of mocker names to its function
+// a map of mocker names to its function
var mockerList = map[string]func(net *Network, quit chan struct{}, nodeCount int){
"startStop": startStop,
"probabilistic": probabilistic,
"boot": boot,
}
-//Lookup a mocker by its name, returns the mockerFn
+// Lookup a mocker by its name, returns the mockerFn
func LookupMocker(mockerType string) func(net *Network, quit chan struct{}, nodeCount int) {
return mockerList[mockerType]
}
-//Get a list of mockers (keys of the map)
-//Useful for frontend to build available mocker selection
+// Get a list of mockers (keys of the map)
+// Useful for frontend to build available mocker selection
func GetMockerList() []string {
list := make([]string, 0, len(mockerList))
for k := range mockerList {
@@ -50,7 +51,7 @@ func GetMockerList() []string {
return list
}
-//The boot mockerFn only connects the node in a ring and doesn't do anything else
+// The boot mockerFn only connects the node in a ring and doesn't do anything else
func boot(net *Network, quit chan struct{}, nodeCount int) {
_, err := connectNodesInRing(net, nodeCount)
if err != nil {
@@ -58,7 +59,7 @@ func boot(net *Network, quit chan struct{}, nodeCount int) {
}
}
-//The startStop mockerFn stops and starts nodes in a defined period (ticker)
+// The startStop mockerFn stops and starts nodes in a defined period (ticker)
func startStop(net *Network, quit chan struct{}, nodeCount int) {
nodes, err := connectNodesInRing(net, nodeCount)
if err != nil {
@@ -95,10 +96,10 @@ func startStop(net *Network, quit chan struct{}, nodeCount int) {
}
}
-//The probabilistic mocker func has a more probabilistic pattern
-//(the implementation could probably be improved):
-//nodes are connected in a ring, then a varying number of random nodes is selected,
-//mocker then stops and starts them in random intervals, and continues the loop
+// The probabilistic mocker func has a more probabilistic pattern
+// (the implementation could probably be improved):
+// nodes are connected in a ring, then a varying number of random nodes is selected,
+// mocker then stops and starts them in random intervals, and continues the loop
func probabilistic(net *Network, quit chan struct{}, nodeCount int) {
nodes, err := connectNodesInRing(net, nodeCount)
if err != nil {
@@ -147,7 +148,7 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) {
wg.Done()
continue
}
- go func(id discover.NodeID) {
+ go func(id enode.ID) {
time.Sleep(randWait)
err := net.Start(id)
if err != nil {
@@ -161,11 +162,12 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) {
}
-//connect nodeCount number of nodes in a ring
-func connectNodesInRing(net *Network, nodeCount int) ([]discover.NodeID, error) {
- ids := make([]discover.NodeID, nodeCount)
+// connect nodeCount number of nodes in a ring
+func connectNodesInRing(net *Network, nodeCount int) ([]enode.ID, error) {
+ ids := make([]enode.ID, nodeCount)
for i := 0; i < nodeCount; i++ {
- node, err := net.NewNode()
+ conf := adapters.RandomNodeConfig()
+ node, err := net.NewNodeWithConfig(conf)
if err != nil {
log.Error("Error creating a node! %s", err)
return nil, err
diff --git a/p2p/simulations/mocker_test.go b/p2p/simulations/mocker_test.go
index becde298354c..70a945c802c5 100644
--- a/p2p/simulations/mocker_test.go
+++ b/p2p/simulations/mocker_test.go
@@ -27,7 +27,7 @@ import (
"testing"
"time"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
func TestMocker(t *testing.T) {
@@ -82,7 +82,7 @@ func TestMocker(t *testing.T) {
defer sub.Unsubscribe()
//wait until all nodes are started and connected
//store every node up event in a map (value is irrelevant, mimic Set datatype)
- nodemap := make(map[discover.NodeID]bool)
+ nodemap := make(map[enode.ID]bool)
wg.Add(1)
nodesComplete := false
connCount := 0
diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go
index 5be5697da332..42727e7cee7e 100644
--- a/p2p/simulations/network.go
+++ b/p2p/simulations/network.go
@@ -27,11 +27,11 @@ import (
"github.com/XinFinOrg/XDPoSChain/event"
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
)
-var dialBanTimeout = 200 * time.Millisecond
+var DialBanTimeout = 200 * time.Millisecond
// NetworkConfig defines configuration options for starting a Network
type NetworkConfig struct {
@@ -51,7 +51,7 @@ type Network struct {
NetworkConfig
Nodes []*Node `json:"nodes"`
- nodeMap map[discover.NodeID]int
+ nodeMap map[enode.ID]int
Conns []*Conn `json:"conns"`
connMap map[string]int
@@ -67,64 +67,48 @@ func NewNetwork(nodeAdapter adapters.NodeAdapter, conf *NetworkConfig) *Network
return &Network{
NetworkConfig: *conf,
nodeAdapter: nodeAdapter,
- nodeMap: make(map[discover.NodeID]int),
+ nodeMap: make(map[enode.ID]int),
connMap: make(map[string]int),
quitc: make(chan struct{}),
}
}
// Events returns the output event feed of the Network.
-func (self *Network) Events() *event.Feed {
- return &self.events
-}
-
-// NewNode adds a new node to the network with a random ID
-func (self *Network) NewNode() (*Node, error) {
- conf := adapters.RandomNodeConfig()
- conf.Services = []string{self.DefaultService}
- return self.NewNodeWithConfig(conf)
+func (net *Network) Events() *event.Feed {
+ return &net.events
}
// NewNodeWithConfig adds a new node to the network with the given config,
// returning an error if a node with the same ID or name already exists
-func (self *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) {
+ net.lock.Lock()
+ defer net.lock.Unlock()
- // create a random ID and PrivateKey if not set
- if conf.ID == (discover.NodeID{}) {
- c := adapters.RandomNodeConfig()
- conf.ID = c.ID
- conf.PrivateKey = c.PrivateKey
- }
- id := conf.ID
if conf.Reachable == nil {
- conf.Reachable = func(otherID discover.NodeID) bool {
- _, err := self.InitConn(conf.ID, otherID)
- return err == nil
+ conf.Reachable = func(otherID enode.ID) bool {
+ _, err := net.InitConn(conf.ID, otherID)
+ if err != nil && bytes.Compare(conf.ID.Bytes(), otherID.Bytes()) < 0 {
+ return false
+ }
+ return true
}
}
- // assign a name to the node if not set
- if conf.Name == "" {
- conf.Name = fmt.Sprintf("node%02d", len(self.Nodes)+1)
- }
-
// check the node doesn't already exist
- if node := self.getNode(id); node != nil {
- return nil, fmt.Errorf("node with ID %q already exists", id)
+ if node := net.getNode(conf.ID); node != nil {
+ return nil, fmt.Errorf("node with ID %q already exists", conf.ID)
}
- if node := self.getNodeByName(conf.Name); node != nil {
+ if node := net.getNodeByName(conf.Name); node != nil {
return nil, fmt.Errorf("node with name %q already exists", conf.Name)
}
// if no services are configured, use the default service
if len(conf.Services) == 0 {
- conf.Services = []string{self.DefaultService}
+ conf.Services = []string{net.DefaultService}
}
// use the NodeAdapter to create the node
- adapterNode, err := self.nodeAdapter.NewNode(conf)
+ adapterNode, err := net.nodeAdapter.NewNode(conf)
if err != nil {
return nil, err
}
@@ -132,28 +116,28 @@ func (self *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error)
Node: adapterNode,
Config: conf,
}
- log.Trace(fmt.Sprintf("node %v created", id))
- self.nodeMap[id] = len(self.Nodes)
- self.Nodes = append(self.Nodes, node)
+ log.Trace(fmt.Sprintf("node %v created", conf.ID))
+ net.nodeMap[conf.ID] = len(net.Nodes)
+ net.Nodes = append(net.Nodes, node)
// emit a "control" event
- self.events.Send(ControlEvent(node))
+ net.events.Send(ControlEvent(node))
return node, nil
}
// Config returns the network configuration
-func (self *Network) Config() *NetworkConfig {
- return &self.NetworkConfig
+func (net *Network) Config() *NetworkConfig {
+ return &net.NetworkConfig
}
// StartAll starts all nodes in the network
-func (self *Network) StartAll() error {
- for _, node := range self.Nodes {
+func (net *Network) StartAll() error {
+ for _, node := range net.Nodes {
if node.Up {
continue
}
- if err := self.Start(node.ID()); err != nil {
+ if err := net.Start(node.ID()); err != nil {
return err
}
}
@@ -161,12 +145,12 @@ func (self *Network) StartAll() error {
}
// StopAll stops all nodes in the network
-func (self *Network) StopAll() error {
- for _, node := range self.Nodes {
+func (net *Network) StopAll() error {
+ for _, node := range net.Nodes {
if !node.Up {
continue
}
- if err := self.Stop(node.ID()); err != nil {
+ if err := net.Stop(node.ID()); err != nil {
return err
}
}
@@ -174,21 +158,23 @@ func (self *Network) StopAll() error {
}
// Start starts the node with the given ID
-func (self *Network) Start(id discover.NodeID) error {
- return self.startWithSnapshots(id, nil)
+func (net *Network) Start(id enode.ID) error {
+ return net.startWithSnapshots(id, nil)
}
// startWithSnapshots starts the node with the given ID using the give
// snapshots
-func (self *Network) startWithSnapshots(id discover.NodeID, snapshots map[string][]byte) error {
- node := self.GetNode(id)
+func (net *Network) startWithSnapshots(id enode.ID, snapshots map[string][]byte) error {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ node := net.getNode(id)
if node == nil {
return fmt.Errorf("node %v does not exist", id)
}
if node.Up {
return fmt.Errorf("node %v already up", id)
}
- log.Trace(fmt.Sprintf("starting node %v: %v using %v", id, node.Up, self.nodeAdapter.Name()))
+ log.Trace(fmt.Sprintf("starting node %v: %v using %v", id, node.Up, net.nodeAdapter.Name()))
if err := node.Start(snapshots); err != nil {
log.Warn(fmt.Sprintf("start up failed: %v", err))
return err
@@ -196,7 +182,7 @@ func (self *Network) startWithSnapshots(id discover.NodeID, snapshots map[string
node.Up = true
log.Info(fmt.Sprintf("started node %v: %v", id, node.Up))
- self.events.Send(NewEvent(node))
+ net.events.Send(NewEvent(node))
// subscribe to peer events
client, err := node.Client()
@@ -208,22 +194,26 @@ func (self *Network) startWithSnapshots(id discover.NodeID, snapshots map[string
if err != nil {
return fmt.Errorf("error getting peer events for node %v: %s", id, err)
}
- go self.watchPeerEvents(id, events, sub)
+ go net.watchPeerEvents(id, events, sub)
return nil
}
// watchPeerEvents reads peer events from the given channel and emits
// corresponding network events
-func (self *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEvent, sub event.Subscription) {
+func (net *Network) watchPeerEvents(id enode.ID, events chan *p2p.PeerEvent, sub event.Subscription) {
defer func() {
sub.Unsubscribe()
// assume the node is now down
- self.lock.Lock()
- node := self.getNode(id)
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ node := net.getNode(id)
+ if node == nil {
+ log.Error("Can not find node for id", "id", id)
+ return
+ }
node.Up = false
- self.lock.Unlock()
- self.events.Send(NewEvent(node))
+ net.events.Send(NewEvent(node))
}()
for {
select {
@@ -235,16 +225,16 @@ func (self *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEv
switch event.Type {
case p2p.PeerEventTypeAdd:
- self.DidConnect(id, peer)
+ net.DidConnect(id, peer)
case p2p.PeerEventTypeDrop:
- self.DidDisconnect(id, peer)
+ net.DidDisconnect(id, peer)
case p2p.PeerEventTypeMsgSend:
- self.DidSend(id, peer, event.Protocol, *event.MsgCode)
+ net.DidSend(id, peer, event.Protocol, *event.MsgCode)
case p2p.PeerEventTypeMsgRecv:
- self.DidReceive(peer, id, event.Protocol, *event.MsgCode)
+ net.DidReceive(peer, id, event.Protocol, *event.MsgCode)
}
@@ -258,8 +248,10 @@ func (self *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEv
}
// Stop stops the node with the given ID
-func (self *Network) Stop(id discover.NodeID) error {
- node := self.GetNode(id)
+func (net *Network) Stop(id enode.ID) error {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ node := net.getNode(id)
if node == nil {
return fmt.Errorf("node %v does not exist", id)
}
@@ -272,15 +264,15 @@ func (self *Network) Stop(id discover.NodeID) error {
node.Up = false
log.Info(fmt.Sprintf("stop node %v: %v", id, node.Up))
- self.events.Send(ControlEvent(node))
+ net.events.Send(ControlEvent(node))
return nil
}
// Connect connects two nodes together by calling the "admin_addPeer" RPC
// method on the "one" node so that it connects to the "other" node
-func (self *Network) Connect(oneID, otherID discover.NodeID) error {
+func (net *Network) Connect(oneID, otherID enode.ID) error {
log.Debug(fmt.Sprintf("connecting %s to %s", oneID, otherID))
- conn, err := self.InitConn(oneID, otherID)
+ conn, err := net.InitConn(oneID, otherID)
if err != nil {
return err
}
@@ -288,14 +280,14 @@ func (self *Network) Connect(oneID, otherID discover.NodeID) error {
if err != nil {
return err
}
- self.events.Send(ControlEvent(conn))
+ net.events.Send(ControlEvent(conn))
return client.Call(nil, "admin_addPeer", string(conn.other.Addr()))
}
// Disconnect disconnects two nodes by calling the "admin_removePeer" RPC
// method on the "one" node so that it disconnects from the "other" node
-func (self *Network) Disconnect(oneID, otherID discover.NodeID) error {
- conn := self.GetConn(oneID, otherID)
+func (net *Network) Disconnect(oneID, otherID enode.ID) error {
+ conn := net.GetConn(oneID, otherID)
if conn == nil {
return fmt.Errorf("connection between %v and %v does not exist", oneID, otherID)
}
@@ -306,13 +298,15 @@ func (self *Network) Disconnect(oneID, otherID discover.NodeID) error {
if err != nil {
return err
}
- self.events.Send(ControlEvent(conn))
+ net.events.Send(ControlEvent(conn))
return client.Call(nil, "admin_removePeer", string(conn.other.Addr()))
}
// DidConnect tracks the fact that the "one" node connected to the "other" node
-func (self *Network) DidConnect(one, other discover.NodeID) error {
- conn, err := self.GetOrCreateConn(one, other)
+func (net *Network) DidConnect(one, other enode.ID) error {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ conn, err := net.getOrCreateConn(one, other)
if err != nil {
return fmt.Errorf("connection between %v and %v does not exist", one, other)
}
@@ -320,14 +314,16 @@ func (self *Network) DidConnect(one, other discover.NodeID) error {
return fmt.Errorf("%v and %v already connected", one, other)
}
conn.Up = true
- self.events.Send(NewEvent(conn))
+ net.events.Send(NewEvent(conn))
return nil
}
// DidDisconnect tracks the fact that the "one" node disconnected from the
// "other" node
-func (self *Network) DidDisconnect(one, other discover.NodeID) error {
- conn := self.GetConn(one, other)
+func (net *Network) DidDisconnect(one, other enode.ID) error {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ conn := net.getConn(one, other)
if conn == nil {
return fmt.Errorf("connection between %v and %v does not exist", one, other)
}
@@ -335,13 +331,13 @@ func (self *Network) DidDisconnect(one, other discover.NodeID) error {
return fmt.Errorf("%v and %v already disconnected", one, other)
}
conn.Up = false
- conn.initiated = time.Now().Add(-dialBanTimeout)
- self.events.Send(NewEvent(conn))
+ conn.initiated = time.Now().Add(-DialBanTimeout)
+ net.events.Send(NewEvent(conn))
return nil
}
// DidSend tracks the fact that "sender" sent a message to "receiver"
-func (self *Network) DidSend(sender, receiver discover.NodeID, proto string, code uint64) error {
+func (net *Network) DidSend(sender, receiver enode.ID, proto string, code uint64) error {
msg := &Msg{
One: sender,
Other: receiver,
@@ -349,12 +345,12 @@ func (self *Network) DidSend(sender, receiver discover.NodeID, proto string, cod
Code: code,
Received: false,
}
- self.events.Send(NewEvent(msg))
+ net.events.Send(NewEvent(msg))
return nil
}
// DidReceive tracks the fact that "receiver" received a message from "sender"
-func (self *Network) DidReceive(sender, receiver discover.NodeID, proto string, code uint64) error {
+func (net *Network) DidReceive(sender, receiver enode.ID, proto string, code uint64) error {
msg := &Msg{
One: sender,
Other: receiver,
@@ -362,36 +358,45 @@ func (self *Network) DidReceive(sender, receiver discover.NodeID, proto string,
Code: code,
Received: true,
}
- self.events.Send(NewEvent(msg))
+ net.events.Send(NewEvent(msg))
return nil
}
// GetNode gets the node with the given ID, returning nil if the node does not
// exist
-func (self *Network) GetNode(id discover.NodeID) *Node {
- self.lock.Lock()
- defer self.lock.Unlock()
- return self.getNode(id)
+func (net *Network) GetNode(id enode.ID) *Node {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ return net.getNode(id)
}
// GetNode gets the node with the given name, returning nil if the node does
// not exist
-func (self *Network) GetNodeByName(name string) *Node {
- self.lock.Lock()
- defer self.lock.Unlock()
- return self.getNodeByName(name)
+func (net *Network) GetNodeByName(name string) *Node {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ return net.getNodeByName(name)
}
-func (self *Network) getNode(id discover.NodeID) *Node {
- i, found := self.nodeMap[id]
+// GetNodes returns the existing nodes
+func (net *Network) GetNodes() (nodes []*Node) {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+
+ nodes = append(nodes, net.Nodes...)
+ return nodes
+}
+
+func (net *Network) getNode(id enode.ID) *Node {
+ i, found := net.nodeMap[id]
if !found {
return nil
}
- return self.Nodes[i]
+ return net.Nodes[i]
}
-func (self *Network) getNodeByName(name string) *Node {
- for _, node := range self.Nodes {
+func (net *Network) getNodeByName(name string) *Node {
+ for _, node := range net.Nodes {
if node.Config.Name == name {
return node
}
@@ -399,41 +404,32 @@ func (self *Network) getNodeByName(name string) *Node {
return nil
}
-// GetNodes returns the existing nodes
-func (self *Network) GetNodes() (nodes []*Node) {
- self.lock.Lock()
- defer self.lock.Unlock()
-
- nodes = append(nodes, self.Nodes...)
- return nodes
-}
-
// GetConn returns the connection which exists between "one" and "other"
// regardless of which node initiated the connection
-func (self *Network) GetConn(oneID, otherID discover.NodeID) *Conn {
- self.lock.Lock()
- defer self.lock.Unlock()
- return self.getConn(oneID, otherID)
+func (net *Network) GetConn(oneID, otherID enode.ID) *Conn {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ return net.getConn(oneID, otherID)
}
// GetOrCreateConn is like GetConn but creates the connection if it doesn't
// already exist
-func (self *Network) GetOrCreateConn(oneID, otherID discover.NodeID) (*Conn, error) {
- self.lock.Lock()
- defer self.lock.Unlock()
- return self.getOrCreateConn(oneID, otherID)
+func (net *Network) GetOrCreateConn(oneID, otherID enode.ID) (*Conn, error) {
+ net.lock.Lock()
+ defer net.lock.Unlock()
+ return net.getOrCreateConn(oneID, otherID)
}
-func (self *Network) getOrCreateConn(oneID, otherID discover.NodeID) (*Conn, error) {
- if conn := self.getConn(oneID, otherID); conn != nil {
+func (net *Network) getOrCreateConn(oneID, otherID enode.ID) (*Conn, error) {
+ if conn := net.getConn(oneID, otherID); conn != nil {
return conn, nil
}
- one := self.getNode(oneID)
+ one := net.getNode(oneID)
if one == nil {
return nil, fmt.Errorf("node %v does not exist", oneID)
}
- other := self.getNode(otherID)
+ other := net.getNode(otherID)
if other == nil {
return nil, fmt.Errorf("node %v does not exist", otherID)
}
@@ -444,18 +440,18 @@ func (self *Network) getOrCreateConn(oneID, otherID discover.NodeID) (*Conn, err
other: other,
}
label := ConnLabel(oneID, otherID)
- self.connMap[label] = len(self.Conns)
- self.Conns = append(self.Conns, conn)
+ net.connMap[label] = len(net.Conns)
+ net.Conns = append(net.Conns, conn)
return conn, nil
}
-func (self *Network) getConn(oneID, otherID discover.NodeID) *Conn {
+func (net *Network) getConn(oneID, otherID enode.ID) *Conn {
label := ConnLabel(oneID, otherID)
- i, found := self.connMap[label]
+ i, found := net.connMap[label]
if !found {
return nil
}
- return self.Conns[i]
+ return net.Conns[i]
}
// InitConn(one, other) retrieves the connectiton model for the connection between
@@ -466,53 +462,56 @@ func (self *Network) getConn(oneID, otherID discover.NodeID) *Conn {
// it also checks whether there has been recent attempt to connect the peers
// this is cheating as the simulation is used as an oracle and know about
// remote peers attempt to connect to a node which will then not initiate the connection
-func (self *Network) InitConn(oneID, otherID discover.NodeID) (*Conn, error) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (net *Network) InitConn(oneID, otherID enode.ID) (*Conn, error) {
+ net.lock.Lock()
+ defer net.lock.Unlock()
if oneID == otherID {
return nil, fmt.Errorf("refusing to connect to self %v", oneID)
}
- conn, err := self.getOrCreateConn(oneID, otherID)
+ conn, err := net.getOrCreateConn(oneID, otherID)
if err != nil {
return nil, err
}
- if time.Since(conn.initiated) < dialBanTimeout {
- return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID)
- }
if conn.Up {
return nil, fmt.Errorf("%v and %v already connected", oneID, otherID)
}
+ if time.Since(conn.initiated) < DialBanTimeout {
+ return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID)
+ }
+
err = conn.nodesUp()
if err != nil {
+ log.Trace(fmt.Sprintf("nodes not up: %v", err))
return nil, fmt.Errorf("nodes not up: %v", err)
}
+ log.Debug("InitConn - connection initiated")
conn.initiated = time.Now()
return conn, nil
}
// Shutdown stops all nodes in the network and closes the quit channel
-func (self *Network) Shutdown() {
- for _, node := range self.Nodes {
+func (net *Network) Shutdown() {
+ for _, node := range net.Nodes {
log.Debug(fmt.Sprintf("stopping node %s", node.ID().TerminalString()))
if err := node.Stop(); err != nil {
log.Warn(fmt.Sprintf("error stopping node %s", node.ID().TerminalString()), "err", err)
}
}
- close(self.quitc)
+ close(net.quitc)
}
-//Reset resets all network properties:
-//emtpies the nodes and the connection list
-func (self *Network) Reset() {
- self.lock.Lock()
- defer self.lock.Unlock()
+// Reset resets all network properties:
+// emtpies the nodes and the connection list
+func (net *Network) Reset() {
+ net.lock.Lock()
+ defer net.lock.Unlock()
//re-initialize the maps
- self.connMap = make(map[string]int)
- self.nodeMap = make(map[discover.NodeID]int)
+ net.connMap = make(map[string]int)
+ net.nodeMap = make(map[enode.ID]int)
- self.Nodes = nil
- self.Conns = nil
+ net.Nodes = nil
+ net.Conns = nil
}
// Node is a wrapper around adapters.Node which is used to track the status
@@ -528,47 +527,47 @@ type Node struct {
}
// ID returns the ID of the node
-func (self *Node) ID() discover.NodeID {
- return self.Config.ID
+func (n *Node) ID() enode.ID {
+ return n.Config.ID
}
// String returns a log-friendly string
-func (self *Node) String() string {
- return fmt.Sprintf("Node %v", self.ID().TerminalString())
+func (n *Node) String() string {
+ return fmt.Sprintf("Node %v", n.ID().TerminalString())
}
// NodeInfo returns information about the node
-func (self *Node) NodeInfo() *p2p.NodeInfo {
+func (n *Node) NodeInfo() *p2p.NodeInfo {
// avoid a panic if the node is not started yet
- if self.Node == nil {
+ if n.Node == nil {
return nil
}
- info := self.Node.NodeInfo()
- info.Name = self.Config.Name
+ info := n.Node.NodeInfo()
+ info.Name = n.Config.Name
return info
}
// MarshalJSON implements the json.Marshaler interface so that the encoded
// JSON includes the NodeInfo
-func (self *Node) MarshalJSON() ([]byte, error) {
+func (n *Node) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Info *p2p.NodeInfo `json:"info,omitempty"`
Config *adapters.NodeConfig `json:"config,omitempty"`
Up bool `json:"up"`
}{
- Info: self.NodeInfo(),
- Config: self.Config,
- Up: self.Up,
+ Info: n.NodeInfo(),
+ Config: n.Config,
+ Up: n.Up,
})
}
// Conn represents a connection between two nodes in the network
type Conn struct {
// One is the node which initiated the connection
- One discover.NodeID `json:"one"`
+ One enode.ID `json:"one"`
// Other is the node which the connection was made to
- Other discover.NodeID `json:"other"`
+ Other enode.ID `json:"other"`
// Up tracks whether or not the connection is active
Up bool `json:"up"`
@@ -580,40 +579,40 @@ type Conn struct {
}
// nodesUp returns whether both nodes are currently up
-func (self *Conn) nodesUp() error {
- if !self.one.Up {
- return fmt.Errorf("one %v is not up", self.One)
+func (c *Conn) nodesUp() error {
+ if !c.one.Up {
+ return fmt.Errorf("one %v is not up", c.One)
}
- if !self.other.Up {
- return fmt.Errorf("other %v is not up", self.Other)
+ if !c.other.Up {
+ return fmt.Errorf("other %v is not up", c.Other)
}
return nil
}
// String returns a log-friendly string
-func (self *Conn) String() string {
- return fmt.Sprintf("Conn %v->%v", self.One.TerminalString(), self.Other.TerminalString())
+func (c *Conn) String() string {
+ return fmt.Sprintf("Conn %v->%v", c.One.TerminalString(), c.Other.TerminalString())
}
// Msg represents a p2p message sent between two nodes in the network
type Msg struct {
- One discover.NodeID `json:"one"`
- Other discover.NodeID `json:"other"`
- Protocol string `json:"protocol"`
- Code uint64 `json:"code"`
- Received bool `json:"received"`
+ One enode.ID `json:"one"`
+ Other enode.ID `json:"other"`
+ Protocol string `json:"protocol"`
+ Code uint64 `json:"code"`
+ Received bool `json:"received"`
}
// String returns a log-friendly string
-func (self *Msg) String() string {
- return fmt.Sprintf("Msg(%d) %v->%v", self.Code, self.One.TerminalString(), self.Other.TerminalString())
+func (m *Msg) String() string {
+ return fmt.Sprintf("Msg(%d) %v->%v", m.Code, m.One.TerminalString(), m.Other.TerminalString())
}
// ConnLabel generates a deterministic string which represents a connection
// between two nodes, used to compare if two connections are between the same
// nodes
-func ConnLabel(source, target discover.NodeID) string {
- var first, second discover.NodeID
+func ConnLabel(source, target enode.ID) string {
+ var first, second enode.ID
if bytes.Compare(source.Bytes(), target.Bytes()) > 0 {
first = target
second = source
@@ -640,14 +639,14 @@ type NodeSnapshot struct {
}
// Snapshot creates a network snapshot
-func (self *Network) Snapshot() (*Snapshot, error) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (net *Network) Snapshot() (*Snapshot, error) {
+ net.lock.Lock()
+ defer net.lock.Unlock()
snap := &Snapshot{
- Nodes: make([]NodeSnapshot, len(self.Nodes)),
- Conns: make([]Conn, len(self.Conns)),
+ Nodes: make([]NodeSnapshot, len(net.Nodes)),
+ Conns: make([]Conn, len(net.Conns)),
}
- for i, node := range self.Nodes {
+ for i, node := range net.Nodes {
snap.Nodes[i] = NodeSnapshot{Node: *node}
if !node.Up {
continue
@@ -658,33 +657,33 @@ func (self *Network) Snapshot() (*Snapshot, error) {
}
snap.Nodes[i].Snapshots = snapshots
}
- for i, conn := range self.Conns {
+ for i, conn := range net.Conns {
snap.Conns[i] = *conn
}
return snap, nil
}
// Load loads a network snapshot
-func (self *Network) Load(snap *Snapshot) error {
+func (net *Network) Load(snap *Snapshot) error {
for _, n := range snap.Nodes {
- if _, err := self.NewNodeWithConfig(n.Node.Config); err != nil {
+ if _, err := net.NewNodeWithConfig(n.Node.Config); err != nil {
return err
}
if !n.Node.Up {
continue
}
- if err := self.startWithSnapshots(n.Node.Config.ID, n.Snapshots); err != nil {
+ if err := net.startWithSnapshots(n.Node.Config.ID, n.Snapshots); err != nil {
return err
}
}
for _, conn := range snap.Conns {
- if !self.GetNode(conn.One).Up || !self.GetNode(conn.Other).Up {
+ if !net.GetNode(conn.One).Up || !net.GetNode(conn.Other).Up {
//in this case, at least one of the nodes of a connection is not up,
//so it would result in the snapshot `Load` to fail
continue
}
- if err := self.Connect(conn.One, conn.Other); err != nil {
+ if err := net.Connect(conn.One, conn.Other); err != nil {
return err
}
}
@@ -692,7 +691,7 @@ func (self *Network) Load(snap *Snapshot) error {
}
// Subscribe reads control events from a channel and executes them
-func (self *Network) Subscribe(events chan *Event) {
+func (net *Network) Subscribe(events chan *Event) {
for {
select {
case event, ok := <-events:
@@ -700,23 +699,23 @@ func (self *Network) Subscribe(events chan *Event) {
return
}
if event.Control {
- self.executeControlEvent(event)
+ net.executeControlEvent(event)
}
- case <-self.quitc:
+ case <-net.quitc:
return
}
}
}
-func (self *Network) executeControlEvent(event *Event) {
+func (net *Network) executeControlEvent(event *Event) {
log.Trace("execute control event", "type", event.Type, "event", event)
switch event.Type {
case EventTypeNode:
- if err := self.executeNodeEvent(event); err != nil {
+ if err := net.executeNodeEvent(event); err != nil {
log.Error("error executing node event", "event", event, "err", err)
}
case EventTypeConn:
- if err := self.executeConnEvent(event); err != nil {
+ if err := net.executeConnEvent(event); err != nil {
log.Error("error executing conn event", "event", event, "err", err)
}
case EventTypeMsg:
@@ -724,21 +723,21 @@ func (self *Network) executeControlEvent(event *Event) {
}
}
-func (self *Network) executeNodeEvent(e *Event) error {
+func (net *Network) executeNodeEvent(e *Event) error {
if !e.Node.Up {
- return self.Stop(e.Node.ID())
+ return net.Stop(e.Node.ID())
}
- if _, err := self.NewNodeWithConfig(e.Node.Config); err != nil {
+ if _, err := net.NewNodeWithConfig(e.Node.Config); err != nil {
return err
}
- return self.Start(e.Node.ID())
+ return net.Start(e.Node.ID())
}
-func (self *Network) executeConnEvent(e *Event) error {
+func (net *Network) executeConnEvent(e *Event) error {
if e.Conn.Up {
- return self.Connect(e.Conn.One, e.Conn.Other)
+ return net.Connect(e.Conn.One, e.Conn.Other)
} else {
- return self.Disconnect(e.Conn.One, e.Conn.Other)
+ return net.Disconnect(e.Conn.One, e.Conn.Other)
}
}
diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go
index 79803d59b0eb..e534c6571499 100644
--- a/p2p/simulations/network_test.go
+++ b/p2p/simulations/network_test.go
@@ -22,7 +22,7 @@ import (
"testing"
"time"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
)
@@ -39,9 +39,10 @@ func TestNetworkSimulation(t *testing.T) {
})
defer network.Shutdown()
nodeCount := 20
- ids := make([]discover.NodeID, nodeCount)
+ ids := make([]enode.ID, nodeCount)
for i := 0; i < nodeCount; i++ {
- node, err := network.NewNode()
+ conf := adapters.RandomNodeConfig()
+ node, err := network.NewNodeWithConfig(conf)
if err != nil {
t.Fatalf("error creating node: %s", err)
}
@@ -63,7 +64,7 @@ func TestNetworkSimulation(t *testing.T) {
}
return nil
}
- check := func(ctx context.Context, id discover.NodeID) (bool, error) {
+ check := func(ctx context.Context, id enode.ID) (bool, error) {
// check we haven't run out of time
select {
case <-ctx.Done():
@@ -101,7 +102,7 @@ func TestNetworkSimulation(t *testing.T) {
defer cancel()
// trigger a check every 100ms
- trigger := make(chan discover.NodeID)
+ trigger := make(chan enode.ID)
go triggerChecks(ctx, ids, trigger, 100*time.Millisecond)
result := NewSimulation(network).Run(ctx, &Step{
@@ -139,7 +140,7 @@ func TestNetworkSimulation(t *testing.T) {
}
}
-func triggerChecks(ctx context.Context, ids []discover.NodeID, trigger chan discover.NodeID, interval time.Duration) {
+func triggerChecks(ctx context.Context, ids []enode.ID, trigger chan enode.ID, interval time.Duration) {
tick := time.NewTicker(interval)
defer tick.Stop()
for {
diff --git a/p2p/simulations/pipes/pipes.go b/p2p/simulations/pipes/pipes.go
new file mode 100644
index 000000000000..ec277c0d147c
--- /dev/null
+++ b/p2p/simulations/pipes/pipes.go
@@ -0,0 +1,55 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package pipes
+
+import (
+ "net"
+)
+
+// NetPipe wraps net.Pipe in a signature returning an error
+func NetPipe() (net.Conn, net.Conn, error) {
+ p1, p2 := net.Pipe()
+ return p1, p2, nil
+}
+
+// TCPPipe creates an in process full duplex pipe based on a localhost TCP socket
+func TCPPipe() (net.Conn, net.Conn, error) {
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer l.Close()
+
+ var aconn net.Conn
+ aerr := make(chan error, 1)
+ go func() {
+ var err error
+ aconn, err = l.Accept()
+ aerr <- err
+ }()
+
+ dconn, err := net.Dial("tcp", l.Addr().String())
+ if err != nil {
+ <-aerr
+ return nil, nil, err
+ }
+ if err := <-aerr; err != nil {
+ dconn.Close()
+ return nil, nil, err
+ }
+ return aconn, dconn, nil
+}
diff --git a/p2p/simulations/simulation.go b/p2p/simulations/simulation.go
index 30faf988bca8..533dcb1a3c1a 100644
--- a/p2p/simulations/simulation.go
+++ b/p2p/simulations/simulation.go
@@ -20,7 +20,7 @@ import (
"context"
"time"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
// Simulation provides a framework for running actions in a simulated network
@@ -55,7 +55,7 @@ func (s *Simulation) Run(ctx context.Context, step *Step) (result *StepResult) {
}
// wait for all node expectations to either pass, error or timeout
- nodes := make(map[discover.NodeID]struct{}, len(step.Expect.Nodes))
+ nodes := make(map[enode.ID]struct{}, len(step.Expect.Nodes))
for _, id := range step.Expect.Nodes {
nodes[id] = struct{}{}
}
@@ -119,7 +119,7 @@ type Step struct {
// Trigger is a channel which receives node ids and triggers an
// expectation check for that node
- Trigger chan discover.NodeID
+ Trigger chan enode.ID
// Expect is the expectation to wait for when performing this step
Expect *Expectation
@@ -127,15 +127,15 @@ type Step struct {
type Expectation struct {
// Nodes is a list of nodes to check
- Nodes []discover.NodeID
+ Nodes []enode.ID
// Check checks whether a given node meets the expectation
- Check func(context.Context, discover.NodeID) (bool, error)
+ Check func(context.Context, enode.ID) (bool, error)
}
func newStepResult() *StepResult {
return &StepResult{
- Passes: make(map[discover.NodeID]time.Time),
+ Passes: make(map[enode.ID]time.Time),
}
}
@@ -150,7 +150,7 @@ type StepResult struct {
FinishedAt time.Time
// Passes are the timestamps of the successful node expectations
- Passes map[discover.NodeID]time.Time
+ Passes map[enode.ID]time.Time
// NetworkEvents are the network events which occurred during the step
NetworkEvents []*Event
diff --git a/p2p/testing/peerpool.go b/p2p/testing/peerpool.go
index 6f8a5d7a5228..300eaf4f6aa4 100644
--- a/p2p/testing/peerpool.go
+++ b/p2p/testing/peerpool.go
@@ -21,22 +21,22 @@ import (
"sync"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
)
type TestPeer interface {
- ID() discover.NodeID
+ ID() enode.ID
Drop(error)
}
// TestPeerPool is an example peerPool to demonstrate registration of peer connections
type TestPeerPool struct {
lock sync.Mutex
- peers map[discover.NodeID]TestPeer
+ peers map[enode.ID]TestPeer
}
func NewTestPeerPool() *TestPeerPool {
- return &TestPeerPool{peers: make(map[discover.NodeID]TestPeer)}
+ return &TestPeerPool{peers: make(map[enode.ID]TestPeer)}
}
func (self *TestPeerPool) Add(p TestPeer) {
@@ -53,15 +53,15 @@ func (self *TestPeerPool) Remove(p TestPeer) {
delete(self.peers, p.ID())
}
-func (self *TestPeerPool) Has(id discover.NodeID) bool {
- self.lock.Lock()
- defer self.lock.Unlock()
- _, ok := self.peers[id]
+func (p *TestPeerPool) Has(id enode.ID) bool {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ _, ok := p.peers[id]
return ok
}
-func (self *TestPeerPool) Get(id discover.NodeID) TestPeer {
- self.lock.Lock()
- defer self.lock.Unlock()
- return self.peers[id]
+func (p *TestPeerPool) Get(id enode.ID) TestPeer {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+ return p.peers[id]
}
diff --git a/p2p/testing/protocolsession.go b/p2p/testing/protocolsession.go
index 2c0133b111b1..74597f08f70a 100644
--- a/p2p/testing/protocolsession.go
+++ b/p2p/testing/protocolsession.go
@@ -24,7 +24,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
)
@@ -35,7 +35,7 @@ var errTimedOut = errors.New("timed out")
// receive (expect) messages
type ProtocolSession struct {
Server *p2p.Server
- IDs []discover.NodeID
+ Nodes []*enode.Node
adapter *adapters.SimAdapter
events chan *p2p.PeerEvent
}
@@ -56,32 +56,32 @@ type Exchange struct {
// Trigger is part of the exchange, incoming message for the pivot node
// sent by a peer
type Trigger struct {
- Msg interface{} // type of message to be sent
- Code uint64 // code of message is given
- Peer discover.NodeID // the peer to send the message to
- Timeout time.Duration // timeout duration for the sending
+ Msg interface{} // type of message to be sent
+ Code uint64 // code of message is given
+ Peer enode.ID // the peer to send the message to
+ Timeout time.Duration // timeout duration for the sending
}
// Expect is part of an exchange, outgoing message from the pivot node
// received by a peer
type Expect struct {
- Msg interface{} // type of message to expect
- Code uint64 // code of message is now given
- Peer discover.NodeID // the peer that expects the message
- Timeout time.Duration // timeout duration for receiving
+ Msg interface{} // type of message to expect
+ Code uint64 // code of message is now given
+ Peer enode.ID // the peer that expects the message
+ Timeout time.Duration // timeout duration for receiving
}
// Disconnect represents a disconnect event, used and checked by TestDisconnected
type Disconnect struct {
- Peer discover.NodeID // discconnected peer
- Error error // disconnect reason
+ Peer enode.ID // discconnected peer
+ Error error // disconnect reason
}
// trigger sends messages from peers
func (self *ProtocolSession) trigger(trig Trigger) error {
simNode, ok := self.adapter.GetNode(trig.Peer)
if !ok {
- return fmt.Errorf("trigger: peer %v does not exist (1- %v)", trig.Peer, len(self.IDs))
+ return fmt.Errorf("trigger: peer %v does not exist (1- %v)", trig.Peer, len(self.Nodes))
}
mockNode, ok := simNode.Services()[0].(*mockNode)
if !ok {
@@ -109,7 +109,7 @@ func (self *ProtocolSession) trigger(trig Trigger) error {
// expect checks an expectation of a message sent out by the pivot node
func (self *ProtocolSession) expect(exps []Expect) error {
// construct a map of expectations for each node
- peerExpects := make(map[discover.NodeID][]Expect)
+ peerExpects := make(map[enode.ID][]Expect)
for _, exp := range exps {
if exp.Msg == nil {
return errors.New("no message to expect")
@@ -118,11 +118,11 @@ func (self *ProtocolSession) expect(exps []Expect) error {
}
// construct a map of mockNodes for each node
- mockNodes := make(map[discover.NodeID]*mockNode)
+ mockNodes := make(map[enode.ID]*mockNode)
for nodeID := range peerExpects {
simNode, ok := self.adapter.GetNode(nodeID)
if !ok {
- return fmt.Errorf("trigger: peer %v does not exist (1- %v)", nodeID, len(self.IDs))
+ return fmt.Errorf("trigger: peer %v does not exist (1- %v)", nodeID, len(self.Nodes))
}
mockNode, ok := simNode.Services()[0].(*mockNode)
if !ok {
@@ -251,7 +251,7 @@ func (self *ProtocolSession) testExchange(e Exchange) error {
// TestDisconnected tests the disconnections given as arguments
// the disconnect structs describe what disconnect error is expected on which peer
func (self *ProtocolSession) TestDisconnected(disconnects ...*Disconnect) error {
- expects := make(map[discover.NodeID]error)
+ expects := make(map[enode.ID]error)
for _, disconnect := range disconnects {
expects[disconnect.Peer] = disconnect.Error
}
diff --git a/p2p/testing/protocoltester.go b/p2p/testing/protocoltester.go
index 21a57fd09cf9..58de36253632 100644
--- a/p2p/testing/protocoltester.go
+++ b/p2p/testing/protocoltester.go
@@ -34,7 +34,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/log"
"github.com/XinFinOrg/XDPoSChain/node"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations"
"github.com/XinFinOrg/XDPoSChain/p2p/simulations/adapters"
"github.com/XinFinOrg/XDPoSChain/rlp"
@@ -51,7 +51,7 @@ type ProtocolTester struct {
// NewProtocolTester constructs a new ProtocolTester
// it takes as argument the pivot node id, the number of dummy peers and the
// protocol run function called on a peer connection by the p2p server
-func NewProtocolTester(t *testing.T, id discover.NodeID, n int, run func(*p2p.Peer, p2p.MsgReadWriter) error) *ProtocolTester {
+func NewProtocolTester(t *testing.T, id enode.ID, n int, run func(*p2p.Peer, p2p.MsgReadWriter) error) *ProtocolTester {
services := adapters.Services{
"test": func(ctx *adapters.ServiceContext) (node.Service, error) {
return &testNode{run}, nil
@@ -75,17 +75,17 @@ func NewProtocolTester(t *testing.T, id discover.NodeID, n int, run func(*p2p.Pe
node := net.GetNode(id).Node.(*adapters.SimNode)
peers := make([]*adapters.NodeConfig, n)
- peerIDs := make([]discover.NodeID, n)
+ nodes := make([]*enode.Node, n)
for i := 0; i < n; i++ {
peers[i] = adapters.RandomNodeConfig()
peers[i].Services = []string{"mock"}
- peerIDs[i] = peers[i].ID
+ nodes[i] = peers[i].Node()
}
events := make(chan *p2p.PeerEvent, 1000)
node.SubscribeEvents(events)
ps := &ProtocolSession{
Server: node.Server(),
- IDs: peerIDs,
+ Nodes: nodes,
adapter: adapter,
events: events,
}
@@ -107,7 +107,7 @@ func (self *ProtocolTester) Stop() error {
// Connect brings up the remote peer node and connects it using the
// p2p/simulations network connection with the in memory network adapter
-func (self *ProtocolTester) Connect(selfID discover.NodeID, peers ...*adapters.NodeConfig) {
+func (self *ProtocolTester) Connect(selfID enode.ID, peers ...*adapters.NodeConfig) {
for _, peer := range peers {
log.Trace(fmt.Sprintf("start node %v", peer.ID))
if _, err := self.network.NewNodeWithConfig(peer); err != nil {
diff --git a/tests/fuzzers/txfetcher/corpus/0151ee1d0db4c74d3bcdfa4f7396a4c8538748c9-2 b/tests/fuzzers/txfetcher/corpus/0151ee1d0db4c74d3bcdfa4f7396a4c8538748c9-2
new file mode 100644
index 000000000000..2c75e9c7a755
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/0151ee1d0db4c74d3bcdfa4f7396a4c8538748c9-2
@@ -0,0 +1 @@
+¿½
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/020dd7b492a6eb34ff0b7d8ee46189422c37e4a7-6 b/tests/fuzzers/txfetcher/corpus/020dd7b492a6eb34ff0b7d8ee46189422c37e4a7-6
new file mode 100644
index 000000000000..8d3b57789e79
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/020dd7b492a6eb34ff0b7d8ee46189422c37e4a7-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/021d1144e359233c496e22c3250609b11b213e9f-4 b/tests/fuzzers/txfetcher/corpus/021d1144e359233c496e22c3250609b11b213e9f-4
new file mode 100644
index 000000000000..73731899d588
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/021d1144e359233c496e22c3250609b11b213e9f-4
@@ -0,0 +1,12 @@
+ TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iAJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDlrKBxKKtDrGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTtqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tuV6ef6anZzus1s1Y1Clb6HbnWWF/wbZGOpet
+3m4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKZTXtdZrh+k7hx0nTP8Jcb
+uqFk541awmMogY/EfbWd6IOkp+4xqjlFBEDytgbIECQQDvH/6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz84SHEg1Ak/7KCxmD/sfgS5TeuNi8DoUBEmiSJwm7FX
+ftxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su43sjXNueLKH8+ph2UfQuU9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2pGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIáo‡X
+qUn3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JMhNRcVFMO8dDaFo
+f9Oeos0UotgiDktdQHxdNEwLjQlJBz+OtwwA=---E RATTIEY-
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/0d28327b1fb52c1ba02a6eb96675c31633921bb2-2 b/tests/fuzzers/txfetcher/corpus/0d28327b1fb52c1ba02a6eb96675c31633921bb2-2
new file mode 100644
index 000000000000..8cc3039cb837
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/0d28327b1fb52c1ba02a6eb96675c31633921bb2-2
@@ -0,0 +1,15 @@
+¸&^£áo‡È—-----BEGIN RSA TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
+qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA TESTING KEY-----Q_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/0fcd827b57ded58e91f7ba2ac2b7ea4d25ebedca-7 b/tests/fuzzers/txfetcher/corpus/0fcd827b57ded58e91f7ba2ac2b7ea4d25ebedca-7
new file mode 100644
index 000000000000..8ceee16af1ee
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/0fcd827b57ded58e91f7ba2ac2b7ea4d25ebedca-7
@@ -0,0 +1 @@
+ð½apï¿ïï��ï¿ï¿¿½½½¿¿½½ï¿½ï¿½¿½ï¿ï¿½ï¿ïÓÌV½¿½ïïï¿ï¿½#ï¿ï¿½&��
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/109bc9b8fd4fef63493e104c703c79bc4a5e8d34-6 b/tests/fuzzers/txfetcher/corpus/109bc9b8fd4fef63493e104c703c79bc4a5e8d34-6
new file mode 100644
index 000000000000..df9b986af100
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/109bc9b8fd4fef63493e104c703c79bc4a5e8d34-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/163785ab002746452619f31e8dfcb4549e6f8b6e-6 b/tests/fuzzers/txfetcher/corpus/163785ab002746452619f31e8dfcb4549e6f8b6e-6
new file mode 100644
index 000000000000..55467373d461
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/163785ab002746452619f31e8dfcb4549e6f8b6e-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1adfa6b9ddf5766220c8ff7ede2926ca241bb947-3 b/tests/fuzzers/txfetcher/corpus/1adfa6b9ddf5766220c8ff7ede2926ca241bb947-3
new file mode 100644
index 000000000000..4a593aa28dd4
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/1adfa6b9ddf5766220c8ff7ede2926ca241bb947-3
@@ -0,0 +1,11 @@
+TAKBgDuLnQA3gey3VBznB39JUtxjeE6myuDkM/uGlfjb
+S1w4iA5sBzzh8uxEbi4nW91IJm2gsvvZhICHS3l6ab4pZB
+l2DulrKBxKKtD1rGxlG4LncabFn9vLZad2bSysqz/qTAUSTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Z4vMXc7jpTLryzTQIvVdfQbRc6+MUVeLKZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk54MogxEcfbWd6IOkp+4xqFLBEDtgbIECnk+hgN4H
+qzzxxr397vWrjrIgbJpQvBv8QeeuNi8DoUBEmiSJwa7FXY
+FUtxuvL7XvjwjN5B30pEbc6Iuyt7y4MQJBAIt21su4b3sjphy2tuUE9xblTu14qgHZ6+AiZovGKU--FfYAqVXVlxtIX
+qyU3X9ps8ZfjLZ45l6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA T
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/1b9a02e9a48fea1d2fc3fb77946ada278e152079-4 b/tests/fuzzers/txfetcher/corpus/1b9a02e9a48fea1d2fc3fb77946ada278e152079-4
new file mode 100644
index 000000000000..4a56f93d3ba9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1b9a02e9a48fea1d2fc3fb77946ada278e152079-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1e14c7ea1faef92890988061b5abe96db7190f98-7 b/tests/fuzzers/txfetcher/corpus/1e14c7ea1faef92890988061b5abe96db7190f98-7
new file mode 100644
index 000000000000..d2442fc5a6c5
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/1e14c7ea1faef92890988061b5abe96db7190f98-7
@@ -0,0 +1 @@
+0000000000000000000000000000000000000000000000000000000000000000000000000
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/1e7d05f00e99cbf3ff0ef1cd7ea8dd07ad6dff23-6 b/tests/fuzzers/txfetcher/corpus/1e7d05f00e99cbf3ff0ef1cd7ea8dd07ad6dff23-6
new file mode 100644
index 000000000000..1c342ff53a36
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1e7d05f00e99cbf3ff0ef1cd7ea8dd07ad6dff23-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1ec95e347fd522e6385b5091aa81aa2485be4891-4 b/tests/fuzzers/txfetcher/corpus/1ec95e347fd522e6385b5091aa81aa2485be4891-4
new file mode 100644
index 000000000000..b0c776bd4d99
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1ec95e347fd522e6385b5091aa81aa2485be4891-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1fbfa5d214060d2a0905846a589fd6f78d411451-4 b/tests/fuzzers/txfetcher/corpus/1fbfa5d214060d2a0905846a589fd6f78d411451-4
new file mode 100644
index 000000000000..75de835c98de
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1fbfa5d214060d2a0905846a589fd6f78d411451-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1fd84ee194e791783a7f18f0a6deab8efe05fc04-2 b/tests/fuzzers/txfetcher/corpus/1fd84ee194e791783a7f18f0a6deab8efe05fc04-2
new file mode 100644
index 000000000000..3b6d2560aea8
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/1fd84ee194e791783a7f18f0a6deab8efe05fc04-2
@@ -0,0 +1 @@
+¸&
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/21e76b9fca21d94d97f860c1c82f40697a83471b-8 b/tests/fuzzers/txfetcher/corpus/21e76b9fca21d94d97f860c1c82f40697a83471b-8
new file mode 100644
index 000000000000..1d4620f49f21
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/21e76b9fca21d94d97f860c1c82f40697a83471b-8
@@ -0,0 +1,3 @@
+DtQvfQ+MULKZTXk78c
+/fWkpxlQQ/+hgNzVtx9vWgJsafG7b0dA4AFjwVbFLmQcj2PprIMmPNQrooX
+L
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/220a87fed0c92474923054094eb7aff14289cf5e-4 b/tests/fuzzers/txfetcher/corpus/220a87fed0c92474923054094eb7aff14289cf5e-4
new file mode 100644
index 000000000000..175f74fd5aa8
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/220a87fed0c92474923054094eb7aff14289cf5e-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/23ddcd66aa92fe3d78b7f5b6e7cddb1b55c5f5df-3 b/tests/fuzzers/txfetcher/corpus/23ddcd66aa92fe3d78b7f5b6e7cddb1b55c5f5df-3
new file mode 100644
index 000000000000..95892c7b00c5
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/23ddcd66aa92fe3d78b7f5b6e7cddb1b55c5f5df-3
@@ -0,0 +1,12 @@
+4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZeIrCHS3l6afab4pZB
+l2+XsDlrKBxKKtD1rGxlG4jncdabFn9gvLZad2bSysqz/qTAUSTvqJQIDAQAB
+AoGAGRzwwXvBOAy5tM/uV6e+Zf6aZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Z4vD6Mc7pLryzTQIVdfQbRc6+MUVeLKZaTXtdZru+Jk70PJJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+gN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQ2PprIMPcQroo8vpjSHg1Ev14KxmQeDydfsgeuN8UBESJwm7F
+UtuL7Xvjw50pNEbc6Iuyty4QJA21su4sjXNueLQphy2U
+fQtuUE9txblTu14qN7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6ARYiZPYj1oGUFfYAVVxtI
+qyBnu3X9pfLZOAkEAlT4R5Yl6cJQYZHOde3JEhNRcVFMO8dJFo
+f9Oeos0UUhgiDkQxdEwLjQf7lJJz5OtwC=
+-NRSA TESINGKEY-Q_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/2441d249faf9a859e38c49f6e305b394280c6ea5-1 b/tests/fuzzers/txfetcher/corpus/2441d249faf9a859e38c49f6e305b394280c6ea5-1
new file mode 100644
index 000000000000..d76207e992a6
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2441d249faf9a859e38c49f6e305b394280c6ea5-1 differ
diff --git a/tests/fuzzers/txfetcher/corpus/2da1f0635e11283b1927974f418aadd8837ad31e-7 b/tests/fuzzers/txfetcher/corpus/2da1f0635e11283b1927974f418aadd8837ad31e-7
new file mode 100644
index 000000000000..73ae7057014f
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2da1f0635e11283b1927974f418aadd8837ad31e-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/2e1853fbf8efe40098b1583224fe3b5f335e7037-6 b/tests/fuzzers/txfetcher/corpus/2e1853fbf8efe40098b1583224fe3b5f335e7037-6
new file mode 100644
index 000000000000..692981e61415
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2e1853fbf8efe40098b1583224fe3b5f335e7037-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/2f25490dc49c103d653843ed47324b310ee7105e-7 b/tests/fuzzers/txfetcher/corpus/2f25490dc49c103d653843ed47324b310ee7105e-7
new file mode 100644
index 000000000000..5cf7da75df2d
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2f25490dc49c103d653843ed47324b310ee7105e-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/30494b85bb60ad7f099fa49d427007a761620d8f-5 b/tests/fuzzers/txfetcher/corpus/30494b85bb60ad7f099fa49d427007a761620d8f-5
new file mode 100644
index 000000000000..7ff9d397521d
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/30494b85bb60ad7f099fa49d427007a761620d8f-5
@@ -0,0 +1,10 @@
+jXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6Yj013sovGKUFfYAqVXVlxtIX
+qyUBnu3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dDaFeo
+f9Oeos0UotgiDktdQHxdNEwLjQfl
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/316024ca3aaf09c1de5258733ff5fe3d799648d3-4 b/tests/fuzzers/txfetcher/corpus/316024ca3aaf09c1de5258733ff5fe3d799648d3-4
new file mode 100644
index 000000000000..61f7d78f3463
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/316024ca3aaf09c1de5258733ff5fe3d799648d3-4
@@ -0,0 +1,15 @@
+¸^áo‡È—----BEGIN RA TTING KEY-----
+IIXgIBAAKBQDuLnQI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJmgsvvZhrCHSl6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjcdabF9gvLZad2bSysqz/qTAUStTvqJQDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Z4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj043sovGKUFfYAqVXVlxtIX
+qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA TESTING KEY-----Q_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/32a089e2c439a91f4c1b67a13d52429bcded0dd9-7 b/tests/fuzzers/txfetcher/corpus/32a089e2c439a91f4c1b67a13d52429bcded0dd9-7
new file mode 100644
index 000000000000..a986a9d8e753
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/32a089e2c439a91f4c1b67a13d52429bcded0dd9-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/33ec1dc0bfeb93d16edee3c07125fec6ac1aa17d-2 b/tests/fuzzers/txfetcher/corpus/33ec1dc0bfeb93d16edee3c07125fec6ac1aa17d-2
new file mode 100644
index 000000000000..d41771b86ce9
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/33ec1dc0bfeb93d16edee3c07125fec6ac1aa17d-2
@@ -0,0 +1 @@
+ï¿
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/37a0d207700b52caa005ec8aeb344dcb13150ed2-5 b/tests/fuzzers/txfetcher/corpus/37a0d207700b52caa005ec8aeb344dcb13150ed2-5
new file mode 100644
index 000000000000..2f09c6e28f03
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/37a0d207700b52caa005ec8aeb344dcb13150ed2-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/382f59c66d0ddb6747d3177263279789ca15c2db-5 b/tests/fuzzers/txfetcher/corpus/382f59c66d0ddb6747d3177263279789ca15c2db-5
new file mode 100644
index 000000000000..84441ac37462
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/382f59c66d0ddb6747d3177263279789ca15c2db-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3a010483a4ad8d7215447ce27e0fac3791235c99-4 b/tests/fuzzers/txfetcher/corpus/3a010483a4ad8d7215447ce27e0fac3791235c99-4
new file mode 100644
index 000000000000..28f5d99b986a
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3a010483a4ad8d7215447ce27e0fac3791235c99-4
@@ -0,0 +1,7 @@
+
+lGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/3a3b717fcfe7ffb000b906e5a76f32248a576bf7-6 b/tests/fuzzers/txfetcher/corpus/3a3b717fcfe7ffb000b906e5a76f32248a576bf7-6
new file mode 100644
index 000000000000..022de3c61d4b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/3a3b717fcfe7ffb000b906e5a76f32248a576bf7-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3c37f6d58b8029971935f127f53e6aaeba558445-6 b/tests/fuzzers/txfetcher/corpus/3c37f6d58b8029971935f127f53e6aaeba558445-6
new file mode 100644
index 000000000000..9f3bf093ad1c
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3c37f6d58b8029971935f127f53e6aaeba558445-6
@@ -0,0 +1,2 @@
+¶Èíw¿½ï¿½Â€ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ �
+���
���ï¿ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/¿½0
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/3c73b63bafa9f535c882ec17189adaf02b58f432-6 b/tests/fuzzers/txfetcher/corpus/3c73b63bafa9f535c882ec17189adaf02b58f432-6
new file mode 100644
index 000000000000..0dfbc46993f8
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3c73b63bafa9f535c882ec17189adaf02b58f432-6
@@ -0,0 +1 @@
+LvhaJQHOe3EhRcdaFofeoogkjQfJB
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/3d11500c4f66b20c73bbdfb1a7bddd7bbf92b29c-5 b/tests/fuzzers/txfetcher/corpus/3d11500c4f66b20c73bbdfb1a7bddd7bbf92b29c-5
new file mode 100644
index 000000000000..b19fc7f4584a
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/3d11500c4f66b20c73bbdfb1a7bddd7bbf92b29c-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3d8b5bf36c80d6f65802280039f85421f32b5055-6 b/tests/fuzzers/txfetcher/corpus/3d8b5bf36c80d6f65802280039f85421f32b5055-6
new file mode 100644
index 000000000000..eacd269f317b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/3d8b5bf36c80d6f65802280039f85421f32b5055-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3f99c546a3962256176d566c19e3fffb62072078-1 b/tests/fuzzers/txfetcher/corpus/3f99c546a3962256176d566c19e3fffb62072078-1
new file mode 100644
index 000000000000..9e90183d6b65
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3f99c546a3962256176d566c19e3fffb62072078-1
@@ -0,0 +1 @@
+¸&^£áo‡
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/408ec46539af27acd82b3d01e863597030882458-8 b/tests/fuzzers/txfetcher/corpus/408ec46539af27acd82b3d01e863597030882458-8
new file mode 100644
index 000000000000..65d55437e5c5
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/408ec46539af27acd82b3d01e863597030882458-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/436154e5bb6487673f6642e6d2a582c01b083c08-8 b/tests/fuzzers/txfetcher/corpus/436154e5bb6487673f6642e6d2a582c01b083c08-8
new file mode 100644
index 000000000000..28e519c12589
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/436154e5bb6487673f6642e6d2a582c01b083c08-8
@@ -0,0 +1 @@
+ð½apfffffffffffffffffffffffffffffffebadce6f48a0Ÿ_3bbfd2364
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/45f565cd14b8de1ba2e925047ce776c2682b4b8d-3 b/tests/fuzzers/txfetcher/corpus/45f565cd14b8de1ba2e925047ce776c2682b4b8d-3
new file mode 100644
index 000000000000..9f03a095b9f2
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/45f565cd14b8de1ba2e925047ce776c2682b4b8d-3 differ
diff --git a/tests/fuzzers/txfetcher/corpus/4a0a12f5b033c8c160cc3b5133692ea1e92c6cdf-7 b/tests/fuzzers/txfetcher/corpus/4a0a12f5b033c8c160cc3b5133692ea1e92c6cdf-7
new file mode 100644
index 000000000000..e50b5494c971
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/4a0a12f5b033c8c160cc3b5133692ea1e92c6cdf-7
@@ -0,0 +1,3 @@
+DtQvfQ+MULKZTXk78c
+/fWkpxlyEQQ/+hgNzVtx9vWgJsafG7b0dA4AFjwVbFLmQcj2PprIMmPNQg1Ak/7KCxmDgS5TDEmSJwFX
+txLjbt4xTgeXVlXsjLZ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/550f15ef65230cc4dcfab7fea67de212d9212ff8-8 b/tests/fuzzers/txfetcher/corpus/550f15ef65230cc4dcfab7fea67de212d9212ff8-8
new file mode 100644
index 000000000000..34005f43cbee
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/550f15ef65230cc4dcfab7fea67de212d9212ff8-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/5552213d659fef900a194c52718ffeffdc72d043-3 b/tests/fuzzers/txfetcher/corpus/5552213d659fef900a194c52718ffeffdc72d043-3
new file mode 100644
index 000000000000..7346ff1955e9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/5552213d659fef900a194c52718ffeffdc72d043-3 differ
diff --git a/tests/fuzzers/txfetcher/corpus/5570ef82893a9b9b9158572d43a7de7537121d2d-1 b/tests/fuzzers/txfetcher/corpus/5570ef82893a9b9b9158572d43a7de7537121d2d-1
new file mode 100644
index 000000000000..feffcebca0c8
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/5570ef82893a9b9b9158572d43a7de7537121d2d-1
@@ -0,0 +1 @@
+ð½ï½ï¿½Ù¯0,1,2,3,4,5,6,7,-3420794409,(2,a)
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/5e10f734f8af4116fbd164d96eec67aa53e6228c-5 b/tests/fuzzers/txfetcher/corpus/5e10f734f8af4116fbd164d96eec67aa53e6228c-5
new file mode 100644
index 000000000000..0eacd0b59a6d
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/5e10f734f8af4116fbd164d96eec67aa53e6228c-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/608200b402488b3989ec8ec5f4190ccb537b8ea4-4 b/tests/fuzzers/txfetcher/corpus/608200b402488b3989ec8ec5f4190ccb537b8ea4-4
new file mode 100644
index 000000000000..d37b018515b8
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/608200b402488b3989ec8ec5f4190ccb537b8ea4-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/61e89c3fbdf9eff74bd250ea73cc2e61f8ca0d97-5 b/tests/fuzzers/txfetcher/corpus/61e89c3fbdf9eff74bd250ea73cc2e61f8ca0d97-5
new file mode 100644
index 000000000000..155744bccc2f
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/61e89c3fbdf9eff74bd250ea73cc2e61f8ca0d97-5
@@ -0,0 +1 @@
+88242871'392752200424491531672177074144720616417147514758635765020556616¿
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/62817a48c78fbf2c12fcdc5ca58e2ca60c43543a-7 b/tests/fuzzers/txfetcher/corpus/62817a48c78fbf2c12fcdc5ca58e2ca60c43543a-7
new file mode 100644
index 000000000000..795608a78957
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/62817a48c78fbf2c12fcdc5ca58e2ca60c43543a-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/6782da8f1a432a77306d60d2ac2470c35b98004f-3 b/tests/fuzzers/txfetcher/corpus/6782da8f1a432a77306d60d2ac2470c35b98004f-3
new file mode 100644
index 000000000000..f44949e6aefc
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/6782da8f1a432a77306d60d2ac2470c35b98004f-3
@@ -0,0 +1 @@
+21888242871'392752200424452601091531672177074144720616417147514758635765020556616¿½
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/68fb55290cb9d6da5b259017c34bcecf96c944aa-5 b/tests/fuzzers/txfetcher/corpus/68fb55290cb9d6da5b259017c34bcecf96c944aa-5
new file mode 100644
index 000000000000..23d905b827e2
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/68fb55290cb9d6da5b259017c34bcecf96c944aa-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/6a5059bc86872526241d21ab5dae9f0afd3b9ae1-3 b/tests/fuzzers/txfetcher/corpus/6a5059bc86872526241d21ab5dae9f0afd3b9ae1-3
new file mode 100644
index 000000000000..b71d5dff5167
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/6a5059bc86872526241d21ab5dae9f0afd3b9ae1-3
@@ -0,0 +1 @@
+¿½
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/717928e0e2d478c680c6409b173552ca98469ba5-6 b/tests/fuzzers/txfetcher/corpus/717928e0e2d478c680c6409b173552ca98469ba5-6
new file mode 100644
index 000000000000..dce51061150d
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/717928e0e2d478c680c6409b173552ca98469ba5-6
@@ -0,0 +1 @@
+LvhaJcdaFofenogkjQfJB
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/71d22f25419543e437f249ca437823b87ac926b1-6 b/tests/fuzzers/txfetcher/corpus/71d22f25419543e437f249ca437823b87ac926b1-6
new file mode 100644
index 000000000000..d07a6c2f3244
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/71d22f25419543e437f249ca437823b87ac926b1-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/7312a0f31ae5d773ed4fd74abc7521eb14754683-8 b/tests/fuzzers/txfetcher/corpus/7312a0f31ae5d773ed4fd74abc7521eb14754683-8
new file mode 100644
index 000000000000..3593ce2e1931
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/7312a0f31ae5d773ed4fd74abc7521eb14754683-8
@@ -0,0 +1,2 @@
+DtQvfQ+MULKZTXk78c
+/fWkpxlyEQQ/+hgNzVtx9vWgJsafG7b0dA4AFjwVbFLmQcj2PprIMmPNQg1AkS5TDEmSJwFVlXsjLZ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/76e413a50dc8861e3756e556f796f1737bec2675-4 b/tests/fuzzers/txfetcher/corpus/76e413a50dc8861e3756e556f796f1737bec2675-4
new file mode 100644
index 000000000000..623fcf9601e5
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/76e413a50dc8861e3756e556f796f1737bec2675-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/78480977d5c07386b06e9b37f5c82f5ed86c2f09-3 b/tests/fuzzers/txfetcher/corpus/78480977d5c07386b06e9b37f5c82f5ed86c2f09-3
new file mode 100644
index 000000000000..e92863a1c703
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/78480977d5c07386b06e9b37f5c82f5ed86c2f09-3
@@ -0,0 +1,14 @@
+ TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iAJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
+qyUBnu3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dDaFeo
+f9Oeos0UotgiDktdQHxdNEwLjQflJJBzV+5OtwswCA=----EN RATESTI EY-----Q
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/7a113cd3c178934cdb64353af86d51462d7080a4-5 b/tests/fuzzers/txfetcher/corpus/7a113cd3c178934cdb64353af86d51462d7080a4-5
new file mode 100644
index 000000000000..16818128aec7
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/7a113cd3c178934cdb64353af86d51462d7080a4-5
@@ -0,0 +1,10 @@
+l6afab4pZB
+l2+XsDlrKBxKKtDrGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTtqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tuV6ef6anZzus1s1Y1Clb6HbnWWF/wbZGOpet
+3m4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKZTXtdZrh+k7hx0nTP8Jcb
+uqFk541awmMogY/EfbWd6IOkp+4xqjlFBEDytgbIECQQDvH/6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz84SHEg1Ak/7KCxmD/sfgS5TeuNi8DoUBEmiSJwm7FX
+ftxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su43sjXNueLKH8+ph2UfQuU9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2pGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj13sovGKUFfYAqVXVlxtIáo‡X
+qUn3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JMhNRcVFMO8dDaFo
+f9Oeos0UotgiDktdQHxdNEwLjQlJBz+OtwwA=---E ATTIEY-
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/7ea9f71020f3eb783f743f744eba8d8ca4b2582f-3 b/tests/fuzzers/txfetcher/corpus/7ea9f71020f3eb783f743f744eba8d8ca4b2582f-3
new file mode 100644
index 000000000000..08f5bb99f5de
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/7ea9f71020f3eb783f743f744eba8d8ca4b2582f-3
@@ -0,0 +1,9 @@
+
+l2+DulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
diff --git a/tests/fuzzers/txfetcher/corpus/84f8c275f3ffbaf8c32c21782af13de10e7de28b-3 b/tests/fuzzers/txfetcher/corpus/84f8c275f3ffbaf8c32c21782af13de10e7de28b-3
new file mode 100644
index 000000000000..2d6060c40678
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/84f8c275f3ffbaf8c32c21782af13de10e7de28b-3
@@ -0,0 +1 @@
+KKtDlbjVeLKwZatTXtdZrhu+Jk7hx0xxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLQcmPcQETT YQ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/85dfe7ddee0e52aa19115c0ebb9ed28a14e488c6-5 b/tests/fuzzers/txfetcher/corpus/85dfe7ddee0e52aa19115c0ebb9ed28a14e488c6-5
new file mode 100644
index 000000000000..9b6fe78029e7
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/85dfe7ddee0e52aa19115c0ebb9ed28a14e488c6-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/87bba5b1e3da38fed8cb5a9bc5c8baa819e83d05-5 b/tests/fuzzers/txfetcher/corpus/87bba5b1e3da38fed8cb5a9bc5c8baa819e83d05-5
new file mode 100644
index 000000000000..ef091f0be294
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/87bba5b1e3da38fed8cb5a9bc5c8baa819e83d05-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4 b/tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4
new file mode 100644
index 000000000000..953be79201dc
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/8ff3bd49f93079e5e1c7f8f2461ba7ee612900c3-5 b/tests/fuzzers/txfetcher/corpus/8ff3bd49f93079e5e1c7f8f2461ba7ee612900c3-5
new file mode 100644
index 000000000000..a86a66593b46
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/8ff3bd49f93079e5e1c7f8f2461ba7ee612900c3-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/9034aaf45143996a2b14465c352ab0c6fa26b221-2 b/tests/fuzzers/txfetcher/corpus/9034aaf45143996a2b14465c352ab0c6fa26b221-2
new file mode 100644
index 000000000000..9c95a6ba6af7
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/9034aaf45143996a2b14465c352ab0c6fa26b221-2
@@ -0,0 +1 @@
+½
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/92cefdc6251d04896349a464b29be03d6bb04c3d-2 b/tests/fuzzers/txfetcher/corpus/92cefdc6251d04896349a464b29be03d6bb04c3d-2
new file mode 100644
index 000000000000..9b78e45707a6
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/92cefdc6251d04896349a464b29be03d6bb04c3d-2
@@ -0,0 +1 @@
+ï39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319¿½
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/9613e580ccb69df7c9074f0e2f6886ac6b34ca55-5 b/tests/fuzzers/txfetcher/corpus/9613e580ccb69df7c9074f0e2f6886ac6b34ca55-5
new file mode 100644
index 000000000000..681adc6a9cd9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/9613e580ccb69df7c9074f0e2f6886ac6b34ca55-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/98afc8970a680fdc4aee0b5d48784f650c566b75-6 b/tests/fuzzers/txfetcher/corpus/98afc8970a680fdc4aee0b5d48784f650c566b75-6
new file mode 100644
index 000000000000..c82defc2437f
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/98afc8970a680fdc4aee0b5d48784f650c566b75-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/9dfc92f4ca2ece0167096fca6751ff314765f08b-8 b/tests/fuzzers/txfetcher/corpus/9dfc92f4ca2ece0167096fca6751ff314765f08b-8
new file mode 100644
index 000000000000..be75c25fec2b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/9dfc92f4ca2ece0167096fca6751ff314765f08b-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/9ebcbbfdaf0e98c87652e57226a4d8a35170c67d-4 b/tests/fuzzers/txfetcher/corpus/9ebcbbfdaf0e98c87652e57226a4d8a35170c67d-4
new file mode 100644
index 000000000000..ab036767db9e
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/9ebcbbfdaf0e98c87652e57226a4d8a35170c67d-4
@@ -0,0 +1,5 @@
+l2+DulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpwVbFLmQet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjr
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/9ff520eb8b8319a5fdafbe4d1cbb02a75058d93b-7 b/tests/fuzzers/txfetcher/corpus/9ff520eb8b8319a5fdafbe4d1cbb02a75058d93b-7
new file mode 100644
index 000000000000..d91a13138cb0
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/9ff520eb8b8319a5fdafbe4d1cbb02a75058d93b-7
@@ -0,0 +1,2 @@
+&Èíw¿½ï¿½Â€ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ï¿½ �
+���
���ï¿ï¿½ï¿½ï¿½ÿÿÿ����������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/¿½0
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/a0b57a12e25ac5adcedb2a5c45915f0f62aee869-4 b/tests/fuzzers/txfetcher/corpus/a0b57a12e25ac5adcedb2a5c45915f0f62aee869-4
new file mode 100644
index 000000000000..78243163a855
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a0b57a12e25ac5adcedb2a5c45915f0f62aee869-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a2684adccf16e036b051c12f283734fa803746e8-6 b/tests/fuzzers/txfetcher/corpus/a2684adccf16e036b051c12f283734fa803746e8-6
new file mode 100644
index 000000000000..4e12af2da8e9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a2684adccf16e036b051c12f283734fa803746e8-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a37305974cf477ecfe65fa92f37b1f51dea25910-4 b/tests/fuzzers/txfetcher/corpus/a37305974cf477ecfe65fa92f37b1f51dea25910-4
new file mode 100644
index 000000000000..75cb14e8d98e
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a37305974cf477ecfe65fa92f37b1f51dea25910-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a7eb43926bd14b1f62a66a33107776e487434d32-7 b/tests/fuzzers/txfetcher/corpus/a7eb43926bd14b1f62a66a33107776e487434d32-7
new file mode 100644
index 000000000000..88e6127355dd
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a7eb43926bd14b1f62a66a33107776e487434d32-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a8f7c254eb64a40fd2a77b79979c7bbdac6a760c-4 b/tests/fuzzers/txfetcher/corpus/a8f7c254eb64a40fd2a77b79979c7bbdac6a760c-4
new file mode 100644
index 000000000000..da61777c22b5
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/a8f7c254eb64a40fd2a77b79979c7bbdac6a760c-4
@@ -0,0 +1,2 @@
+lxtIX
+qyU3X9ps8ZfjLZ45l6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFe
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/a9a8f287d6af24e47d8db468e8f967aa44fb5a1f-7 b/tests/fuzzers/txfetcher/corpus/a9a8f287d6af24e47d8db468e8f967aa44fb5a1f-7
new file mode 100644
index 000000000000..7811921b79e9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a9a8f287d6af24e47d8db468e8f967aa44fb5a1f-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/aa7444d8e326158046862590a0db993c07aef372-7 b/tests/fuzzers/txfetcher/corpus/aa7444d8e326158046862590a0db993c07aef372-7
new file mode 100644
index 000000000000..870e12ffbcf4
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/aa7444d8e326158046862590a0db993c07aef372-7
@@ -0,0 +1 @@
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000@0000000000000
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/ae4593626d8796e079a358c2395a4f6c9ddd6a44-6 b/tests/fuzzers/txfetcher/corpus/ae4593626d8796e079a358c2395a4f6c9ddd6a44-6
new file mode 100644
index 000000000000..845deedd0e23
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/ae4593626d8796e079a358c2395a4f6c9ddd6a44-6
@@ -0,0 +1,8 @@
+9pmM gY/xEcfbWd6IOkp+4xqjlFLBEDytgbparsing /E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprLANGcQrooz8vp
+jy4SHEg1AkEA/v13/@M47K9vCxb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCz� jA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6Yj013sovGKUFfYAqVXVlxtIX
+qyUBnu3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYFZHOde3JEMhNRcVFMO8dDaFeo
+f9Oeos0Uot
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/b2942d4413a66939cda7db93020dee79eb17788c-9 b/tests/fuzzers/txfetcher/corpus/b2942d4413a66939cda7db93020dee79eb17788c-9
new file mode 100644
index 000000000000..10aca6512180
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/b2942d4413a66939cda7db93020dee79eb17788c-9 differ
diff --git a/tests/fuzzers/txfetcher/corpus/b4614117cdfd147d38f4e8a4d85f5a2bb99a6a4f-5 b/tests/fuzzers/txfetcher/corpus/b4614117cdfd147d38f4e8a4d85f5a2bb99a6a4f-5
new file mode 100644
index 000000000000..af69eef9b086
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/b4614117cdfd147d38f4e8a4d85f5a2bb99a6a4f-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/b631ef3291fa405cd6517d11f4d1b9b6d02912d4-2 b/tests/fuzzers/txfetcher/corpus/b631ef3291fa405cd6517d11f4d1b9b6d02912d4-2
new file mode 100644
index 000000000000..a6b8858b40d5
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/b631ef3291fa405cd6517d11f4d1b9b6d02912d4-2
@@ -0,0 +1 @@
+&áo‡
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/b7a91e338cc11f50ebdb2c414610efc4d5be3137-4 b/tests/fuzzers/txfetcher/corpus/b7a91e338cc11f50ebdb2c414610efc4d5be3137-4
new file mode 100644
index 000000000000..9709a1fcb82b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/b7a91e338cc11f50ebdb2c414610efc4d5be3137-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/b858cb282617fb0956d960215c8e84d1ccf909c6-2 b/tests/fuzzers/txfetcher/corpus/b858cb282617fb0956d960215c8e84d1ccf909c6-2
new file mode 100644
index 000000000000..0519ecba6ea9
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/b858cb282617fb0956d960215c8e84d1ccf909c6-2
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/bc9d570aacf3acd39600feda8e72a293a4667da4-1 b/tests/fuzzers/txfetcher/corpus/bc9d570aacf3acd39600feda8e72a293a4667da4-1
new file mode 100644
index 000000000000..aab27c590956
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/bc9d570aacf3acd39600feda8e72a293a4667da4-1
@@ -0,0 +1 @@
+�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/be7eed35b245b5d5d2adcdb4c67f07794eb86b24-3 b/tests/fuzzers/txfetcher/corpus/be7eed35b245b5d5d2adcdb4c67f07794eb86b24-3
new file mode 100644
index 000000000000..47c996d33ff3
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/be7eed35b245b5d5d2adcdb4c67f07794eb86b24-3
@@ -0,0 +1,2 @@
+4LZmbRc6+MUVeLKXtdZr+Jk7hhgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLQcmPcQ SN_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/c010b0cd70c7edbc5bd332fc9e2e91c6a1cbcdc4-5 b/tests/fuzzers/txfetcher/corpus/c010b0cd70c7edbc5bd332fc9e2e91c6a1cbcdc4-5
new file mode 100644
index 000000000000..474f14d89bcb
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/c010b0cd70c7edbc5bd332fc9e2e91c6a1cbcdc4-5
@@ -0,0 +1,4 @@
+
+Xc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nhgN4H
+qzzVtxx7vWrjrIgPbJpvfb
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/c1690698607eb0f4c4244e9f9629968be4beb6bc-8 b/tests/fuzzers/txfetcher/corpus/c1690698607eb0f4c4244e9f9629968be4beb6bc-8
new file mode 100644
index 000000000000..d184a2d8a46f
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/c1690698607eb0f4c4244e9f9629968be4beb6bc-8
@@ -0,0 +1,2 @@
+&Ƚ�� �
+���
���ï¿ï¿½ï¿½ï¿½ÿÿÿ����������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/¿½0
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/c1f435e4f53a9a17578d9e8c4789860f962a1379-6 b/tests/fuzzers/txfetcher/corpus/c1f435e4f53a9a17578d9e8c4789860f962a1379-6
new file mode 100644
index 000000000000..f2a68ec3de94
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c1f435e4f53a9a17578d9e8c4789860f962a1379-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/c298a75334c3acf04bd129a8867447a25c8bacf8-7 b/tests/fuzzers/txfetcher/corpus/c298a75334c3acf04bd129a8867447a25c8bacf8-7
new file mode 100644
index 000000000000..0b437f22608a
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c298a75334c3acf04bd129a8867447a25c8bacf8-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/c42287c7d225e530e822f23bbbba6819a9e48f38-6 b/tests/fuzzers/txfetcher/corpus/c42287c7d225e530e822f23bbbba6819a9e48f38-6
new file mode 100644
index 000000000000..91818f563488
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c42287c7d225e530e822f23bbbba6819a9e48f38-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/c4cdbb891f3ee76476b7375d5ed51691fed95421-10 b/tests/fuzzers/txfetcher/corpus/c4cdbb891f3ee76476b7375d5ed51691fed95421-10
new file mode 100644
index 000000000000..e365cc52623e
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c4cdbb891f3ee76476b7375d5ed51691fed95421-10 differ
diff --git a/tests/fuzzers/txfetcher/corpus/cc9572d72dfa2937074b1766dcbcff9cc58d1137-4 b/tests/fuzzers/txfetcher/corpus/cc9572d72dfa2937074b1766dcbcff9cc58d1137-4
new file mode 100644
index 000000000000..b72a78f5291e
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/cc9572d72dfa2937074b1766dcbcff9cc58d1137-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/cd1d73b4e101bc7b979e3f6f135cb12d4594d348-5 b/tests/fuzzers/txfetcher/corpus/cd1d73b4e101bc7b979e3f6f135cb12d4594d348-5
new file mode 100644
index 000000000000..3079de555758
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/cd1d73b4e101bc7b979e3f6f135cb12d4594d348-5
@@ -0,0 +1 @@
+822452601031714757585602556
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/d0acdc8fca32bbd58d368eeac3bd9eaa46f59d27-5 b/tests/fuzzers/txfetcher/corpus/d0acdc8fca32bbd58d368eeac3bd9eaa46f59d27-5
new file mode 100644
index 000000000000..794d5d86c6a1
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d0acdc8fca32bbd58d368eeac3bd9eaa46f59d27-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/d0e43b715fd00953f7bdd6dfad95811985e81396-4 b/tests/fuzzers/txfetcher/corpus/d0e43b715fd00953f7bdd6dfad95811985e81396-4
new file mode 100644
index 000000000000..742db5fb3ba9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d0e43b715fd00953f7bdd6dfad95811985e81396-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/d925fbd22c8bc0de34d6a9d1258ce3d2928d0927-8 b/tests/fuzzers/txfetcher/corpus/d925fbd22c8bc0de34d6a9d1258ce3d2928d0927-8
new file mode 100644
index 000000000000..5920dfe60128
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d925fbd22c8bc0de34d6a9d1258ce3d2928d0927-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/d9ba78cb7425724185d5fa300cd5c03aec2683bb-7 b/tests/fuzzers/txfetcher/corpus/d9ba78cb7425724185d5fa300cd5c03aec2683bb-7
new file mode 100644
index 000000000000..c4df1cf210eb
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d9ba78cb7425724185d5fa300cd5c03aec2683bb-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709 b/tests/fuzzers/txfetcher/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/tests/fuzzers/txfetcher/corpus/dcdb7758b87648b5d766b1b341a65834420cf621-7 b/tests/fuzzers/txfetcher/corpus/dcdb7758b87648b5d766b1b341a65834420cf621-7
new file mode 100644
index 000000000000..78cf11ae2170
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/dcdb7758b87648b5d766b1b341a65834420cf621-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/dd441bd24581332c9ce19e008260a69287aa3cbc-6 b/tests/fuzzers/txfetcher/corpus/dd441bd24581332c9ce19e008260a69287aa3cbc-6
new file mode 100644
index 000000000000..4e0c14006eee
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/dd441bd24581332c9ce19e008260a69287aa3cbc-6
@@ -0,0 +1,2 @@
+Dtf1nWk78c
+/fWklyEQQ/+hgNzVtxxmDgS5TDETgeXVlXsjLZ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/def879fe0fd637a745c00c8f1da340518db8688c-2 b/tests/fuzzers/txfetcher/corpus/def879fe0fd637a745c00c8f1da340518db8688c-2
new file mode 100644
index 000000000000..555752f0ed16
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/def879fe0fd637a745c00c8f1da340518db8688c-2
@@ -0,0 +1 @@
+ù ´
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/df6c30a9781b93bd6d2f5e97e5592d5945210003-7 b/tests/fuzzers/txfetcher/corpus/df6c30a9781b93bd6d2f5e97e5592d5945210003-7
new file mode 100644
index 000000000000..2a7adb093bcf
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/df6c30a9781b93bd6d2f5e97e5592d5945210003-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/dfc1c3a2e3ccdaf6f88c515fd00e8ad08421e431-6 b/tests/fuzzers/txfetcher/corpus/dfc1c3a2e3ccdaf6f88c515fd00e8ad08421e431-6
new file mode 100644
index 000000000000..59f3442c053c
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/dfc1c3a2e3ccdaf6f88c515fd00e8ad08421e431-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/e1dcc4e7ead6dfd1139ece7bf57d776cb9dac72d-7 b/tests/fuzzers/txfetcher/corpus/e1dcc4e7ead6dfd1139ece7bf57d776cb9dac72d-7
new file mode 100644
index 000000000000..5ba489f99ddd
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/e1dcc4e7ead6dfd1139ece7bf57d776cb9dac72d-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/e39c2de2c8937d2cbd4339b13d6a0ce94d94f8d2-8 b/tests/fuzzers/txfetcher/corpus/e39c2de2c8937d2cbd4339b13d6a0ce94d94f8d2-8
new file mode 100644
index 000000000000..0e9508938e4f
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/e39c2de2c8937d2cbd4339b13d6a0ce94d94f8d2-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/e72f76b9579c792e545d02fe405d9186f0d6c39b-6 b/tests/fuzzers/txfetcher/corpus/e72f76b9579c792e545d02fe405d9186f0d6c39b-6
new file mode 100644
index 000000000000..c4d34b1732a2
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/e72f76b9579c792e545d02fe405d9186f0d6c39b-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/eb70814d6355a4498b8f301ba8dbc34f895a9947-5 b/tests/fuzzers/txfetcher/corpus/eb70814d6355a4498b8f301ba8dbc34f895a9947-5
new file mode 100644
index 000000000000..bd57a22fb1e1
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/eb70814d6355a4498b8f301ba8dbc34f895a9947-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/ebdc17efe343e412634dca57cecd5a0e1ce1c1c7-5 b/tests/fuzzers/txfetcher/corpus/ebdc17efe343e412634dca57cecd5a0e1ce1c1c7-5
new file mode 100644
index 000000000000..aaa3f695ab36
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/ebdc17efe343e412634dca57cecd5a0e1ce1c1c7-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/ec0a25eba8966b8f628d821b3cfbdf2dfd4bbb4c-3 b/tests/fuzzers/txfetcher/corpus/ec0a25eba8966b8f628d821b3cfbdf2dfd4bbb4c-3
new file mode 100644
index 000000000000..65cf0df80139
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/ec0a25eba8966b8f628d821b3cfbdf2dfd4bbb4c-3
@@ -0,0 +1,13 @@
+¸&^£áo‡È—-----BEGIN RSA TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8PuxEbi4nW91IJm2gsvvZhIrHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4Ljncdabn9vLZad2bSysqz/qTAUStvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1K1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzQIvVdfQbRc6+MUVeLKwZatTXtZru+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbW6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hg4
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLcj2pIMPQroozvjg1AkEA/v13/5M47K9vCxmb8QeD/aydfsgS5TeuNi8DoUBEmiSJwmaXY
+fFUtxv7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4bjeLKH8Q+ph2
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+AYiZ6PYj013sovGKFYqVXVlxtIX
+qyUBnu3X9s8ZfjZO7BAkl4R5Yl6cGhaJQYZHOe3JEMhVFaFf9Oes0UUothgiDktdQxdNLj7+5CWA==
+-----END RSASQ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/eebe3b76aeba6deed965d17d2b024f7eae1a43f1-5 b/tests/fuzzers/txfetcher/corpus/eebe3b76aeba6deed965d17d2b024f7eae1a43f1-5
new file mode 100644
index 000000000000..20d62e15b32d
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/eebe3b76aeba6deed965d17d2b024f7eae1a43f1-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/ef8741a9faf030794d98ff113f556c68a24719a5-6 b/tests/fuzzers/txfetcher/corpus/ef8741a9faf030794d98ff113f556c68a24719a5-6
new file mode 100644
index 000000000000..09fcd86d77c2
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/ef8741a9faf030794d98ff113f556c68a24719a5-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/efb7410d02418befeba25a43d676cc6124129125-4 b/tests/fuzzers/txfetcher/corpus/efb7410d02418befeba25a43d676cc6124129125-4
new file mode 100644
index 000000000000..2191a7324a16
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/efb7410d02418befeba25a43d676cc6124129125-4
@@ -0,0 +1 @@
+88242871'392752200424452601091531672177074144720616417147514758635765020556616¿
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/f6f97d781a5a749903790e07db8619866cb7c3a1-6 b/tests/fuzzers/txfetcher/corpus/f6f97d781a5a749903790e07db8619866cb7c3a1-6
new file mode 100644
index 000000000000..219a8d3682f5
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/f6f97d781a5a749903790e07db8619866cb7c3a1-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/f7a3cd00fa0e57742e7dbbb8283dcaea067eaf7b-5 b/tests/fuzzers/txfetcher/corpus/f7a3cd00fa0e57742e7dbbb8283dcaea067eaf7b-5
new file mode 100644
index 000000000000..f01ccd89efa4
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/f7a3cd00fa0e57742e7dbbb8283dcaea067eaf7b-5
@@ -0,0 +1,2 @@
+Xyt0Xl/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYi
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/f94d60a6c556ce485ab60088291760b8be25776c-6 b/tests/fuzzers/txfetcher/corpus/f94d60a6c556ce485ab60088291760b8be25776c-6
new file mode 100644
index 000000000000..58d841ff036d
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/f94d60a6c556ce485ab60088291760b8be25776c-6
@@ -0,0 +1,2 @@
+HZB4cQZde3JMNRcVFMO8dDFo
+f9OeosiDdQQl
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/f9e627b2cb82ffa1ea5e0c6d7f2802f3000b18a8-6 b/tests/fuzzers/txfetcher/corpus/f9e627b2cb82ffa1ea5e0c6d7f2802f3000b18a8-6
new file mode 100644
index 000000000000..b5dfecc1e9d1
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/f9e627b2cb82ffa1ea5e0c6d7f2802f3000b18a8-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/fb3775aa24e5667e658920c05ba4b7b19ff256fb-5 b/tests/fuzzers/txfetcher/corpus/fb3775aa24e5667e658920c05ba4b7b19ff256fb-5
new file mode 100644
index 000000000000..6f4927d822d4
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/fb3775aa24e5667e658920c05ba4b7b19ff256fb-5
@@ -0,0 +1 @@
+HZB4c2cPclieoverpGsumgUtWj3NMYPZ/F8tá5YlNR8dDFoiDdQQl
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/fd6386548e119a50db96b2fa406e54924c45a2d5-6 b/tests/fuzzers/txfetcher/corpus/fd6386548e119a50db96b2fa406e54924c45a2d5-6
new file mode 100644
index 000000000000..6fff60edd4f0
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/fd6386548e119a50db96b2fa406e54924c45a2d5-6 differ
diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
new file mode 100644
index 000000000000..c7037cc97f1b
--- /dev/null
+++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
@@ -0,0 +1,199 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package txfetcher
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "time"
+
+ "github.com/XinFinOrg/XDPoSChain/common"
+ "github.com/XinFinOrg/XDPoSChain/common/mclock"
+ "github.com/XinFinOrg/XDPoSChain/core/types"
+ "github.com/XinFinOrg/XDPoSChain/eth/fetcher"
+)
+
+var (
+ peers []string
+ txs []*types.Transaction
+)
+
+func init() {
+ // Random is nice, but we need it deterministic
+ rand := rand.New(rand.NewSource(0x3a29))
+
+ peers = make([]string, 10)
+ for i := 0; i < len(peers); i++ {
+ peers[i] = fmt.Sprintf("Peer #%d", i)
+ }
+ txs = make([]*types.Transaction, 65536) // We need to bump enough to hit all the limits
+ for i := 0; i < len(txs); i++ {
+ txs[i] = types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil)
+ }
+}
+
+func Fuzz(input []byte) int {
+ // Don't generate insanely large test cases, not much value in them
+ if len(input) > 16*1024 {
+ return -1
+ }
+ r := bytes.NewReader(input)
+
+ // Reduce the problem space for certain fuzz runs. Small tx space is better
+ // for testing clashes and in general the fetcher, but we should still run
+ // some tests with large spaces to hit potential issues on limits.
+ limit, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ switch limit % 4 {
+ case 0:
+ txs = txs[:4]
+ case 1:
+ txs = txs[:256]
+ case 2:
+ txs = txs[:4096]
+ case 3:
+ // Full run
+ }
+ // Create a fetcher and hook into it's simulated fields
+ clock := new(mclock.Simulated)
+ rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!!
+
+ f := fetcher.NewTxFetcherForTests(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ clock, rand,
+ )
+ f.Start()
+ defer f.Stop()
+
+ // Try to throw random junk at the fetcher
+ for {
+ // Read the next command and abort if we're done
+ cmd, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ switch cmd % 4 {
+ case 0:
+ // Notify a new set of transactions:
+ // Byte 1: Peer index to announce with
+ // Byte 2: Number of hashes to announce
+ // Byte 3-4, 5-6, etc: Transaction indices (2 byte) to announce
+ peerIdx, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ peer := peers[int(peerIdx)%len(peers)]
+
+ announceCnt, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ announce := int(announceCnt) % (2 * len(txs)) // No point in generating too many duplicates
+
+ var (
+ announceIdxs = make([]int, announce)
+ announces = make([]common.Hash, announce)
+ )
+ for i := 0; i < len(announces); i++ {
+ annBuf := make([]byte, 2)
+ if n, err := r.Read(annBuf); err != nil || n != 2 {
+ return 0
+ }
+ announceIdxs[i] = (int(annBuf[0])*256 + int(annBuf[1])) % len(txs)
+ announces[i] = txs[announceIdxs[i]].Hash()
+ }
+ fmt.Println("Notify", peer, announceIdxs)
+ if err := f.Notify(peer, announces); err != nil {
+ panic(err)
+ }
+
+ case 1:
+ // Deliver a new set of transactions:
+ // Byte 1: Peer index to announce with
+ // Byte 2: Number of hashes to announce
+ // Byte 3-4, 5-6, etc: Transaction indices (2 byte) to announce
+ peerIdx, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ peer := peers[int(peerIdx)%len(peers)]
+
+ deliverCnt, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ deliver := int(deliverCnt) % (2 * len(txs)) // No point in generating too many duplicates
+
+ var (
+ deliverIdxs = make([]int, deliver)
+ deliveries = make([]*types.Transaction, deliver)
+ )
+ for i := 0; i < len(deliveries); i++ {
+ deliverBuf := make([]byte, 2)
+ if n, err := r.Read(deliverBuf); err != nil || n != 2 {
+ return 0
+ }
+ deliverIdxs[i] = (int(deliverBuf[0])*256 + int(deliverBuf[1])) % len(txs)
+ deliveries[i] = txs[deliverIdxs[i]]
+ }
+ directFlag, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ direct := (directFlag % 2) == 0
+
+ fmt.Println("Enqueue", peer, deliverIdxs, direct)
+ if err := f.Enqueue(peer, deliveries, direct); err != nil {
+ panic(err)
+ }
+
+ case 2:
+ // Drop a peer:
+ // Byte 1: Peer index to drop
+ peerIdx, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ peer := peers[int(peerIdx)%len(peers)]
+
+ fmt.Println("Drop", peer)
+ if err := f.Drop(peer); err != nil {
+ panic(err)
+ }
+
+ case 3:
+ // Move the simulated clock forward
+ // Byte 1: 100ms increment to move forward
+ tickCnt, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ tick := time.Duration(tickCnt) * 100 * time.Millisecond
+
+ fmt.Println("Sleep", tick)
+ clock.Run(tick)
+ }
+ }
+}
diff --git a/whisper/whisperv5/api.go b/whisper/whisperv5/api.go
index b28ea5075d5d..d375e4ce38da 100644
--- a/whisper/whisperv5/api.go
+++ b/whisper/whisperv5/api.go
@@ -28,7 +28,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
@@ -100,12 +100,12 @@ func (api *PublicWhisperAPI) SetMinPoW(ctx context.Context, pow float64) (bool,
// MarkTrustedPeer marks a peer trusted. , which will allow it to send historic (expired) messages.
// Note: This function is not adding new nodes, the node needs to exists as a peer.
-func (api *PublicWhisperAPI) MarkTrustedPeer(ctx context.Context, enode string) (bool, error) {
- n, err := discover.ParseNode(enode)
+func (api *PublicWhisperAPI) MarkTrustedPeer(ctx context.Context, url string) (bool, error) {
+ n, err := enode.ParseV4(url)
if err != nil {
return false, err
}
- return true, api.w.AllowP2PMessagesFromPeer(n.ID[:])
+ return true, api.w.AllowP2PMessagesFromPeer(n.ID().Bytes())
}
// NewKeyPair generates a new public and private key pair for message decryption and encryption.
@@ -274,11 +274,11 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// send to specific node (skip PoW check)
if len(req.TargetPeer) > 0 {
- n, err := discover.ParseNode(req.TargetPeer)
+ n, err := enode.ParseV4(req.TargetPeer)
if err != nil {
return false, fmt.Errorf("failed to parse target peer: %s", err)
}
- return true, api.w.SendP2PMessage(n.ID[:], env)
+ return true, api.w.SendP2PMessage(n.ID().Bytes(), env)
}
// ensure that the message PoW meets the node's minimum accepted PoW
diff --git a/whisper/whisperv5/peer_test.go b/whisper/whisperv5/peer_test.go
index 0ed18fc0186c..4cc80c49172d 100644
--- a/whisper/whisperv5/peer_test.go
+++ b/whisper/whisperv5/peer_test.go
@@ -19,7 +19,6 @@ package whisperv5
import (
"bytes"
"crypto/ecdsa"
- "fmt"
"net"
"sync"
"testing"
@@ -28,7 +27,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
)
@@ -109,8 +108,6 @@ func TestSimulation(t *testing.T) {
func initialize(t *testing.T) {
var err error
- ip := net.IPv4(127, 0, 0, 1)
- port0 := 30303
for i := 0; i < NumNodes; i++ {
var node TestNode
@@ -135,29 +132,16 @@ func initialize(t *testing.T) {
if err != nil {
t.Fatalf("failed convert the key: %s.", keys[i])
}
- port := port0 + i
- addr := fmt.Sprintf(":%d", port) // e.g. ":30303"
name := common.MakeName("whisper-go", "2.0")
- var peers []*discover.Node
- if i > 0 {
- peerNodeId := nodes[i-1].id
- peerPort := uint16(port - 1)
- peerNode := discover.PubkeyID(&peerNodeId.PublicKey)
- peer := discover.NewNode(peerNode, ip, peerPort, peerPort)
- peers = append(peers, peer)
- }
node.server = &p2p.Server{
Config: p2p.Config{
- PrivateKey: node.id,
- MaxPeers: NumNodes/2 + 1,
- Name: name,
- Protocols: node.shh.Protocols(),
- ListenAddr: addr,
- NAT: nat.Any(),
- BootstrapNodes: peers,
- StaticNodes: peers,
- TrustedNodes: peers,
+ PrivateKey: node.id,
+ MaxPeers: NumNodes/2 + 1,
+ Name: name,
+ Protocols: node.shh.Protocols(),
+ ListenAddr: "127.0.0.1:0",
+ NAT: nat.Any(),
},
}
@@ -166,6 +150,14 @@ func initialize(t *testing.T) {
t.Fatalf("failed to start server %d.", i)
}
+ for j := 0; j < i; j++ {
+ peerNodeId := nodes[j].id
+ address, _ := net.ResolveTCPAddr("tcp", nodes[j].server.ListenAddr)
+ peerPort := uint16(address.Port)
+ peer := enode.NewV4(&peerNodeId.PublicKey, address.IP, int(peerPort), int(peerPort))
+ node.server.AddPeer(peer)
+ }
+
nodes[i] = &node
}
}
diff --git a/whisper/whisperv6/api.go b/whisper/whisperv6/api.go
index 8711d6b19538..9521ef02a1e3 100644
--- a/whisper/whisperv6/api.go
+++ b/whisper/whisperv6/api.go
@@ -28,7 +28,7 @@ import (
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/log"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/rpc"
)
@@ -106,12 +106,12 @@ func (api *PublicWhisperAPI) SetBloomFilter(ctx context.Context, bloom hexutil.B
// MarkTrustedPeer marks a peer trusted, which will allow it to send historic (expired) messages.
// Note: This function is not adding new nodes, the node needs to exists as a peer.
-func (api *PublicWhisperAPI) MarkTrustedPeer(ctx context.Context, enode string) (bool, error) {
- n, err := discover.ParseNode(enode)
+func (api *PublicWhisperAPI) MarkTrustedPeer(ctx context.Context, url string) (bool, error) {
+ n, err := enode.ParseV4(url)
if err != nil {
return false, err
}
- return true, api.w.AllowP2PMessagesFromPeer(n.ID[:])
+ return true, api.w.AllowP2PMessagesFromPeer(n.ID().Bytes())
}
// NewKeyPair generates a new public and private key pair for message decryption and encryption.
@@ -231,8 +231,9 @@ type newMessageOverride struct {
Padding hexutil.Bytes
}
-// Post a message on the Whisper network.
-func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, error) {
+// Post posts a message on the Whisper network.
+// returns the hash of the message in case of success.
+func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (hexutil.Bytes, error) {
var (
symKeyGiven = len(req.SymKeyID) > 0
pubKeyGiven = len(req.PublicKey) > 0
@@ -241,7 +242,7 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// user must specify either a symmetric or an asymmetric key
if (symKeyGiven && pubKeyGiven) || (!symKeyGiven && !pubKeyGiven) {
- return false, ErrSymAsym
+ return nil, ErrSymAsym
}
params := &MessageParams{
@@ -256,58 +257,70 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// Set key that is used to sign the message
if len(req.Sig) > 0 {
if params.Src, err = api.w.GetPrivateKey(req.Sig); err != nil {
- return false, err
+ return nil, err
}
}
// Set symmetric key that is used to encrypt the message
if symKeyGiven {
if params.Topic == (TopicType{}) { // topics are mandatory with symmetric encryption
- return false, ErrNoTopics
+ return nil, ErrNoTopics
}
if params.KeySym, err = api.w.GetSymKey(req.SymKeyID); err != nil {
- return false, err
+ return nil, err
}
if !validateDataIntegrity(params.KeySym, aesKeyLength) {
- return false, ErrInvalidSymmetricKey
+ return nil, ErrInvalidSymmetricKey
}
}
// Set asymmetric key that is used to encrypt the message
if pubKeyGiven {
if params.Dst, err = crypto.UnmarshalPubkey(req.PublicKey); err != nil {
- return false, ErrInvalidPublicKey
+ return nil, ErrInvalidPublicKey
}
}
// encrypt and sent message
whisperMsg, err := NewSentMessage(params)
if err != nil {
- return false, err
+ return nil, err
}
+ var result []byte
env, err := whisperMsg.Wrap(params)
if err != nil {
- return false, err
+ return nil, err
}
// send to specific node (skip PoW check)
if len(req.TargetPeer) > 0 {
- n, err := discover.ParseNode(req.TargetPeer)
+ n, err := enode.ParseV4(req.TargetPeer)
if err != nil {
- return false, fmt.Errorf("failed to parse target peer: %s", err)
+ return nil, fmt.Errorf("failed to parse target peer: %s", err)
}
- return true, api.w.SendP2PMessage(n.ID[:], env)
+ err = api.w.SendP2PMessage(n.ID().Bytes(), env)
+ if err == nil {
+ hash := env.Hash()
+ result = hash[:]
+ }
+ return result, err
}
// ensure that the message PoW meets the node's minimum accepted PoW
if req.PowTarget < api.w.MinPow() {
- return false, ErrTooLowPoW
+ return nil, ErrTooLowPoW
}
- return true, api.w.Send(env)
+ err = api.w.Send(env)
+ if err == nil {
+ hash := env.Hash()
+ result = hash[:]
+ }
+ return result, err
}
+
//go:generate gencodec -type Criteria -field-override criteriaOverride -out gen_criteria_json.go
// Criteria holds various filter options for inbound messages.
diff --git a/whisper/whisperv6/peer_test.go b/whisper/whisperv6/peer_test.go
index d5869e180c04..4030556212d5 100644
--- a/whisper/whisperv6/peer_test.go
+++ b/whisper/whisperv6/peer_test.go
@@ -21,18 +21,20 @@ import (
"crypto/ecdsa"
"fmt"
mrand "math/rand"
- "net"
"sync"
"sync/atomic"
"testing"
"time"
+ "net"
+
"github.com/XinFinOrg/XDPoSChain/common"
"github.com/XinFinOrg/XDPoSChain/common/hexutil"
"github.com/XinFinOrg/XDPoSChain/crypto"
"github.com/XinFinOrg/XDPoSChain/p2p"
- "github.com/XinFinOrg/XDPoSChain/p2p/discover"
+ "github.com/XinFinOrg/XDPoSChain/p2p/enode"
"github.com/XinFinOrg/XDPoSChain/p2p/nat"
+ "github.com/XinFinOrg/XDPoSChain/rlp"
)
var keys = []string{
@@ -174,8 +176,6 @@ func initialize(t *testing.T) {
initBloom(t)
var err error
- ip := net.IPv4(127, 0, 0, 1)
- port0 := 30303
for i := 0; i < NumNodes; i++ {
var node TestNode
@@ -200,29 +200,15 @@ func initialize(t *testing.T) {
if err != nil {
t.Fatalf("failed convert the key: %s.", keys[i])
}
- port := port0 + i
- addr := fmt.Sprintf(":%d", port) // e.g. ":30303"
name := common.MakeName("whisper-go", "2.0")
- var peers []*discover.Node
- if i > 0 {
- peerNodeID := nodes[i-1].id
- peerPort := uint16(port - 1)
- peerNode := discover.PubkeyID(&peerNodeID.PublicKey)
- peer := discover.NewNode(peerNode, ip, peerPort, peerPort)
- peers = append(peers, peer)
- }
-
node.server = &p2p.Server{
Config: p2p.Config{
- PrivateKey: node.id,
- MaxPeers: NumNodes/2 + 1,
- Name: name,
- Protocols: node.shh.Protocols(),
- ListenAddr: addr,
- NAT: nat.Any(),
- BootstrapNodes: peers,
- StaticNodes: peers,
- TrustedNodes: peers,
+ PrivateKey: node.id,
+ MaxPeers: NumNodes/2 + 1,
+ Name: name,
+ Protocols: node.shh.Protocols(),
+ ListenAddr: "127.0.0.1:0",
+ NAT: nat.Any(),
},
}
@@ -230,7 +216,12 @@ func initialize(t *testing.T) {
}
for i := 0; i < NumNodes; i++ {
- go startServer(t, nodes[i].server)
+ for j := 0; j < i; j++ {
+ peerNodeId := nodes[j].id
+ address, _ := net.ResolveTCPAddr("tcp", nodes[j].server.ListenAddr)
+ peer := enode.NewV4(&peerNodeId.PublicKey, address.IP, address.Port, address.Port)
+ nodes[i].server.AddPeer(peer)
+ }
}
waitForServersToStart(t)
@@ -438,7 +429,7 @@ func checkPowExchangeForNodeZeroOnce(t *testing.T, mustPass bool) bool {
cnt := 0
for i, node := range nodes {
for peer := range node.shh.peers {
- if peer.peer.ID() == discover.PubkeyID(&nodes[0].id.PublicKey) {
+ if peer.peer.ID() == nodes[0].server.Self().ID() {
cnt++
if peer.powRequirement != masterPow {
if mustPass {
@@ -459,7 +450,7 @@ func checkPowExchangeForNodeZeroOnce(t *testing.T, mustPass bool) bool {
func checkPowExchange(t *testing.T) {
for i, node := range nodes {
for peer := range node.shh.peers {
- if peer.peer.ID() != discover.PubkeyID(&nodes[0].id.PublicKey) {
+ if peer.peer.ID() != nodes[0].server.Self().ID() {
if peer.powRequirement != masterPow {
t.Fatalf("node %d: failed to exchange pow requirement in round %d; expected %f, got %f",
i, round, masterPow, peer.powRequirement)
@@ -513,3 +504,39 @@ func waitForServersToStart(t *testing.T) {
}
t.Fatalf("Failed to start all the servers, running: %d", started)
}
+
+// two generic whisper node handshake
+func TestPeerHandshakeWithTwoFullNode(t *testing.T) {
+ w1 := Whisper{}
+ p1 := newPeer(&w1, p2p.NewPeer(enode.ID{}, "test", []p2p.Cap{}), &rwStub{[]interface{}{ProtocolVersion, uint64(123), make([]byte, BloomFilterSize), false}})
+ err := p1.handshake()
+ if err != nil {
+ t.Fatal()
+ }
+}
+
+// two generic whisper node handshake. one don't send light flag
+func TestHandshakeWithOldVersionWithoutLightModeFlag(t *testing.T) {
+ w1 := Whisper{}
+ p1 := newPeer(&w1, p2p.NewPeer(enode.ID{}, "test", []p2p.Cap{}), &rwStub{[]interface{}{ProtocolVersion, uint64(123), make([]byte, BloomFilterSize)}})
+ err := p1.handshake()
+ if err != nil {
+ t.Fatal()
+ }
+}
+
+type rwStub struct {
+ payload []interface{}
+}
+
+func (stub *rwStub) ReadMsg() (p2p.Msg, error) {
+ size, r, err := rlp.EncodeToReader(stub.payload)
+ if err != nil {
+ return p2p.Msg{}, err
+ }
+ return p2p.Msg{Code: statusCode, Size: uint32(size), Payload: r}, nil
+}
+
+func (stub *rwStub) WriteMsg(m p2p.Msg) error {
+ return nil
+}