diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..68a94afe49
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,44 @@
+*.log
+*.log.archive
+*.out
+*.prof
+coverage.html
+
+# cadaver
+*.cdv
+*.cdv.archive
+
+# swagger
+swagger.json
+swagger.json.validated
+kmdSwaggerWrappers.go
+bundledSpecInject.go
+
+# Exclude GoLand files
+.idea/
+
+# Exclude VSCode files
+.vscode/
+
+# Exclude go binaries built in-place
+cmd/algod/algod
+cmd/goal/goal
+cmd/updater/updater
+
+# Exclude our local temp directory
+tmp/dev_pkg
+tmp/out
+tmp/node_pkgs
+
+# Ignore vim backup and swap files
+*~
+*.swp
+*.swo
+
+# Mac
+.DS_Store
+
+# doc intermediates
+data/transactions/logic/*.md
+
+*.pem
diff --git a/.gitignore b/.gitignore
index 44bec0113d..319b89b478 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,6 +41,10 @@ crypto/libsodium-fork/**/Makefile.in
crypto/libsodium-fork/aclocal.m4
crypto/libsodium-fork/build-aux/
+# Ignore libsodium files generated during the CI process
+crypto/copies
+crypto/libs
+
# Ignore vim backup and swap files
*~
*.swp
@@ -54,3 +58,6 @@ data/transactions/logic/*.md
*.pem
+# Folder for collecting release assets
+assets
+
diff --git a/Makefile b/Makefile
index da976915aa..4a9b979bbd 100644
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ export GO111MODULE
UNAME := $(shell uname)
SRCPATH := $(shell pwd)
ARCH := $(shell ./scripts/archtype.sh)
+OS_TYPE := $(shell ./scripts/ostype.sh)
# If build number already set, use it - to ensure same build number across multiple platforms being built
BUILDNUMBER ?= $(shell ./scripts/compute_build_number.sh)
@@ -27,6 +28,13 @@ endif
GOTAGSLIST += osusergo netgo static_build
GOBUILDMODE := -buildmode pie
endif
+ifeq ($(ARCH), arm)
+ifneq ("$(wildcard /etc/alpine-release)","")
+EXTLDFLAGS += -static
+GOTAGSLIST += osusergo netgo static_build
+GOBUILDMODE := -buildmode pie
+endif
+endif
endif
GOTAGS := --tags "$(GOTAGSLIST)"
@@ -44,7 +52,7 @@ GOLDFLAGS := $(GOLDFLAGS_BASE) \
UNIT_TEST_SOURCES := $(sort $(shell GO111MODULE=off go list ./... | grep -v /go-algorand/test/ ))
ALGOD_API_PACKAGES := $(sort $(shell GO111MODULE=off cd daemon/algod/api; go list ./... ))
-MSGP_GENERATE := ./protocol ./crypto ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./auction ./agreement
+MSGP_GENERATE := ./protocol ./crypto ./data/basics ./data/transactions ./data/committee ./data/bookkeeping ./data/hashable ./auction ./agreement ./rpcs ./node
default: build
@@ -85,14 +93,16 @@ generate: deps
msgp: $(patsubst %,%/msgp_gen.go,$(MSGP_GENERATE))
%/msgp_gen.go: deps ALWAYS
- $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ 1>/dev/null
+ $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand
ALWAYS:
# build our fork of libsodium, placing artifacts into crypto/lib/ and crypto/include/
-crypto/lib/libsodium.a:
- cd crypto/libsodium-fork && \
- ./autogen.sh && \
- ./configure --disable-shared --prefix="$(SRCPATH)/crypto/" && \
+crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a:
+ mkdir -p crypto/copies/$(OS_TYPE)/$(ARCH)
+ cp -R crypto/libsodium-fork crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork
+ cd crypto/copies/$(OS_TYPE)/$(ARCH)/libsodium-fork && \
+ ./autogen.sh --prefix $(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH) && \
+ ./configure --disable-shared --prefix="$(SRCPATH)/crypto/libs/$(OS_TYPE)/$(ARCH)" && \
$(MAKE) && \
$(MAKE) install
@@ -108,7 +118,7 @@ ALGOD_API_FILES := $(shell find daemon/algod/api/server/common daemon/algod/api/
ALGOD_API_SWAGGER_INJECT := daemon/algod/api/server/lib/bundledSpecInject.go
# Note that swagger.json requires the go-swagger dep.
-$(ALGOD_API_SWAGGER_SPEC): $(ALGOD_API_FILES) crypto/lib/libsodium.a
+$(ALGOD_API_SWAGGER_SPEC): $(ALGOD_API_FILES) crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a
cd daemon/algod/api && \
PATH=$(GOPATH1)/bin:$$PATH \
go generate ./...
@@ -122,7 +132,7 @@ KMD_API_FILES := $(shell find daemon/kmd/api/ -type f | grep -v $(KMD_API_SWAGGE
KMD_API_SWAGGER_WRAPPER := kmdSwaggerWrappers.go
KMD_API_SWAGGER_INJECT := daemon/kmd/lib/kmdapi/bundledSpecInject.go
-$(KMD_API_SWAGGER_SPEC): $(KMD_API_FILES) crypto/lib/libsodium.a
+$(KMD_API_SWAGGER_SPEC): $(KMD_API_FILES) crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a
cd daemon/kmd/lib/kmdapi && \
python genSwaggerWrappers.py $(KMD_API_SWAGGER_WRAPPER)
cd daemon/kmd && \
@@ -147,7 +157,7 @@ $(KMD_API_SWAGGER_INJECT): $(KMD_API_SWAGGER_SPEC) $(KMD_API_SWAGGER_SPEC).valid
build: buildsrc gen
-buildsrc: crypto/lib/libsodium.a node_exporter NONGO_BIN deps $(ALGOD_API_SWAGGER_INJECT) $(KMD_API_SWAGGER_INJECT)
+buildsrc: crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a node_exporter NONGO_BIN deps $(ALGOD_API_SWAGGER_INJECT) $(KMD_API_SWAGGER_INJECT)
go install $(GOTRIMPATH) $(GOTAGS) $(GOBUILDMODE) -ldflags="$(GOLDFLAGS)" ./...
SOURCES_RACE := github.com/algorand/go-algorand/cmd/kmd
@@ -209,6 +219,8 @@ clean:
cd crypto/libsodium-fork && \
test ! -e Makefile || make clean
rm -rf crypto/lib
+ rm -rf crypto/libs
+ rm -rf crypto/copies
# clean without crypto
cleango:
@@ -265,3 +277,13 @@ install: build
scripts/dev_install.sh -p $(GOPATH1)/bin
.PHONY: default fmt vet lint check_license check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN
+
+### TARGETS FOR CI/CD PROCESS
+
+ci-deps:
+ scripts/configure_dev-deps.sh && \
+ scripts/check_deps.sh
+
+ci-build: buildsrc gen
+ mkdir -p $(SRCPATH)/tmp/node_pkgs/$(OS_TYPE)/$(ARCH) && \
+ PKG_ROOT=$(SRCPATH)/tmp/node_pkgs/$(OS_TYPE)/$(ARCH) NO_BUILD=True VARIATIONS=$(OS_TYPE)/$(ARCH) scripts/build_packages.sh $(OS_TYPE)/$(ARCH)
diff --git a/agreement/abstractions.go b/agreement/abstractions.go
index 97b1c0f2f7..b8d8c80e60 100644
--- a/agreement/abstractions.go
+++ b/agreement/abstractions.go
@@ -201,9 +201,9 @@ type LedgerWriter interface {
// as above.
EnsureValidatedBlock(ValidatedBlock, Certificate)
- // EnsureDigest waits until some Block that corresponds to a given
- // Certificate appears in the ledger. EnsureDigest does not wait for
- // the block to be written to disk; use Wait() if needed.
+ // EnsureDigest signals the Ledger to attempt to fetch a Block matching
+ // the given Certificate. EnsureDigest does not wait for the block to
+ // be written to disk; use Wait() if needed.
//
// The Ledger must guarantee that after this method returns, any Seed,
// Record, or Circulation call reflects the contents of the Block
@@ -215,7 +215,7 @@ type LedgerWriter interface {
// this is the case, the behavior of Ledger is undefined.
// (Implementations are encouraged to panic or otherwise fail loudly in
// this case, because it means that a fork has occurred.)
- EnsureDigest(Certificate, chan struct{}, *AsyncVoteVerifier)
+ EnsureDigest(Certificate, *AsyncVoteVerifier)
}
// A KeyManager stores and deletes participation keys.
diff --git a/agreement/actions.go b/agreement/actions.go
index 376a3ce151..7058e2e391 100644
--- a/agreement/actions.go
+++ b/agreement/actions.go
@@ -46,6 +46,7 @@ const (
// ledger
ensure
+ stageDigest
// time
rezero
@@ -123,7 +124,7 @@ func (a networkAction) do(ctx context.Context, s *Service) {
if a.T == broadcastVotes {
tag := protocol.AgreementVoteTag
for i, uv := range a.UnauthenticatedVotes {
- data := protocol.Encode(uv)
+ data := protocol.Encode(&uv)
sendErr := s.Network.Broadcast(tag, data)
if sendErr != nil {
s.log.Warnf("Network was unable to queue votes for broadcast(%v). %d / %d votes for round %d period %d step %d were dropped.",
@@ -144,16 +145,16 @@ func (a networkAction) do(ctx context.Context, s *Service) {
var data []byte
switch a.Tag {
case protocol.AgreementVoteTag:
- data = protocol.Encode(a.UnauthenticatedVote)
+ data = protocol.Encode(&a.UnauthenticatedVote)
case protocol.VoteBundleTag:
- data = protocol.Encode(a.UnauthenticatedBundle)
+ data = protocol.Encode(&a.UnauthenticatedBundle)
case protocol.ProposalPayloadTag:
msg := a.CompoundMessage
payload := transmittedPayload{
unauthenticatedProposal: msg.Proposal,
PriorVote: msg.Vote,
}
- data = protocol.Encode(payload)
+ data = protocol.Encode(&payload)
}
switch a.T {
@@ -178,6 +179,7 @@ type cryptoAction struct {
Proposal proposalValue // TODO deprecate
Round round
Period period
+ Step step
Pinned bool
TaskIndex int
}
@@ -197,15 +199,16 @@ func (a cryptoAction) do(ctx context.Context, s *Service) {
case verifyPayload:
s.demux.verifyPayload(ctx, a.M, a.Round, a.Period, a.Pinned)
case verifyBundle:
- s.demux.verifyBundle(ctx, a.M, a.Round, a.Period)
+ s.demux.verifyBundle(ctx, a.M, a.Round, a.Period, a.Step)
}
}
type ensureAction struct {
nonpersistent
- Payload proposal
- PayloadOk bool
+ // the payload that we will give to the ledger
+ Payload proposal
+ // the certificate proving commitment
Certificate Certificate
}
@@ -214,7 +217,7 @@ func (a ensureAction) t() actionType {
}
func (a ensureAction) String() string {
- return fmt.Sprintf("%v: %.5v", a.t().String(), a.Payload.Digest().String())
+ return fmt.Sprintf("%s: %.5s: %v, %v, %.5s", a.t().String(), a.Payload.Digest().String(), a.Certificate.Round, a.Certificate.Period, a.Certificate.Proposal.BlockDigest.String())
}
func (a ensureAction) do(ctx context.Context, s *Service) {
@@ -227,7 +230,7 @@ func (a ensureAction) do(ctx context.Context, s *Service) {
if a.Payload.ve != nil {
logEvent.Type = logspec.RoundConcluded
- s.log.with(logEvent).Infof("committed round %v with pre-validated block %v", a.Certificate.Round, a.Certificate.Proposal)
+ s.log.with(logEvent).Infof("committed round %d with pre-validated block %v", a.Certificate.Round, a.Certificate.Proposal)
s.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.BlockAcceptedEvent, telemetryspec.BlockAcceptedEventDetails{
Address: a.Certificate.Proposal.OriginalProposer.String(),
Hash: a.Certificate.Proposal.BlockDigest.String(),
@@ -236,28 +239,48 @@ func (a ensureAction) do(ctx context.Context, s *Service) {
s.Ledger.EnsureValidatedBlock(a.Payload.ve, a.Certificate)
} else {
block := a.Payload.Block
- if !a.PayloadOk {
- logEvent.Type = logspec.RoundWaiting
- s.log.with(logEvent).Infof("round %v concluded without block for %v; waiting on ledger", a.Certificate.Round, a.Certificate.Proposal)
- s.Ledger.EnsureDigest(a.Certificate, s.quit, s.voteVerifier)
- } else {
- logEvent.Type = logspec.RoundConcluded
- s.log.with(logEvent).Infof("committed round %v with block %v", a.Certificate.Round, a.Certificate.Proposal)
- s.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.BlockAcceptedEvent, telemetryspec.BlockAcceptedEventDetails{
- Address: a.Certificate.Proposal.OriginalProposer.String(),
- Hash: a.Certificate.Proposal.BlockDigest.String(),
- Round: uint64(a.Certificate.Round),
- })
- s.Ledger.EnsureBlock(block, a.Certificate)
- }
+ logEvent.Type = logspec.RoundConcluded
+ s.log.with(logEvent).Infof("committed round %d with block %v", a.Certificate.Round, a.Certificate.Proposal)
+ s.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.BlockAcceptedEvent, telemetryspec.BlockAcceptedEventDetails{
+ Address: a.Certificate.Proposal.OriginalProposer.String(),
+ Hash: a.Certificate.Proposal.BlockDigest.String(),
+ Round: uint64(a.Certificate.Round),
+ })
+ s.Ledger.EnsureBlock(block, a.Certificate)
}
logEventStart := logEvent
logEventStart.Type = logspec.RoundStart
- s.log.with(logEventStart).Infof("finished round %v", a.Certificate.Round)
+ s.log.with(logEventStart).Infof("finished round %d", a.Certificate.Round)
s.tracer.timeR().StartRound(a.Certificate.Round + 1)
s.tracer.timeR().RecStep(0, propose, bottom)
}
+type stageDigestAction struct {
+ nonpersistent
+ // Certificate identifies a block and is a proof commitment
+ Certificate Certificate // a block digest is probably sufficient; keep certificate for now to match ledger interface
+}
+
+func (a stageDigestAction) t() actionType {
+ return stageDigest
+}
+
+func (a stageDigestAction) String() string {
+ return fmt.Sprintf("%s: %.5s. %v. %v", a.t().String(), a.Certificate.Proposal.BlockDigest.String(), a.Certificate.Round, a.Certificate.Period)
+}
+
+func (a stageDigestAction) do(ctx context.Context, service *Service) {
+ logEvent := logspec.AgreementEvent{
+ Hash: a.Certificate.Proposal.BlockDigest.String(),
+ Round: uint64(a.Certificate.Round),
+ Period: uint64(a.Certificate.Period),
+ Sender: a.Certificate.Proposal.OriginalProposer.String(),
+ Type: logspec.RoundWaiting,
+ }
+ service.log.with(logEvent).Infof("round %v concluded without block for %v; (async) waiting on ledger", a.Certificate.Round, a.Certificate.Proposal)
+ service.Ledger.EnsureDigest(a.Certificate, service.voteVerifier)
+}
+
type rezeroAction struct {
nonpersistent
@@ -408,10 +431,6 @@ func relayAction(e messageEvent, tag protocol.Tag, o interface{}) action {
return a
}
-func verifyBundleAction(e messageEvent, r round, p period) action {
- return cryptoAction{T: verifyBundle, M: e.Input, Round: r, Period: p}
-}
-
func verifyVoteAction(e messageEvent, r round, p period, taskIndex int) action {
return cryptoAction{T: verifyVote, M: e.Input, Round: r, Period: p, TaskIndex: taskIndex}
}
@@ -420,6 +439,10 @@ func verifyPayloadAction(e messageEvent, r round, p period, pinned bool) action
return cryptoAction{T: verifyPayload, M: e.Input, Round: r, Period: p, Pinned: pinned}
}
+func verifyBundleAction(e messageEvent, r round, p period, s step) action {
+ return cryptoAction{T: verifyBundle, M: e.Input, Round: r, Period: p, Step: s}
+}
+
func zeroAction(t actionType) action {
switch t {
case noop:
diff --git a/agreement/actiontype_string.go b/agreement/actiontype_string.go
index a9279d7723..c27b9138bb 100644
--- a/agreement/actiontype_string.go
+++ b/agreement/actiontype_string.go
@@ -18,16 +18,17 @@ func _() {
_ = x[verifyPayload-7]
_ = x[verifyBundle-8]
_ = x[ensure-9]
- _ = x[rezero-10]
- _ = x[attest-11]
- _ = x[assemble-12]
- _ = x[repropose-13]
- _ = x[checkpoint-14]
+ _ = x[stageDigest-10]
+ _ = x[rezero-11]
+ _ = x[attest-12]
+ _ = x[assemble-13]
+ _ = x[repropose-14]
+ _ = x[checkpoint-15]
}
-const _actionType_name = "noopignorebroadcastrelaydisconnectbroadcastVotesverifyVoteverifyPayloadverifyBundleensurerezeroattestassemblereproposecheckpoint"
+const _actionType_name = "noopignorebroadcastrelaydisconnectbroadcastVotesverifyVoteverifyPayloadverifyBundleensurestageDigestrezeroattestassemblereproposecheckpoint"
-var _actionType_index = [...]uint8{0, 4, 10, 19, 24, 34, 48, 58, 71, 83, 89, 95, 101, 109, 118, 128}
+var _actionType_index = [...]uint8{0, 4, 10, 19, 24, 34, 48, 58, 71, 83, 89, 100, 106, 112, 120, 129, 139}
func (i actionType) String() string {
if i < 0 || i >= actionType(len(_actionType_index)-1) {
diff --git a/agreement/agreementtest/simulate.go b/agreement/agreementtest/simulate.go
index c08ece3679..43b716ca58 100644
--- a/agreement/agreementtest/simulate.go
+++ b/agreement/agreementtest/simulate.go
@@ -18,9 +18,7 @@
package agreementtest
import (
- "context"
"fmt"
- "net/http"
"strconv"
"time"
@@ -28,12 +26,11 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/agreement/gossip"
+ "github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
"github.com/algorand/go-algorand/util/timers"
)
@@ -116,48 +113,14 @@ func (i *instant) HasPending(queueName string) bool {
return true
}
-type blackhole struct{}
+type blackhole struct {
+ mocks.MockNetwork
+}
func (b *blackhole) Address() (string, bool) {
return "blackhole", true
}
-func (b *blackhole) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except network.Peer) error {
- return nil
-}
-
-func (b *blackhole) Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except network.Peer) error {
- return nil
-}
-
-func (b *blackhole) Disconnect(badpeer network.Peer) {}
-
-func (b *blackhole) DisconnectPeers() {}
-
-func (b *blackhole) GetPeers(options ...network.PeerOption) []network.Peer {
- return nil
-}
-
-func (b *blackhole) Ready() chan struct{} {
- var closed chan struct{}
- close(closed)
- return closed
-}
-
-func (b *blackhole) RegisterRPCName(string, interface{}) {}
-func (b *blackhole) RegisterHTTPHandler(path string, handler http.Handler) {
-}
-
-func (b *blackhole) RequestConnectOutgoing(bool, <-chan struct{}) {}
-
-func (b *blackhole) Start() {}
-
-func (b *blackhole) Stop() {}
-
-func (b *blackhole) RegisterHandlers(dispatch []network.TaggedMessageHandler) {}
-
-func (b *blackhole) ClearHandlers() {}
-
// CryptoRandomSource is a random source that is based off our crypto library.
type CryptoRandomSource struct{}
@@ -218,7 +181,7 @@ func Simulate(dbname string, n basics.Round, roundDeadline time.Duration, ledger
select {
case <-ledger.Wait(r):
case <-deadlineCh:
- return fmt.Errorf("agreementtest.Simulate: round %v failed to complete by the deadline (%v)", r, roundDeadline)
+ return fmt.Errorf("agreementtest.Simulate: round %d failed to complete by the deadline (%v)", r, roundDeadline)
}
}
diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go
index 0d9a83a5a2..ab472146d4 100644
--- a/agreement/agreementtest/simulate_test.go
+++ b/agreement/agreementtest/simulate_test.go
@@ -42,7 +42,7 @@ import (
var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-var deadline = time.Second
+var deadline = time.Second * 5
var proto = protocol.ConsensusCurrentVersion
@@ -251,7 +251,7 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c agreement.Certificate) {
if _, ok := l.entries[e.Round()]; ok {
if l.entries[e.Round()].Digest() != e.Digest() {
- err := fmt.Errorf("testLedger.EnsureBlock called with conflicting entries in round %v", e.Round())
+ err := fmt.Errorf("testLedger.EnsureBlock called with conflicting entries in round %d", e.Round())
panic(err)
}
}
@@ -266,7 +266,7 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c agreement.Certificate) {
l.notify(e.Round())
}
-func (l *testLedger) EnsureDigest(c agreement.Certificate, quit chan struct{}, verifier *agreement.AsyncVoteVerifier) {
+func (l *testLedger) EnsureDigest(c agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
r := c.Round
consistencyCheck := func() bool {
l.mu.Lock()
@@ -274,7 +274,7 @@ func (l *testLedger) EnsureDigest(c agreement.Certificate, quit chan struct{}, v
if r < l.nextRound {
if l.entries[r].Digest() != c.Proposal.BlockDigest {
- err := fmt.Errorf("testLedger.EnsureDigest called with conflicting entries in round %v", r)
+ err := fmt.Errorf("testLedger.EnsureDigest called with conflicting entries in round %d", r)
panic(err)
}
return true
@@ -286,14 +286,10 @@ func (l *testLedger) EnsureDigest(c agreement.Certificate, quit chan struct{}, v
return
}
- select {
- case <-quit:
- return
- case <-l.Wait(r):
- if !consistencyCheck() {
- err := fmt.Errorf("Wait channel fired without matching block in round %v", r)
- panic(err)
- }
+ <-l.Wait(r)
+ if !consistencyCheck() {
+ err := fmt.Errorf("Wait channel fired without matching block in round %d", r)
+ panic(err)
}
}
diff --git a/agreement/common_test.go b/agreement/common_test.go
index 39c7db72a8..9d9dfa2cf4 100644
--- a/agreement/common_test.go
+++ b/agreement/common_test.go
@@ -315,7 +315,7 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c Certificate) {
if _, ok := l.entries[e.Round()]; ok {
if l.entries[e.Round()].Digest() != e.Digest() {
- err := fmt.Errorf("testLedger.EnsureBlock: called with conflicting entries in round %v", e.Round())
+ err := fmt.Errorf("testLedger.EnsureBlock: called with conflicting entries in round %d", e.Round())
panic(err)
}
}
@@ -326,42 +326,28 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c Certificate) {
if l.nextRound == e.Round() {
l.nextRound = e.Round() + 1
} else if l.nextRound < e.Round() {
- err := fmt.Errorf("testLedger.EnsureBlock: attempted to write block in future round: %v < %v", l.nextRound, e.Round())
+ err := fmt.Errorf("testLedger.EnsureBlock: attempted to write block in future round: %d < %d", l.nextRound, e.Round())
panic(err)
}
l.notify(e.Round())
}
-func (l *testLedger) EnsureDigest(c Certificate, quit chan struct{}, verifier *AsyncVoteVerifier) {
+func (l *testLedger) EnsureDigest(c Certificate, verifier *AsyncVoteVerifier) {
r := c.Round
- consistencyCheck := func() bool {
- l.mu.Lock()
- defer l.mu.Unlock()
-
- if r < l.nextRound {
- if l.entries[r].Digest() != c.Proposal.BlockDigest {
- err := fmt.Errorf("testLedger.EnsureDigest called with conflicting entries in round %v", r)
- panic(err)
- }
- return true
- }
- return false
- }
-
- if consistencyCheck() {
- return
- }
+ l.mu.Lock()
+ defer l.mu.Unlock()
- select {
- case <-quit:
- return
- case <-l.Wait(r):
- if !consistencyCheck() {
- err := fmt.Errorf("Wait channel fired without matching block in round %v", r)
+ if r < l.nextRound {
+ if l.entries[r].Digest() != c.Proposal.BlockDigest {
+ err := fmt.Errorf("testLedger.EnsureDigest called with conflicting entries in round %d", r)
panic(err)
}
}
+ // the mock ledger does not actually need to wait for the block.
+ // Agreement should function properly even if it never happens.
+ // No test right now expects the ledger to eventually ensure digest (we can add one if need be)
+ return
}
func (l *testLedger) ConsensusParams(r basics.Round) (config.ConsensusParams, error) {
@@ -383,7 +369,7 @@ type testAccountData struct {
func makeProposalsTesting(accs testAccountData, round basics.Round, period period, factory BlockFactory, ledger Ledger) (ps []proposal, vs []vote) {
ve, err := factory.AssembleBlock(round, time.Now().Add(time.Minute))
if err != nil {
- logging.Base().Errorf("Could not generate a proposal for round %v: %v", round, err)
+ logging.Base().Errorf("Could not generate a proposal for round %d: %v", round, err)
return nil, nil
}
diff --git a/agreement/cryptoRequestContext.go b/agreement/cryptoRequestContext.go
index 8a94ef7c8f..6bb2234a86 100644
--- a/agreement/cryptoRequestContext.go
+++ b/agreement/cryptoRequestContext.go
@@ -18,8 +18,6 @@ package agreement
import (
"context"
-
- "github.com/algorand/go-algorand/protocol"
)
// periodRequestsContext keeps a context for all tasks associated with the same period, so we can cancel them if the period becomes irrelevant.
@@ -35,7 +33,10 @@ type periodRequestsContext struct {
// It allows for the pinned value to act as a sentinel.
type cryptoRequestCtxKey struct {
period period
- pinned bool // If this is set, period should be 0.
+
+ // note: following two booleans are mutually exclusive
+ certify bool // If this is set, period should be 0.
+ pinned bool // If this is set, period should be 0.
}
// roundRequestsContext keeps a the root context for all cryptoRequests associated with a round.
@@ -53,71 +54,58 @@ func makePendingRequestsContext() pendingRequestsContext {
return make(map[round]roundRequestsContext)
}
-// add returns a context associated with a given request
-func (pending pendingRequestsContext) add(request cryptoRequest) context.Context {
+// getReqCtx gets the roundRequestsContext for a round and cryptoRequestCtxKey.
+func (pending pendingRequestsContext) getReqCtx(rnd round, pkey cryptoRequestCtxKey) periodRequestsContext {
// create round context
- if _, has := pending[request.Round]; !has {
+ if _, has := pending[rnd]; !has {
roundCtx, cancel := context.WithCancel(context.Background())
- pending[request.Round] = roundRequestsContext{ctx: roundCtx, cancel: cancel, periods: make(map[cryptoRequestCtxKey]periodRequestsContext)}
- }
-
- pkey := cryptoRequestCtxKey{period: request.Period}
- if request.Pinned {
- pkey = cryptoRequestCtxKey{pinned: request.Pinned}
+ pending[rnd] = roundRequestsContext{ctx: roundCtx, cancel: cancel, periods: make(map[cryptoRequestCtxKey]periodRequestsContext)}
}
// create period context
- if _, has := pending[request.Round].periods[pkey]; !has {
- periodCtx, periodCancel := context.WithCancel(pending[request.Round].ctx)
- pending[request.Round].periods[pkey] = periodRequestsContext{ctx: periodCtx, cancel: periodCancel}
- }
-
- // find the right context for the request
- roundCtx := pending[request.Round]
- periodCtx, has := roundCtx.periods[pkey]
-
- if request.Tag == protocol.ProposalPayloadTag {
- // we have a new proposal, so cancel validation of an old proposal for the same round and period
- if has && periodCtx.proposalCancelFunc != nil {
- periodCtx.proposalCancelFunc()
- }
-
- // create a context for the new proposal
- var proposalContext context.Context
- proposalContext, periodCtx.proposalCancelFunc = context.WithCancel(periodCtx.ctx)
- pending[request.Round].periods[pkey] = periodCtx
- return proposalContext
+ if _, has := pending[rnd].periods[pkey]; !has {
+ periodCtx, periodCancel := context.WithCancel(pending[rnd].ctx)
+ pending[rnd].periods[pkey] = periodRequestsContext{ctx: periodCtx, cancel: periodCancel}
}
- return periodCtx.ctx
+ return pending[rnd].periods[pkey]
}
-// add returns a context associated with a given request
+// addVote returns a context associated with a given request
func (pending pendingRequestsContext) addVote(request cryptoVoteRequest) context.Context {
- // create round context
- if _, has := pending[request.Round]; !has {
- roundCtx, cancel := context.WithCancel(context.Background())
- pending[request.Round] = roundRequestsContext{ctx: roundCtx, cancel: cancel, periods: make(map[cryptoRequestCtxKey]periodRequestsContext)}
- }
+ return pending.getReqCtx(request.Round, cryptoRequestCtxKey{period: request.Period}).ctx
+}
+// addProposal returns a context associated with a given request (and cancels any older similar request)
+func (pending pendingRequestsContext) addProposal(request cryptoProposalRequest) context.Context {
pkey := cryptoRequestCtxKey{period: request.Period}
if request.Pinned {
pkey = cryptoRequestCtxKey{pinned: request.Pinned}
}
+ rqctx := pending.getReqCtx(request.Round, pkey)
- // create period context
- if _, has := pending[request.Round].periods[pkey]; !has {
- periodCtx, periodCancel := context.WithCancel(pending[request.Round].ctx)
- pending[request.Round].periods[pkey] = periodRequestsContext{ctx: periodCtx, cancel: periodCancel}
+ if rqctx.proposalCancelFunc != nil {
+ // we have a new proposal, so cancel validation of an old proposal for the same round and period
+ rqctx.proposalCancelFunc()
}
- // find the right context for the request
- roundCtx := pending[request.Round]
- periodCtx := roundCtx.periods[pkey]
- return periodCtx.ctx
+ // create a context for the new proposal
+ var proposalContext context.Context
+ proposalContext, rqctx.proposalCancelFunc = context.WithCancel(rqctx.ctx)
+ pending[request.Round].periods[pkey] = rqctx
+ return proposalContext
+}
+
+// addBundle returns a context associated with a given request
+func (pending pendingRequestsContext) addBundle(request cryptoBundleRequest) context.Context {
+ pkey := cryptoRequestCtxKey{period: request.Period}
+ if request.Certify {
+ pkey = cryptoRequestCtxKey{certify: request.Certify}
+ }
+ return pending.getReqCtx(request.Round, pkey).ctx
}
// clearStaleContexts cancels contexts associated with cryptoRequests that are no longer relevant at the given round and period
-func (pending pendingRequestsContext) clearStaleContexts(r round, p period, pinned bool) {
+func (pending pendingRequestsContext) clearStaleContexts(r round, p period, pinned bool, certify bool) {
// at round r + 2 we can clear tasks from round r
oldRounds := make([]round, 0)
for round := range pending {
@@ -125,22 +113,22 @@ func (pending pendingRequestsContext) clearStaleContexts(r round, p period, pinn
oldRounds = append(oldRounds, round)
}
}
-
- // we got a new pinned proposal: do not clear period tasks
- if pinned {
- return
- }
-
- // at period p + 3 we can clear tasks from period p
for _, oldRound := range oldRounds {
pending[oldRound].cancel()
delete(pending, oldRound)
}
+ // we got a new pinned proposal or a cert bundle:
+ // do not clear period tasks
+ if pinned || certify {
+ return
+ }
+
+ // at period p + 3 we can clear tasks from period p
if _, has := pending[r]; has {
oldPeriods := make([]cryptoRequestCtxKey, 0)
for pkey := range pending[r].periods {
- if !pkey.pinned && pkey.period+3 <= p {
+ if !pkey.pinned && !pkey.certify && pkey.period+3 <= p {
oldPeriods = append(oldPeriods, pkey)
}
}
diff --git a/agreement/cryptoRequestContext_test.go b/agreement/cryptoRequestContext_test.go
index aa107c1826..648dcdc3b6 100644
--- a/agreement/cryptoRequestContext_test.go
+++ b/agreement/cryptoRequestContext_test.go
@@ -17,6 +17,7 @@
package agreement
import (
+ "context"
"testing"
"github.com/stretchr/testify/require"
@@ -24,51 +25,88 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+func forEachTagDo(fn func(protocol.Tag)) {
+ for _, tag := range []protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag, protocol.VoteBundleTag} {
+ fn(tag)
+ }
+}
+
func TestCryptoRequestContextAddCancelRound(t *testing.T) {
pending := makePendingRequestsContext()
- req := cryptoRequest{Round: 10, Period: 10}
- ctx := pending.add(req)
-
- roundCtx, hasRound := pending[req.Round]
- require.True(t, hasRound)
-
- _, hasPeriod := pending[req.Round].periods[cryptoRequestCtxKey{period: req.Period}]
- require.True(t, hasPeriod)
-
- roundCtx.cancel()
- select {
- case <-ctx.Done():
- default:
- t.Errorf("did not cancel request")
- }
+ rnd := round(10)
+ per := period(10)
+ forEachTagDo(func(tag protocol.Tag) {
+ var ctx context.Context
+ switch tag {
+ case protocol.AgreementVoteTag:
+ req := cryptoVoteRequest{Round: rnd, Period: per}
+ ctx = pending.addVote(req)
+ case protocol.ProposalPayloadTag:
+ req := cryptoProposalRequest{Round: rnd, Period: per}
+ ctx = pending.addProposal(req)
+ case protocol.VoteBundleTag:
+ req := cryptoBundleRequest{Round: rnd, Period: per}
+ ctx = pending.addBundle(req)
+ }
+
+ roundCtx, hasRound := pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod := pending[rnd].periods[cryptoRequestCtxKey{period: per}]
+ require.True(t, hasPeriod)
+
+ roundCtx.cancel()
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("did not cancel request")
+ }
+ })
}
func TestCryptoRequestContextAddCancelPeriod(t *testing.T) {
pending := makePendingRequestsContext()
- req := cryptoRequest{Round: 10, Period: 10}
- ctx := pending.add(req)
-
- _, hasRound := pending[req.Round]
- require.True(t, hasRound)
-
- periodCtx, hasPeriod := pending[req.Round].periods[cryptoRequestCtxKey{period: req.Period}]
- require.True(t, hasPeriod)
-
- periodCtx.cancel()
- select {
- case <-ctx.Done():
- default:
- t.Errorf("did not cancel request")
- }
+ rnd := round(10)
+ per := period(10)
+
+ forEachTagDo(func(tag protocol.Tag) {
+ var ctx context.Context
+ switch tag {
+ case protocol.AgreementVoteTag:
+ req := cryptoVoteRequest{Round: rnd, Period: per}
+ ctx = pending.addVote(req)
+ case protocol.ProposalPayloadTag:
+ req := cryptoProposalRequest{Round: rnd, Period: per}
+ ctx = pending.addProposal(req)
+ case protocol.VoteBundleTag:
+ req := cryptoBundleRequest{Round: rnd, Period: per}
+ ctx = pending.addBundle(req)
+ }
+
+ _, hasRound := pending[rnd]
+ require.True(t, hasRound)
+
+ periodCtx, hasPeriod := pending[rnd].periods[cryptoRequestCtxKey{period: per}]
+ require.True(t, hasPeriod)
+
+ periodCtx.cancel()
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("did not cancel request")
+ }
+ })
}
func TestCryptoRequestContextAddCancelProposal(t *testing.T) {
pending := makePendingRequestsContext()
- proposal := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Period: 10}
- ctx := pending.add(proposal)
+ rnd := round(10)
+ per := period(10)
+ proposal := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Period: per}
+ ctx := pending.addProposal(proposal)
- proposal2 := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Period: 10}
- ctx2 := pending.add(proposal2)
+ proposal2 := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Period: per}
+ ctx2 := pending.addProposal(proposal2)
select {
case <-ctx.Done():
@@ -86,11 +124,12 @@ func TestCryptoRequestContextAddCancelProposal(t *testing.T) {
func TestCryptoRequestContextAddCancelPinnedProposal(t *testing.T) {
pending := makePendingRequestsContext()
- proposal := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Pinned: true}
- ctx := pending.add(proposal)
+ rnd := round(10)
+ proposal := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Pinned: true}
+ ctx := pending.addProposal(proposal)
- proposal2 := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Pinned: true}
- ctx2 := pending.add(proposal2)
+ proposal2 := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Pinned: true}
+ ctx2 := pending.addProposal(proposal2)
select {
case <-ctx.Done():
@@ -108,11 +147,13 @@ func TestCryptoRequestContextAddCancelPinnedProposal(t *testing.T) {
func TestCryptoRequestContextAddNoCancelPinnedProposal(t *testing.T) {
pending := makePendingRequestsContext()
- proposal := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Pinned: true}
- ctx := pending.add(proposal)
+ rnd := round(10)
+ per := period(10)
+ proposal := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Pinned: true}
+ ctx := pending.addProposal(proposal)
- proposal2 := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Period: 10}
- ctx2 := pending.add(proposal2)
+ proposal2 := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Period: per}
+ ctx2 := pending.addProposal(proposal2)
select {
case <-ctx.Done():
@@ -129,11 +170,13 @@ func TestCryptoRequestContextAddNoCancelPinnedProposal(t *testing.T) {
func TestCryptoRequestContextAddNoInterferencePinnedProposal(t *testing.T) {
pending := makePendingRequestsContext()
- proposal := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Period: 10}
- ctx := pending.add(proposal)
+ rnd := round(10)
+ per := period(10)
+ proposal := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Period: per}
+ ctx := pending.addProposal(proposal)
- proposal2 := cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: 10, Pinned: true}
- ctx2 := pending.add(proposal2)
+ proposal2 := cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag}, Round: rnd, Pinned: true}
+ ctx2 := pending.addProposal(proposal2)
select {
case <-ctx.Done():
@@ -150,129 +193,215 @@ func TestCryptoRequestContextAddNoInterferencePinnedProposal(t *testing.T) {
func TestCryptoRequestContextCleanupByRound(t *testing.T) {
pending := makePendingRequestsContext()
- req := cryptoRequest{Round: 10, Period: 10}
- ctx := pending.add(req)
-
- _, hasRound := pending[req.Round]
- require.True(t, hasRound)
-
- _, hasPeriod := pending[req.Round].periods[cryptoRequestCtxKey{period: req.Period}]
- require.True(t, hasPeriod)
-
- pending.clearStaleContexts(11, 20, false)
- select {
- case <-ctx.Done():
- t.Errorf("cancelled request")
- default:
- }
-
- pending.clearStaleContexts(12, 20, false)
- select {
- case <-ctx.Done():
- default:
- t.Errorf("did not cancel request")
- }
-
- _, hasRound = pending[req.Round]
- require.False(t, hasRound)
-
- _, hasPeriod = pending[req.Round].periods[cryptoRequestCtxKey{period: req.Period}]
- require.False(t, hasPeriod)
+ rnd := round(10)
+ per := period(10)
+
+ forEachTagDo(func(tag protocol.Tag) {
+ var ctx context.Context
+ switch tag {
+ case protocol.AgreementVoteTag:
+ req := cryptoVoteRequest{Round: rnd, Period: per}
+ ctx = pending.addVote(req)
+ case protocol.ProposalPayloadTag:
+ req := cryptoProposalRequest{Round: rnd, Period: per}
+ ctx = pending.addProposal(req)
+ case protocol.VoteBundleTag:
+ req := cryptoBundleRequest{Round: rnd, Period: per}
+ ctx = pending.addBundle(req)
+ }
+
+ _, hasRound := pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod := pending[rnd].periods[cryptoRequestCtxKey{period: per}]
+ require.True(t, hasPeriod)
+
+ pending.clearStaleContexts(rnd+1, 20, false, false)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request")
+ default:
+ }
+
+ pending.clearStaleContexts(rnd+2, 20, false, false)
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("did not cancel request")
+ }
+
+ _, hasRound = pending[rnd]
+ require.False(t, hasRound)
+
+ _, hasPeriod = pending[rnd].periods[cryptoRequestCtxKey{period: per}]
+ require.False(t, hasPeriod)
+ })
}
-func TestCryptoRequestContextCleanupByRoundPinned(t *testing.T) {
+func TestCryptoRequestContextCleanupByRoundPinnedCertify(t *testing.T) {
pending := makePendingRequestsContext()
- req := cryptoRequest{Round: 10, Pinned: true}
- ctx := pending.add(req)
-
- _, hasRound := pending[req.Round]
- require.True(t, hasRound)
-
- _, hasPeriod := pending[req.Round].periods[cryptoRequestCtxKey{pinned: req.Pinned}]
- require.True(t, hasPeriod)
-
- pending.clearStaleContexts(11, 20, false)
- select {
- case <-ctx.Done():
- t.Errorf("cancelled request")
- default:
- }
-
- pending.clearStaleContexts(12, 20, false)
- select {
- case <-ctx.Done():
- default:
- t.Errorf("did not cancel request")
- }
-
- _, hasRound = pending[req.Round]
- require.False(t, hasRound)
-
- _, hasPeriod = pending[req.Round].periods[cryptoRequestCtxKey{pinned: req.Pinned}]
- require.False(t, hasPeriod)
+ rnd := round(10)
+
+ forEachTagDo(func(tag protocol.Tag) {
+ var ctx context.Context
+ var hasRound, hasPeriod bool
+ switch tag {
+ case protocol.AgreementVoteTag:
+ return
+ case protocol.ProposalPayloadTag:
+ req := cryptoProposalRequest{Round: rnd, Pinned: true}
+ ctx = pending.addProposal(req)
+
+ _, hasRound = pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod = pending[rnd].periods[cryptoRequestCtxKey{pinned: true}]
+ require.True(t, hasPeriod)
+
+ case protocol.VoteBundleTag:
+ req := cryptoBundleRequest{Round: rnd, Certify: true}
+ ctx = pending.addBundle(req)
+
+ _, hasRound = pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod = pending[rnd].periods[cryptoRequestCtxKey{certify: true}]
+ require.True(t, hasPeriod)
+ }
+
+ pending.clearStaleContexts(rnd+1, 20, false, false)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request")
+ default:
+ }
+
+ pending.clearStaleContexts(rnd+2, 20, false, false)
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("did not cancel request")
+ }
+
+ _, hasRound = pending[rnd]
+ require.False(t, hasRound)
+
+ switch tag {
+ case protocol.AgreementVoteTag:
+ return
+ case protocol.ProposalPayloadTag:
+ _, hasPeriod = pending[rnd].periods[cryptoRequestCtxKey{pinned: true}]
+ require.False(t, hasPeriod)
+ case protocol.VoteBundleTag:
+ _, hasPeriod = pending[rnd].periods[cryptoRequestCtxKey{certify: true}]
+ require.False(t, hasPeriod)
+ }
+ })
}
func TestCryptoRequestContextCleanupByPeriod(t *testing.T) {
pending := makePendingRequestsContext()
- req := cryptoRequest{Round: 10, Period: 10}
- ctx := pending.add(req)
-
- _, hasRound := pending[req.Round]
- require.True(t, hasRound)
-
- _, hasPeriod := pending[req.Round].periods[cryptoRequestCtxKey{period: req.Period}]
- require.True(t, hasPeriod)
-
- pending.clearStaleContexts(10, 12, false)
- select {
- case <-ctx.Done():
- t.Errorf("cancelled request")
- default:
- }
-
- pending.clearStaleContexts(10, 13, true)
- select {
- case <-ctx.Done():
- t.Errorf("cancelled request via pinned")
- default:
- }
-
- pending.clearStaleContexts(10, 13, false)
- select {
- case <-ctx.Done():
- default:
- t.Errorf("did not cancel request")
- }
-
- _, hasRound = pending[req.Round]
- require.False(t, hasRound)
-
- _, hasPeriod = pending[req.Round].periods[cryptoRequestCtxKey{period: req.Period}]
- require.False(t, hasPeriod)
+ rnd := round(10)
+ per := period(10)
+
+ forEachTagDo(func(tag protocol.Tag) {
+ var ctx context.Context
+ switch tag {
+ case protocol.AgreementVoteTag:
+ req := cryptoVoteRequest{Round: rnd, Period: per}
+ ctx = pending.addVote(req)
+ case protocol.ProposalPayloadTag:
+ req := cryptoProposalRequest{Round: rnd, Period: per}
+ ctx = pending.addProposal(req)
+ case protocol.VoteBundleTag:
+ req := cryptoBundleRequest{Round: rnd, Period: per}
+ ctx = pending.addBundle(req)
+ }
+
+ _, hasRound := pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod := pending[rnd].periods[cryptoRequestCtxKey{period: per}]
+ require.True(t, hasPeriod)
+
+ pending.clearStaleContexts(rnd, per+2, false, false)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request")
+ default:
+ }
+
+ pending.clearStaleContexts(rnd, per+3, true, false)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request via pinned")
+ default:
+ }
+
+ pending.clearStaleContexts(rnd, per+3, false, true)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request via certify")
+ default:
+ }
+
+ pending.clearStaleContexts(rnd, per+3, false, false)
+ select {
+ case <-ctx.Done():
+ default:
+ t.Errorf("did not cancel request")
+ }
+
+ _, hasRound = pending[rnd]
+ require.False(t, hasRound)
+
+ _, hasPeriod = pending[rnd].periods[cryptoRequestCtxKey{period: per}]
+ require.False(t, hasPeriod)
+ })
}
func TestCryptoRequestContextCleanupByPeriodPinned(t *testing.T) {
pending := makePendingRequestsContext()
- req := cryptoRequest{Round: 10, Pinned: true}
- ctx := pending.add(req)
-
- _, hasRound := pending[req.Round]
- require.True(t, hasRound)
-
- _, hasPeriod := pending[req.Round].periods[cryptoRequestCtxKey{pinned: req.Pinned}]
- require.True(t, hasPeriod)
-
- pending.clearStaleContexts(10, 12, false)
- select {
- case <-ctx.Done():
- t.Errorf("cancelled request")
- default:
- }
-
- pending.clearStaleContexts(10, 13, false)
- select {
- case <-ctx.Done():
- t.Errorf("cancelled request but pinned")
- default:
- }
+ rnd := round(10)
+
+ forEachTagDo(func(tag protocol.Tag) {
+ var ctx context.Context
+ switch tag {
+ case protocol.AgreementVoteTag:
+ return
+ case protocol.ProposalPayloadTag:
+ req := cryptoProposalRequest{Round: rnd, Pinned: true}
+ ctx = pending.addProposal(req)
+
+ _, hasRound := pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod := pending[rnd].periods[cryptoRequestCtxKey{pinned: req.Pinned}]
+ require.True(t, hasPeriod)
+
+ case protocol.VoteBundleTag:
+ req := cryptoBundleRequest{Round: rnd, Certify: true}
+ ctx = pending.addBundle(req)
+
+ _, hasRound := pending[rnd]
+ require.True(t, hasRound)
+
+ _, hasPeriod := pending[rnd].periods[cryptoRequestCtxKey{certify: req.Certify}]
+ require.True(t, hasPeriod)
+ }
+
+ pending.clearStaleContexts(rnd, 12, false, false)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request")
+ default:
+ }
+
+ pending.clearStaleContexts(rnd, 13, false, false)
+ select {
+ case <-ctx.Done():
+ t.Errorf("cancelled request but pinned/certify set")
+ default:
+ }
+ })
}
diff --git a/agreement/cryptoVerifier.go b/agreement/cryptoVerifier.go
index 5a695a7cc7..5d69ac669a 100644
--- a/agreement/cryptoVerifier.go
+++ b/agreement/cryptoVerifier.go
@@ -41,19 +41,20 @@ type (
// If no goroutine is dequeuing cryptoResults from cryptoVerifier.Verified*, deadlock could occur.
// To avoid this scenario, callers should call cryptoVerifier.ChannelFull to back off from submitting requests.
cryptoVerifier interface {
- // Verify enqueues the request to be verified.
+ // VerifyVote enqueues the request to be verified.
//
// The passed-in context ctx may be used to cancel the enqueuing request.
- //
- // The Verify function supports proposals and bundles.
- Verify(ctx context.Context, request cryptoRequest)
+ VerifyVote(ctx context.Context, request cryptoVoteRequest)
- // VerifyVote enqueues the request to be verified.
+ // VerifyProposal enqueues the request to be verified.
//
// The passed-in context ctx may be used to cancel the enqueuing request.
+ VerifyProposal(ctx context.Context, request cryptoProposalRequest)
+
+ // VerifyBundle enqueues the request to be verified.
//
- // The VerifyVote function supports votes only.
- VerifyVote(ctx context.Context, request cryptoVoteRequest)
+ // The passed-in context ctx may be used to cancel the enqueuing request.
+ VerifyBundle(ctx context.Context, request cryptoBundleRequest)
// Verified returns a channel which contains verification results.
//
@@ -82,11 +83,10 @@ type (
TaskIndex int // Caller specific number that would be passed back in the asyncVerifyVoteResponse.TaskIndex field
Round round // The round that we're going to test against.
Period period // The period associated with the message we're going to test.
- Pinned bool // A flag that is set if this is a pinned value for the given round.
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
- cryptoRequest struct {
+ cryptoProposalRequest struct {
message // the message we would like to verify.
TaskIndex int // Caller specific number that would be passed back in the cryptoResult.TaskIndex field
Round round // The round that we're going to test against.
@@ -95,6 +95,15 @@ type (
ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
}
+ cryptoBundleRequest struct {
+ message // the message we would like to verify.
+ TaskIndex int // Caller specific number that would be passed back in the cryptoResult.TaskIndex field
+ Round round // The round that we're going to test against.
+ Period period // The period associated with the message we're going to test.
+ Certify bool // A flag that is set if this is a cert bundle.
+ ctx context.Context // A context for this request, if the context is cancelled then the request is stale.
+ }
+
cryptoResult struct {
message
Err serializableError
@@ -106,8 +115,8 @@ type (
poolCryptoVerifier struct {
voteVerifier *AsyncVoteVerifier
votes voteChanPair
- bundles chanPair
- proposals chanPair
+ proposals proposalChanPair
+ bundles bundleChanPair
validator BlockValidator
ledger LedgerReader
@@ -118,16 +127,21 @@ type (
wg sync.WaitGroup
}
- chanPair struct {
- in chan cryptoRequest
- out chan cryptoResult
- }
-
voteChanPair struct {
in chan cryptoVoteRequest
out chan asyncVerifyVoteResponse
}
+ proposalChanPair struct {
+ in chan cryptoProposalRequest
+ out chan cryptoResult
+ }
+
+ bundleChanPair struct {
+ in chan cryptoBundleRequest
+ out chan cryptoResult
+ }
+
bundleFuture struct {
message
index int
@@ -147,8 +161,8 @@ func makeCryptoVerifier(l LedgerReader, v BlockValidator, voteVerifier *AsyncVot
in: make(chan cryptoVoteRequest, voteVerifier.Parallelism()),
out: make(chan asyncVerifyVoteResponse, 3*voteVerifier.Parallelism()),
}
- c.bundles = chanPair{
- in: make(chan cryptoRequest, 1),
+ c.bundles = bundleChanPair{
+ in: make(chan cryptoBundleRequest, 1),
out: make(chan cryptoResult, 3),
}
@@ -156,8 +170,8 @@ func makeCryptoVerifier(l LedgerReader, v BlockValidator, voteVerifier *AsyncVot
// TODO We want proper backpressure from the proposalTable into the network.
baseBuffer := 3
maxVotes := cap(c.votes.in) + cap(c.votes.out) + voteVerifier.Parallelism()
- c.proposals = chanPair{
- in: make(chan cryptoRequest, 1),
+ c.proposals = proposalChanPair{
+ in: make(chan cryptoProposalRequest, 1),
out: make(chan cryptoResult, maxVotes+baseBuffer),
}
@@ -247,15 +261,24 @@ func (c *poolCryptoVerifier) bundleWaitWorker(fromVoteFill <-chan bundleFuture)
}
}
-func (c *poolCryptoVerifier) Verify(ctx context.Context, request cryptoRequest) {
- c.proposalContexts.clearStaleContexts(request.Round, request.Period, request.Pinned)
- request.ctx = c.proposalContexts.add(request)
+func (c *poolCryptoVerifier) VerifyVote(ctx context.Context, request cryptoVoteRequest) {
+ c.proposalContexts.clearStaleContexts(request.Round, request.Period, false, false)
+ request.ctx = c.proposalContexts.addVote(request)
switch request.Tag {
- case protocol.VoteBundleTag:
+ case protocol.AgreementVoteTag:
select {
- case c.bundles.in <- request:
+ case c.votes.in <- request:
case <-ctx.Done():
}
+ default:
+ logging.Base().Panicf("Verify action called on bad type: request is %v", request)
+ }
+}
+
+func (c *poolCryptoVerifier) VerifyProposal(ctx context.Context, request cryptoProposalRequest) {
+ c.proposalContexts.clearStaleContexts(request.Round, request.Period, request.Pinned, false)
+ request.ctx = c.proposalContexts.addProposal(request)
+ switch request.Tag {
case protocol.ProposalPayloadTag:
select {
case c.proposals.in <- request:
@@ -266,13 +289,13 @@ func (c *poolCryptoVerifier) Verify(ctx context.Context, request cryptoRequest)
}
}
-func (c *poolCryptoVerifier) VerifyVote(ctx context.Context, request cryptoVoteRequest) {
- c.proposalContexts.clearStaleContexts(request.Round, request.Period, request.Pinned)
- request.ctx = c.proposalContexts.addVote(request)
+func (c *poolCryptoVerifier) VerifyBundle(ctx context.Context, request cryptoBundleRequest) {
+ c.proposalContexts.clearStaleContexts(request.Round, request.Period, false, request.Certify)
+ request.ctx = c.proposalContexts.addBundle(request)
switch request.Tag {
- case protocol.AgreementVoteTag:
+ case protocol.VoteBundleTag:
select {
- case c.votes.in <- request:
+ case c.bundles.in <- request:
case <-ctx.Done():
}
default:
@@ -332,7 +355,7 @@ func (c *poolCryptoVerifier) proposalVerifyWorker() {
}
}
-func (c *poolCryptoVerifier) verifyProposalPayload(request cryptoRequest) cryptoResult {
+func (c *poolCryptoVerifier) verifyProposalPayload(request cryptoProposalRequest) cryptoResult {
m := request.message
up := request.UnauthenticatedProposal
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index b6b0f88a4d..b31d09169f 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -157,7 +157,14 @@ func TestCryptoVerifierBuffers(t *testing.T) {
msgID := msgIDs[0]
msgIDs = msgIDs[1:]
usedMsgIDs[msgID] = struct{}{}
- verifier.Verify(ctx, cryptoRequest{message: makeMessage(msgID, msgType, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ switch msgType {
+ case protocol.AgreementVoteTag:
+ verifier.VerifyVote(ctx, cryptoVoteRequest{message: makeMessage(msgID, msgType, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ case protocol.ProposalPayloadTag:
+ verifier.VerifyProposal(ctx, cryptoProposalRequest{message: makeMessage(msgID, msgType, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ case protocol.VoteBundleTag:
+ verifier.VerifyBundle(ctx, cryptoBundleRequest{message: makeMessage(msgID, msgType, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ }
}
// test to see that queues are full
assert.Equal(t, len(verifier.Verified(msgType)), getSelectorCapacity(msgType)*2)
@@ -195,7 +202,15 @@ func TestCryptoVerifierBuffers(t *testing.T) {
msgIDs = msgIDs[1:]
usedMsgIDs[msgID] = struct{}{}
msgIDMutex.Unlock()
- verifier.Verify(ctx, cryptoRequest{message: makeMessage(msgID, tag, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+
+ switch tag {
+ case protocol.AgreementVoteTag:
+ verifier.VerifyVote(ctx, cryptoVoteRequest{message: makeMessage(msgID, tag, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ case protocol.ProposalPayloadTag:
+ verifier.VerifyProposal(ctx, cryptoProposalRequest{message: makeMessage(msgID, tag, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ case protocol.VoteBundleTag:
+ verifier.VerifyBundle(ctx, cryptoBundleRequest{message: makeMessage(msgID, tag, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()})
+ }
} else {
atomic.AddInt32(&writeTotals, -1)
return
@@ -268,11 +283,11 @@ func BenchmarkCryptoVerifierVoteVertification(b *testing.B) {
c := verifier.Verified(protocol.AgreementVoteTag)
senderIdx := findSender(ledger, basics.Round(300), 0, 0, addresses, selections)
- request := cryptoRequest{message: makeMessage(0, protocol.AgreementVoteTag, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()}
+ request := cryptoVoteRequest{message: makeMessage(0, protocol.AgreementVoteTag, addresses[senderIdx], ledger, selections[senderIdx], votings[senderIdx], 300, 0, 0), Round: ledger.NextRound()}
b.ResetTimer()
go func() {
for n := 0; n < b.N; n++ {
- verifier.Verify(ctx, request)
+ verifier.VerifyVote(ctx, request)
}
}()
for n := 0; n < b.N; n++ {
@@ -310,7 +325,7 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
verifier := makeCryptoVerifier(ledger, testBlockValidator{}, MakeAsyncVoteVerifier(nil), logging.Base())
c := verifier.Verified(protocol.ProposalPayloadTag)
- request := cryptoRequest{
+ request := cryptoProposalRequest{
message: message{
MessageHandle: MessageHandle(0),
Tag: protocol.ProposalPayloadTag,
@@ -322,7 +337,7 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) {
b.ResetTimer()
go func() {
for n := 0; n < b.N; n++ {
- verifier.Verify(ctx, request)
+ verifier.VerifyProposal(ctx, request)
}
}()
for n := 0; n < b.N; n++ {
@@ -339,7 +354,7 @@ func BenchmarkCryptoVerifierBundleVertification(b *testing.B) {
Step := step(5)
senders := findSenders(ledger, ledger.NextRound(), 0, Step, addresses, selections)
- request := cryptoRequest{message: makeMessage(0, protocol.VoteBundleTag, addresses[senders[0]], ledger, selections[senders[0]], votings[senders[0]], ledger.NextRound(), 0, Step), Round: ledger.NextRound()}
+ request := cryptoBundleRequest{message: makeMessage(0, protocol.VoteBundleTag, addresses[senders[0]], ledger, selections[senders[0]], votings[senders[0]], ledger.NextRound(), 0, Step), Round: ledger.NextRound()}
for _, senderIdx := range senders {
uv := makeUnauthenticatedVote(ledger, addresses[senderIdx], selections[senderIdx], votings[senderIdx], request.message.UnauthenticatedBundle.Round, request.message.UnauthenticatedBundle.Period, Step, request.message.UnauthenticatedBundle.Proposal)
v, err := uv.verify(ledger)
@@ -359,7 +374,7 @@ func BenchmarkCryptoVerifierBundleVertification(b *testing.B) {
b.ResetTimer()
go func() {
for n := 0; n < b.N; n++ {
- verifier.Verify(ctx, request)
+ verifier.VerifyBundle(ctx, request)
}
}()
for n := 0; n < b.N; n++ {
diff --git a/agreement/demux.go b/agreement/demux.go
index 12877aa954..3917822e99 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -158,14 +158,14 @@ func (d *demux) verifyVote(ctx context.Context, m message, taskIndex int, r roun
func (d *demux) verifyPayload(ctx context.Context, m message, r round, p period, pinned bool) {
d.UpdateEventsQueue(eventQueueCryptoVerifierProposal, 1)
d.monitor.inc(cryptoVerifierCoserviceType)
- d.crypto.Verify(ctx, cryptoRequest{message: m, Round: r, Period: p, Pinned: pinned})
+ d.crypto.VerifyProposal(ctx, cryptoProposalRequest{message: m, Round: r, Period: p, Pinned: pinned})
}
// verifyBundle enqueues a bundle message to be verified.
-func (d *demux) verifyBundle(ctx context.Context, m message, r round, p period) {
+func (d *demux) verifyBundle(ctx context.Context, m message, r round, p period, s step) {
d.UpdateEventsQueue(eventQueueCryptoVerifierBundle, 1)
d.monitor.inc(cryptoVerifierCoserviceType)
- d.crypto.Verify(ctx, cryptoRequest{message: m, Round: r, Period: p})
+ d.crypto.VerifyBundle(ctx, cryptoBundleRequest{message: m, Round: r, Period: p, Certify: s == cert})
}
// next blocks until it observes an external input event of interest for the state machine.
@@ -232,7 +232,7 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat
fastDeadlineCh = s.Clock.TimeoutAt(fastDeadline)
}
if err != nil {
- logging.Base().Errorf("could not get consensus parameters for round %v: %v", ParamsRound(currentRound), err)
+ logging.Base().Errorf("could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err)
}
d.UpdateEventsQueue(eventQueueDemux, 0)
@@ -271,7 +271,7 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat
Round: uint64(previousRound),
}
- s.log.with(logEvent).Infof("agreement: round %v ended early due to concurrent write; next round is %v", previousRound, nextRound)
+ s.log.with(logEvent).Infof("agreement: round %d ended early due to concurrent write; next round is %d", previousRound, nextRound)
e = roundInterruptionEvent{Round: nextRound}
d.UpdateEventsQueue(eventQueueDemux, 1)
d.monitor.inc(demuxCoserviceType)
diff --git a/agreement/demux_test.go b/agreement/demux_test.go
index c585d2e048..c5fb6c1621 100644
--- a/agreement/demux_test.go
+++ b/agreement/demux_test.go
@@ -515,12 +515,17 @@ func (t *demuxTester) EnsureValidatedBlock(ValidatedBlock, Certificate) {
}
// implement Ledger
-func (t *demuxTester) EnsureDigest(Certificate, chan struct{}, *AsyncVoteVerifier) {
+func (t *demuxTester) EnsureDigest(Certificate, *AsyncVoteVerifier) {
// we don't care about this function in this test.
}
// implement cryptoVerifier
-func (t *demuxTester) Verify(ctx context.Context, request cryptoRequest) {
+func (t *demuxTester) VerifyProposal(ctx context.Context, request cryptoProposalRequest) {
+ // we don't care about this function in this test.
+}
+
+// implement cryptoVerifier
+func (t *demuxTester) VerifyBundle(ctx context.Context, request cryptoBundleRequest) {
// we don't care about this function in this test.
}
diff --git a/agreement/events.go b/agreement/events.go
index c28397a915..1a4d448108 100644
--- a/agreement/events.go
+++ b/agreement/events.go
@@ -108,6 +108,8 @@ const (
// roundInterruption is emitted by the Ledger as input to the player
// state machine when an external source observes that the player's
// current round has completed concurrent with the player's operation.
+ // roundInterruption is also emitted (internally, by the player itself) after
+ // calling ensureBlock.
roundInterruption
// timeout is emitted by the Clock as input to the player state machine
@@ -658,7 +660,7 @@ func (e thresholdEvent) ComparableStr() string {
//
// The ordering is given as follows:
//
-// - certThreshold events are fresher than all other events.
+// - certThreshold events are fresher than all other non-certThreshold events.
// - Events from a later period are fresher than events from an older period.
// - nextThreshold events are fresher than softThreshold events from the same
// period.
diff --git a/agreement/fuzzer/ledger_test.go b/agreement/fuzzer/ledger_test.go
index 20c0c564dd..dce87b17d9 100644
--- a/agreement/fuzzer/ledger_test.go
+++ b/agreement/fuzzer/ledger_test.go
@@ -206,7 +206,7 @@ func (l *testLedger) Seed(r basics.Round) (committee.Seed, error) {
defer l.mu.Unlock()
if r >= l.nextRound {
- err := fmt.Errorf("Seed for round %v doesn't exists in ledger. Current ledger round is %v", r, l.nextRound-1)
+ err := fmt.Errorf("Seed for round %d doesn't exists in ledger. Current ledger round is %d", r, l.nextRound-1)
return committee.Seed{}, err
}
@@ -219,7 +219,7 @@ func (l *testLedger) LookupDigest(r basics.Round) (crypto.Digest, error) {
defer l.mu.Unlock()
if r >= l.nextRound {
- err := fmt.Errorf("LookupDigest called on future round: %v >= %v! (this is probably a bug)", r, l.nextRound)
+ err := fmt.Errorf("LookupDigest called on future round: %d >= %d! (this is probably a bug)", r, l.nextRound)
panic(err)
}
@@ -231,7 +231,7 @@ func (l *testLedger) BalanceRecord(r basics.Round, a basics.Address) (basics.Bal
defer l.mu.Unlock()
if r >= l.nextRound {
- err := fmt.Errorf("BalanceRecord called on future round: %v >= %v! (this is probably a bug)", r, l.nextRound)
+ err := fmt.Errorf("BalanceRecord called on future round: %d >= %d! (this is probably a bug)", r, l.nextRound)
panic(err)
}
return l.state[a], nil
@@ -242,7 +242,7 @@ func (l *testLedger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
defer l.mu.Unlock()
if r >= l.nextRound {
- err := fmt.Errorf("Circulation called on future round: %v >= %v! (this is probably a bug)", r, l.nextRound)
+ err := fmt.Errorf("Circulation called on future round: %d >= %d! (this is probably a bug)", r, l.nextRound)
panic(err)
}
@@ -263,7 +263,7 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c agreement.Certificate) {
if _, ok := l.entries[e.Round()]; ok {
if l.entries[e.Round()].Digest() != e.Digest() {
- err := fmt.Errorf("testLedger.EnsureBlock: called with conflicting entries in round %v", e.Round())
+ err := fmt.Errorf("testLedger.EnsureBlock: called with conflicting entries in round %d", e.Round())
panic(err)
}
}
@@ -274,7 +274,7 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c agreement.Certificate) {
if l.nextRound == e.Round() {
l.nextRound = e.Round() + 1
} else if l.nextRound < e.Round() {
- err := fmt.Errorf("testLedger.EnsureBlock: attempted to write block in future round: %v < %v", l.nextRound, e.Round())
+ err := fmt.Errorf("testLedger.EnsureBlock: attempted to write block in future round: %d < %d", l.nextRound, e.Round())
panic(err)
}
@@ -282,7 +282,7 @@ func (l *testLedger) EnsureBlock(e bookkeeping.Block, c agreement.Certificate) {
l.catchingUp = false
}
-func (l *testLedger) EnsureDigest(c agreement.Certificate, quit chan struct{}, verifier *agreement.AsyncVoteVerifier) {
+func (l *testLedger) EnsureDigest(c agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
r := c.Round
consistencyCheck := func() bool {
l.mu.Lock()
@@ -290,7 +290,7 @@ func (l *testLedger) EnsureDigest(c agreement.Certificate, quit chan struct{}, v
if r < l.nextRound {
if l.entries[r].Digest() != c.Proposal.BlockDigest {
- err := fmt.Errorf("testLedger.EnsureDigest called with conflicting entries in round %v", r)
+ err := fmt.Errorf("testLedger.EnsureDigest called with conflicting entries in round %d", r)
panic(err)
}
return true
diff --git a/agreement/fuzzer/tests_test.go b/agreement/fuzzer/tests_test.go
index a2599e03f5..89c1217236 100644
--- a/agreement/fuzzer/tests_test.go
+++ b/agreement/fuzzer/tests_test.go
@@ -66,7 +66,7 @@ func printResults(t *testing.T, r *RunResult) {
return
}
require.Truef(t, (r.PostRecoveryHighRound-r.PostRecoveryLowRound <= 1),
- "Initial Rounds %v-%v\nPre Recovery Rounds %v-%v\nPost Recovery Rounds %v-%v",
+ "Initial Rounds %d-%d\nPre Recovery Rounds %d-%d\nPost Recovery Rounds %d-%d",
r.StartLowRound, r.StartHighRound,
r.PreRecoveryLowRound, r.PreRecoveryHighRound,
r.PostRecoveryLowRound, r.PostRecoveryHighRound,
@@ -79,12 +79,12 @@ func printResults(t *testing.T, r *RunResult) {
)
if r.PreRecoveryHighRound != r.PreRecoveryLowRound {
// network got disupted by the filters.
- fmt.Printf("%v partitioned the network ( %v - %v ), but recovered correctly reaching round %v\n", t.Name(), r.PreRecoveryLowRound, r.PreRecoveryHighRound, r.PostRecoveryHighRound)
+ fmt.Printf("%v partitioned the network ( %d - %d ), but recovered correctly reaching round %d\n", t.Name(), r.PreRecoveryLowRound, r.PreRecoveryHighRound, r.PostRecoveryHighRound)
} else {
if r.PreRecoveryHighRound == r.StartLowRound {
- fmt.Printf("%v stalled the network, and the network reached round %v\n", t.Name(), r.PostRecoveryHighRound)
+ fmt.Printf("%v stalled the network, and the network reached round %d\n", t.Name(), r.PostRecoveryHighRound)
} else {
- fmt.Printf("%v did not partition the network, and the network reached round %v\n", t.Name(), r.PostRecoveryHighRound)
+ fmt.Printf("%v did not partition the network, and the network reached round %d\n", t.Name(), r.PostRecoveryHighRound)
}
}
}*/
@@ -112,7 +112,7 @@ func TestCircularNetworkTopology(t *testing.T) {
}
for i := range nodeCounts {
nodeCount := nodeCounts[i]
- t.Run(fmt.Sprintf("TestCircularNetworkTopology-%v", nodeCount),
+ t.Run(fmt.Sprintf("TestCircularNetworkTopology-%d", nodeCount),
func(t *testing.T) {
nodes := nodeCount
topologyConfig := TopologyFilterConfig{
@@ -122,7 +122,7 @@ func TestCircularNetworkTopology(t *testing.T) {
topologyConfig.NodesConnection[i] = []int{(i + nodes - 1) % nodes, (i + 1) % nodes}
}
netConfig := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("circularNetworkTopology-%v", nodes),
+ FuzzerName: fmt.Sprintf("circularNetworkTopology-%d", nodes),
NodesCount: nodes,
Filters: []NetworkFilterFactory{NetworkFilterFactory(MakeTopologyFilter(topologyConfig))},
LogLevel: logging.Info,
@@ -491,7 +491,7 @@ func TestNetworkBandwidth(t *testing.T) {
}
for i := range nodeCounts {
nodeCount := nodeCounts[i]
- t.Run(fmt.Sprintf("TestNetworkBandwidth-%v", nodeCount),
+ t.Run(fmt.Sprintf("TestNetworkBandwidth-%d", nodeCount),
func(t *testing.T) {
nodes := nodeCount
topologyConfig := TopologyFilterConfig{
@@ -518,7 +518,7 @@ func TestNetworkBandwidth(t *testing.T) {
}
netConfig := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("networkBandwidth-%v", nodes),
+ FuzzerName: fmt.Sprintf("networkBandwidth-%d", nodes),
NodesCount: nodes + relayCounts,
OnlineNodes: onlineNodes,
Filters: []NetworkFilterFactory{
@@ -593,7 +593,7 @@ func TestUnstakedNetworkLinearGrowth(t *testing.T) {
statFilterFactory := MakeTrafficStatisticsFilterFactory(statConf)
netConfig := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("networkUnstakedLinearGrowth-%v", nodes),
+ FuzzerName: fmt.Sprintf("networkUnstakedLinearGrowth-%d", nodes),
NodesCount: nodes + relayCount,
OnlineNodes: onlineNodes,
Filters: []NetworkFilterFactory{
@@ -704,7 +704,7 @@ func TestStakedNetworkQuadricGrowth(t *testing.T) {
statFilterFactory := MakeTrafficStatisticsFilterFactory(statConf)
netConfig := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("stakedNetworkQuadricGrowth-%v", nodes),
+ FuzzerName: fmt.Sprintf("stakedNetworkQuadricGrowth-%d", nodes),
NodesCount: nodes + relayCount,
OnlineNodes: onlineNodes,
Filters: []NetworkFilterFactory{
@@ -810,7 +810,7 @@ func TestRegossipinngElimination(t *testing.T) {
}
netConfig := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("networkRegossiping-baseline-%v", nodes),
+ FuzzerName: fmt.Sprintf("networkRegossiping-baseline-%d", nodes),
NodesCount: nodes + relayCounts,
OnlineNodes: onlineNodes,
Filters: []NetworkFilterFactory{
@@ -829,7 +829,7 @@ func TestRegossipinngElimination(t *testing.T) {
validator.Go(netConfig)
netConfig2 := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("networkRegossiping-eliminated-regossip-%v", nodes),
+ FuzzerName: fmt.Sprintf("networkRegossiping-eliminated-regossip-%d", nodes),
NodesCount: nodes + relayCounts,
OnlineNodes: onlineNodes,
Filters: []NetworkFilterFactory{
@@ -910,7 +910,7 @@ func BenchmarkNetworkPerformance(b *testing.B) {
statFilterFactory := MakeTrafficStatisticsFilterFactory(statConf)
netConfig := &FuzzerConfig{
- FuzzerName: fmt.Sprintf("networkUnstakedLinearGrowth-%v", nodes),
+ FuzzerName: fmt.Sprintf("networkUnstakedLinearGrowth-%d", nodes),
NodesCount: nodes + relayCount,
OnlineNodes: onlineNodes,
Filters: []NetworkFilterFactory{
diff --git a/agreement/fuzzer/validator_test.go b/agreement/fuzzer/validator_test.go
index 74d1acaeef..d78a72420d 100644
--- a/agreement/fuzzer/validator_test.go
+++ b/agreement/fuzzer/validator_test.go
@@ -70,7 +70,7 @@ func (v *Validator) CheckNetworkRecovery() {
return
}
require.Truef(v.tb, (v.runResult.PostRecoveryHighRound-v.runResult.PostRecoveryLowRound <= 1),
- "Initial Rounds %v-%v\nPre Recovery Rounds %v-%v\nPost Recovery Rounds %v-%v",
+ "Initial Rounds %d-%d\nPre Recovery Rounds %d-%d\nPost Recovery Rounds %d-%d",
v.runResult.StartLowRound, v.runResult.StartHighRound,
v.runResult.PreRecoveryLowRound, v.runResult.PreRecoveryHighRound,
v.runResult.PostRecoveryLowRound, v.runResult.PostRecoveryHighRound,
@@ -83,12 +83,12 @@ func (v *Validator) CheckNetworkRecovery() {
)
if v.runResult.PreRecoveryHighRound != v.runResult.PreRecoveryLowRound {
// network got disupted by the filters.
- fmt.Printf("%v partitioned the network ( %v - %v ), but recovered correctly reaching round %v\n", v.tb.Name(), v.runResult.PreRecoveryLowRound, v.runResult.PreRecoveryHighRound, v.runResult.PostRecoveryHighRound)
+ fmt.Printf("%v partitioned the network ( %d - %d ), but recovered correctly reaching round %d\n", v.tb.Name(), v.runResult.PreRecoveryLowRound, v.runResult.PreRecoveryHighRound, v.runResult.PostRecoveryHighRound)
} else {
if v.runResult.PreRecoveryHighRound == v.runResult.StartLowRound {
- fmt.Printf("%v stalled the network, and the network reached round %v\n", v.tb.Name(), v.runResult.PostRecoveryHighRound)
+ fmt.Printf("%v stalled the network, and the network reached round %d\n", v.tb.Name(), v.runResult.PostRecoveryHighRound)
} else {
- fmt.Printf("%v did not partition the network, and the network reached round %v\n", v.tb.Name(), v.runResult.PostRecoveryHighRound)
+ fmt.Printf("%v did not partition the network, and the network reached round %d\n", v.tb.Name(), v.runResult.PostRecoveryHighRound)
}
}
}
diff --git a/agreement/fuzzer/voteFilter_test.go b/agreement/fuzzer/voteFilter_test.go
index 16188113ef..044e8e37b7 100644
--- a/agreement/fuzzer/voteFilter_test.go
+++ b/agreement/fuzzer/voteFilter_test.go
@@ -111,7 +111,7 @@ func (n *VoteFilter) Eval(tag protocol.Tag, data []byte, direction string) bool
}
if !included {
if n.config.DebugMessages {
- fmt.Printf("VoteFilter(%s) service-%v : (%v,%v,%v) skipped. Rules (%v-%v, %v-%v, %v-%v)\n", direction, n.nodeID, uv.R.Round, uv.R.Period, uv.R.Step, n.config.IncludeMasks[0].StartRound, n.config.IncludeMasks[0].EndRound, n.config.IncludeMasks[0].StartPeriod, n.config.IncludeMasks[0].EndPeriod, n.config.IncludeMasks[0].StartStep, n.config.IncludeMasks[0].EndStep)
+ fmt.Printf("VoteFilter(%s) service-%v : (%d,%d,%d) skipped. Rules (%d-%d, %d-%d, %d-%d)\n", direction, n.nodeID, uv.R.Round, uv.R.Period, uv.R.Step, n.config.IncludeMasks[0].StartRound, n.config.IncludeMasks[0].EndRound, n.config.IncludeMasks[0].StartPeriod, n.config.IncludeMasks[0].EndPeriod, n.config.IncludeMasks[0].StartStep, n.config.IncludeMasks[0].EndStep)
}
return false
}
@@ -128,13 +128,13 @@ func (n *VoteFilter) Eval(tag protocol.Tag, data []byte, direction string) bool
if excluded {
if n.config.DebugMessages {
- fmt.Printf("VoteFilter(%s) service-%v : (%v,%v,%v) skipped\n", direction, n.nodeID, uv.R.Round, uv.R.Period, uv.R.Step)
+ fmt.Printf("VoteFilter(%s) service-%v : (%d,%d,%d) skipped\n", direction, n.nodeID, uv.R.Round, uv.R.Period, uv.R.Step)
}
return false
}
if n.config.DebugMessages {
- fmt.Printf("VoteFilter(%s) service-%v : (%v,%v,%v) passed\n", direction, n.nodeID, uv.R.Round, uv.R.Period, uv.R.Step)
+ fmt.Printf("VoteFilter(%s) service-%v : (%d,%d,%d) passed\n", direction, n.nodeID, uv.R.Round, uv.R.Period, uv.R.Step)
}
return true
}
diff --git a/agreement/gossip/networkFull_test.go b/agreement/gossip/networkFull_test.go
index b888ee1e50..dc68ab0731 100644
--- a/agreement/gossip/networkFull_test.go
+++ b/agreement/gossip/networkFull_test.go
@@ -54,15 +54,14 @@ func spinNetwork(t *testing.T, nodesCount int) ([]*networkImpl, []*messageCounte
cfg.OutgoingMessageFilterBucketCount = 3
cfg.OutgoingMessageFilterBucketSize = 32
cfg.EnableOutgoingNetworkMessageFiltering = false
+	cfg.DNSBootstrapID = "" // prevent attempts to get bootstrap SRV records from DNS server(s)
log := logging.TestingLog(t)
start := time.Now()
nodesAddresses := []string{}
gossipNodes := []network.GossipNode{}
- phonebooks := make([]*network.ThreadsafePhonebook, nodesCount)
for nodeIdx := 0; nodeIdx < nodesCount; nodeIdx++ {
- phonebooks[nodeIdx] = network.MakeThreadsafePhonebook()
- gossipNode, err := network.NewWebsocketGossipNode(log.With("node", nodeIdx), cfg, phonebooks[nodeIdx], "go-test-agreement-network-genesis", config.Devtestnet)
+ gossipNode, err := network.NewWebsocketGossipNode(log.With("node", nodeIdx), cfg, nodesAddresses, "go-test-agreement-network-genesis", config.Devtestnet)
if err != nil {
t.Fatalf("fail making ws node: %v", err)
}
@@ -73,11 +72,7 @@ func spinNetwork(t *testing.T, nodesCount int) ([]*networkImpl, []*messageCounte
gossipNodes = append(gossipNodes, gossipNode)
}
- for nodeIdx, gossipNode := range gossipNodes {
- others := []string{}
- others = append(others, nodesAddresses[nodeIdx+1:]...)
- phonebooks[nodeIdx].ReplacePeerList(others)
- log.Debugf("phonebook[%d] %#v", nodeIdx, others)
+ for _, gossipNode := range gossipNodes {
gossipNode.RequestConnectOutgoing(false, nil) // no disconnect.
}
diff --git a/agreement/msgp_gen.go b/agreement/msgp_gen.go
index 100fefd7a3..cb534f001a 100644
--- a/agreement/msgp_gen.go
+++ b/agreement/msgp_gen.go
@@ -1211,6 +1211,9 @@ func (z period) MarshalMsg(b []byte) (o []byte, err error) {
func (_ period) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(period)
+ if !ok {
+ _, ok = (z).(*period)
+ }
return ok
}
@@ -2839,6 +2842,9 @@ func (z serializableErrorUnderlying) MarshalMsg(b []byte) (o []byte, err error)
func (_ serializableErrorUnderlying) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(serializableErrorUnderlying)
+ if !ok {
+ _, ok = (z).(*serializableErrorUnderlying)
+ }
return ok
}
@@ -2882,6 +2888,9 @@ func (z step) MarshalMsg(b []byte) (o []byte, err error) {
func (_ step) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(step)
+ if !ok {
+ _, ok = (z).(*step)
+ }
return ok
}
diff --git a/agreement/persistence.go b/agreement/persistence.go
index ce0ee23b0c..ff20602a1a 100644
--- a/agreement/persistence.go
+++ b/agreement/persistence.go
@@ -51,14 +51,14 @@ func persistent(as []action) bool {
// encode serializes the current state into a byte array.
func encode(t timers.Clock, rr rootRouter, p player, a []action) []byte {
var s diskState
- s.Router = protocol.Encode(rr)
- s.Player = protocol.Encode(p)
+ s.Router = protocol.EncodeReflect(rr)
+ s.Player = protocol.EncodeReflect(p)
s.Clock = t.Encode()
for _, act := range a {
s.ActionTypes = append(s.ActionTypes, act.t())
- s.Actions = append(s.Actions, protocol.Encode(act))
+ s.Actions = append(s.Actions, protocol.EncodeReflect(act))
}
- raw := protocol.Encode(s)
+ raw := protocol.EncodeReflect(s)
return raw
}
@@ -173,7 +173,7 @@ func decode(raw []byte, t0 timers.Clock) (t timers.Clock, rr rootRouter, p playe
a2 := []action{}
var s diskState
- err = protocol.Decode(raw, &s)
+ err = protocol.DecodeReflect(raw, &s)
if err != nil {
logging.Base().Errorf("decode (agreement): error decoding retrieved state (len = %v): %v", len(raw), err)
return
@@ -184,20 +184,20 @@ func decode(raw []byte, t0 timers.Clock) (t timers.Clock, rr rootRouter, p playe
return
}
- err = protocol.Decode(s.Player, &p2)
+ err = protocol.DecodeReflect(s.Player, &p2)
if err != nil {
return
}
rr2 = makeRootRouter(p2)
- err = protocol.Decode(s.Router, &rr2)
+ err = protocol.DecodeReflect(s.Router, &rr2)
if err != nil {
return
}
for i := range s.Actions {
act := zeroAction(s.ActionTypes[i])
- err = protocol.Decode(s.Actions[i], &act)
+ err = protocol.DecodeReflect(s.Actions[i], &act)
if err != nil {
return
}
@@ -282,12 +282,6 @@ func (p *asyncPersistenceLoop) loop(ctx context.Context) {
case <-p.ledger.Wait(s.round.SubSaturate(1)):
}
- // sanity check
- _, _, _, _, derr := decode(s.raw, s.clock)
- if derr != nil {
- logging.Base().Errorf("could not decode own encoded disk state: %v", derr)
- }
-
// store the state.
err := persist(p.log, p.crashDb, s.round, s.period, s.step, s.raw)
@@ -299,5 +293,13 @@ func (p *asyncPersistenceLoop) loop(ctx context.Context) {
done: s.done,
}
close(s.events)
+
+ // sanity check; we check it after the fact, since it's not expected to ever happen.
+	// performance-wise, it takes approximately 300000ns to execute, and we don't want it to
+ // block the persist operation.
+ _, _, _, _, derr := decode(s.raw, s.clock)
+ if derr != nil {
+ logging.Base().Errorf("could not decode own encoded disk state: %v", derr)
+ }
}
}
diff --git a/agreement/player.go b/agreement/player.go
index 61c1639621..c2b731bb8e 100644
--- a/agreement/player.go
+++ b/agreement/player.go
@@ -198,7 +198,11 @@ func (p *player) issueNextVote(r routerHandle) []action {
res := r.dispatch(*p, nextThresholdStatusRequestEvent{}, voteMachinePeriod, p.Round, p.Period-1, 0)
nextStatus := res.(nextThresholdStatusEvent) // panic if violate postcondition
if !nextStatus.Bottom {
- // note that this is bottom if we fast-forwarded to this period or entered via a soft threshold.
+ // if we fast-forwarded to this period or entered via a soft/cert threshold,
+ // nextStatus.Bottom will be false and we will next vote bottom.
+ // As long as a majority of honest users (in the cert threshold case) do not vote bottom (as assumed), we are safe.
+ // Note that cert threshold fast-forwarding will never change a next value vote to a next bottom vote -
+ // if a player has voted for a value, they have the block, and should have ended the round.
a.Proposal = nextStatus.Proposal
}
}
@@ -231,8 +235,8 @@ func (p *player) issueFastVote(r routerHandle) (actions []action) {
res := r.dispatch(*p, nextThresholdStatusRequestEvent{}, voteMachinePeriod, p.Round, p.Period-1, 0)
nextStatus := res.(nextThresholdStatusEvent) // panic if violate postcondition
if !nextStatus.Bottom {
- // note that this is bottom if we fast-forwarded to this period or entered via a soft threshold.
a.Step = redo
+ // note that this is bottom if we fast-forwarded to this period or entered via a soft/cert threshold.
a.Proposal = nextStatus.Proposal
}
}
@@ -258,39 +262,53 @@ func (p *player) handleCheckpointEvent(r routerHandle, e checkpointEvent) []acti
func (p *player) handleThresholdEvent(r routerHandle, e thresholdEvent) []action {
r.t.timeR().RecThreshold(e)
- // Special case all cert thresholds: we must not ignore them, because they are the freshest bundle
var actions []action
- if e.t() == certThreshold {
- // this threshold must be for p.Round, and originates from the vote SM tree
- cert := Certificate(e.Bundle)
+ switch e.t() {
+ case certThreshold:
+ // for future periods, fast-forwarding below will ensure correct staging
+ // for past periods, having a freshest certThreshold will prevent losing the block
+ r.dispatch(*p, e, proposalMachine, 0, 0, 0)
+ // Now, also check if we have the block.
res := stagedValue(*p, r, e.Round, e.Period)
- a0 := ensureAction{Payload: res.Payload, PayloadOk: res.Committable, Certificate: cert}
- actions = append(actions, a0)
- as := p.enterRound(r, e, p.Round+1)
- return append(actions, as...)
- }
-
- // We might receive a next threshold event for the previous period due to fast-forwarding or a soft threshold.
- // If we do, this is okay, but the proposalMachine contract-checker will complain.
- // TODO test this case and update the contract-checker so it does not complain when this is benign
- if p.Period >= e.Period+1 {
- return nil
- }
+ if res.Committable {
+ cert := Certificate(e.Bundle)
+ a0 := ensureAction{Payload: res.Payload, Certificate: cert}
+ actions = append(actions, a0)
+ as := p.enterRound(r, e, p.Round+1)
+ return append(actions, as...)
+ }
+ // we don't have the block! We need to ensure we will be able to receive the block.
+ // In addition, hint to the ledger to fetch by digest.
+ actions = append(actions, stageDigestAction{Certificate: Certificate(e.Bundle)})
+ if p.Period < e.Period {
+ actions = append(actions, p.enterPeriod(r, e, e.Period)...)
+ }
+ return actions
- switch e.t() {
case softThreshold:
- if p.Period == e.Period {
- ec := r.dispatch(*p, e, proposalMachine, p.Round, p.Period, 0)
- if ec.t() == proposalCommittable && p.Step <= cert {
- actions = append(actions, p.issueCertVote(r, ec.(committableEvent)))
- }
- return actions
+ // note that it is ok not to stage softThresholds from previous periods; relaying the pinned block
+ // handles any edge case (w.r.t. resynchronization, at least)
+ if p.Period > e.Period {
+ return nil
}
- return p.enterPeriod(r, e, e.Period)
+ if p.Period < e.Period {
+ return p.enterPeriod(r, e, e.Period)
+ }
+ ec := r.dispatch(*p, e, proposalMachine, p.Round, p.Period, 0)
+ if ec.t() == proposalCommittable && p.Step <= cert {
+ actions = append(actions, p.issueCertVote(r, ec.(committableEvent)))
+ }
+ return actions
+
case nextThreshold:
+ // We might receive a next threshold event for the previous period due to fast-forwarding or a soft threshold.
+ // If we do, this is okay, but the proposalMachine contract-checker will complain.
+ // TODO test this case and update the contract-checker so it does not complain when this is benign
+ if p.Period > e.Period {
+ return nil
+ }
return p.enterPeriod(r, e, e.Period+1)
default:
- // certThreshold was handled previously
panic("bad event")
}
}
@@ -335,11 +353,17 @@ func (p *player) enterPeriod(r routerHandle, source thresholdEvent, target perio
func (p *player) enterRound(r routerHandle, source event, target round) []action {
var actions []action
- // this happens here so that the proposalMachine contract does not complain
- e := r.dispatch(*p, source, proposalMachine, target, 0, 0)
- if source.t() != roundInterruption {
+ newRoundEvent := source
+ // passing in a cert threshold to the proposalMachine is now ambiguous,
+ // so replace with an explicit new round event.
+ // In addition, handle a new source: payloadVerified (which can trigger new round if
+ // received after cert threshold)
+ if source.t() == certThreshold || source.t() == payloadVerified { // i.e., source.t() != roundInterruption
r.t.logRoundStart(*p, target)
+ newRoundEvent = roundInterruptionEvent{Round: target}
}
+ // this happens here so that the proposalMachine contract does not complain
+ e := r.dispatch(*p, newRoundEvent, proposalMachine, target, 0, 0)
p.LastConcluding = p.Step
p.Round = target
@@ -365,7 +389,6 @@ func (p *player) enterRound(r routerHandle, source event, target round) []action
}
// we might need to handle a pipelined threshold event
-
res := r.dispatch(*p, freshestBundleRequestEvent{}, voteMachineRound, p.Round, 0, 0)
freshestRes := res.(freshestBundleEvent) // panic if violate postcondition
if freshestRes.Ok {
@@ -540,6 +563,20 @@ func (p *player) handleMessageEvent(r routerHandle, e messageEvent) (actions []a
a := relayAction(e, protocol.ProposalPayloadTag, compoundMessage{Proposal: up, Vote: uv})
actions = append(actions, a)
+ // If the payload is valid, check it against any received cert threshold.
+		// Of course, this should only trigger for the payloadVerified case.
+ // This allows us to handle late payloads (relative to cert-bundles, i.e., certificates) without resorting to catchup.
+ if ef.t() == proposalCommittable || ef.t() == payloadAccepted {
+ freshestRes := r.dispatch(*p, freshestBundleRequestEvent{}, voteMachineRound, p.Round, 0, 0).(freshestBundleEvent)
+ if freshestRes.Ok && freshestRes.Event.t() == certThreshold && freshestRes.Event.Proposal == e.Input.Proposal.value() {
+ cert := Certificate(freshestRes.Event.Bundle)
+ a0 := ensureAction{Payload: e.Input.Proposal, Certificate: cert}
+ actions = append(actions, a0)
+ as := p.enterRound(r, delegatedE, cert.Round+1)
+ return append(actions, as...)
+ }
+ }
+
if ef.t() == proposalCommittable && p.Step <= cert {
actions = append(actions, p.issueCertVote(r, ef.(committableEvent)))
}
@@ -578,7 +615,7 @@ func (p *player) handleMessageEvent(r routerHandle, e messageEvent) (actions []a
}
if e.t() == bundlePresent {
ub := e.Input.UnauthenticatedBundle
- return append(actions, verifyBundleAction(e, ub.Round, ub.Period))
+ return append(actions, verifyBundleAction(e, ub.Round, ub.Period, ub.Step))
}
a0 := relayAction(e, protocol.VoteBundleTag, ef.(thresholdEvent).Bundle)
a1 := p.handle(r, ef)
diff --git a/agreement/player_test.go b/agreement/player_test.go
index f2f505406b..719d04ee81 100644
--- a/agreement/player_test.go
+++ b/agreement/player_test.go
@@ -24,10 +24,20 @@ import (
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/committee" //TODO(upgrade) remove this line
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
+// TODO(upgrade) remove the entire lessMaybeBuggy function once the upgrade goes through
+func lessMaybeBuggy(cred, other committee.Credential) bool {
+ // this function calls either Less or LessBuggy depending on ConsensusCurrentVersion, which is what the agreement tests use
+ if config.Consensus[protocol.ConsensusCurrentVersion].UseBuggyProposalLowestOutput {
+ return cred.LessBuggy(other)
+ }
+ return cred.Less(other)
+}
+
var playerTracer tracer
func init() {
@@ -59,7 +69,8 @@ func generateProposalEvents(t *testing.T, player player, accs testAccountData, f
lowestCredential := votes[0].Cred
lowestProposal = votes[0].R.Proposal
for _, vote := range votes {
- if vote.Cred.Less(lowestCredential) {
+ // if vote.Cred.Less(lowestCredential) { //TODO(upgrade) uncomment this line
+ if lessMaybeBuggy(vote.Cred, lowestCredential) { // TODO(upgrade) remove this line
lowestCredential = vote.Cred
lowestProposal = vote.R.Proposal
}
@@ -1424,9 +1435,71 @@ func TestPlayerProposesNewRound(t *testing.T) {
const r = round(209)
const p = period(0)
pWhite, pM, helper := setupP(t, r-1, p, soft)
+ pP, pV := helper.MakeRandomProposalPayload(t, r-1)
+
+ // send a payload
+ // store an arbitrary proposal/payload
+ vVote := helper.MakeVerifiedVote(t, 0, r-1, p, propose, *pV)
+ inMsg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: vVote,
+ UnauthenticatedVote: vVote.u(),
+ },
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
// gen cert to move into the next round
- pV := helper.MakeRandomProposalValue()
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r-1, p, cert, *pV)
+ }
+ bun := unauthenticatedBundle{
+ Round: r - 1,
+ Period: p,
+ Proposal: *pV,
+ }
+ inMsg = messageEvent{
+ T: bundleVerified,
+ Input: message{
+ Bundle: bundle{
+ U: bun,
+ Votes: votes,
+ },
+ UnauthenticatedBundle: bun,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
+ require.Equalf(t, r, pWhite.Round, "player did not enter new round")
+ require.Equalf(t, period(0), pWhite.Period, "player did not enter period 0 in new round")
+ assembleEvent := ev(pseudonodeAction{T: assemble, Round: r, Period: 0})
+ require.Truef(t, pM.getTrace().Contains(assembleEvent), "Player should try to assemble new proposal")
+}
+
+func TestPlayerCertificateThenPayloadEntersNewRound(t *testing.T) {
+ // player should create a new proposal on new round
+ const r = round(209)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r-1, p, soft)
+ pP, pV := helper.MakeRandomProposalPayload(t, r-1)
+
+ // gen cert; this should not advance into next round
votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
votes[i] = helper.MakeVerifiedVote(t, i, r-1, p, cert, *pV)
@@ -1451,9 +1524,24 @@ func TestPlayerProposesNewRound(t *testing.T) {
require.NoError(t, err)
require.NoError(t, panicErr)
+ require.Equalf(t, r-1, pWhite.Round, "player entered new round but shouldn't have without payload")
+ assembleEvent := ev(pseudonodeAction{T: assemble, Round: r, Period: 0})
+ require.Falsef(t, pM.getTrace().Contains(assembleEvent), "Player should not try to assemble new proposal without new round")
+
+ // send a payload corresponding with previous cert. now we should enter new round
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
require.Equalf(t, r, pWhite.Round, "player did not enter new round")
require.Equalf(t, period(0), pWhite.Period, "player did not enter period 0 in new round")
- assembleEvent := ev(pseudonodeAction{T: assemble, Round: r, Period: 0})
require.Truef(t, pM.getTrace().Contains(assembleEvent), "Player should try to assemble new proposal")
}
@@ -1602,9 +1690,33 @@ func TestPlayerCommitsCertThreshold(t *testing.T) {
const r = round(20239)
const p = period(1001)
pWhite, pM, helper := setupP(t, r-1, p, soft)
+ pP, pV := helper.MakeRandomProposalPayload(t, r-1)
+
+ // send a payload
+ // store an arbitrary proposal/payload
+ vVote := helper.MakeVerifiedVote(t, 0, r-1, p, propose, *pV)
+ inMsg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: vVote,
+ UnauthenticatedVote: vVote.u(),
+ },
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
// gen cert to move into the next round
- pV := helper.MakeRandomProposalValue()
votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
votes[i] = helper.MakeVerifiedVote(t, i, r-1, p, cert, *pV)
@@ -1614,7 +1726,7 @@ func TestPlayerCommitsCertThreshold(t *testing.T) {
Period: p,
Proposal: *pV,
}
- inMsg := messageEvent{
+ inMsg = messageEvent{
T: bundleVerified,
Input: message{
Bundle: bundle{
@@ -1625,13 +1737,13 @@ func TestPlayerCommitsCertThreshold(t *testing.T) {
},
Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
}
- err, panicErr := pM.transition(inMsg)
+ err, panicErr = pM.transition(inMsg)
require.NoError(t, err)
require.NoError(t, panicErr)
require.Equalf(t, r, pWhite.Round, "player did not enter new round")
require.Equalf(t, period(0), pWhite.Period, "player did not enter period 0 in new round")
- commitEvent := ev(ensureAction{Certificate: Certificate(bun)})
+ commitEvent := ev(ensureAction{Certificate: Certificate(bun), Payload: *pP})
require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should try to ensure block/digest on ledger")
}
@@ -2172,14 +2284,39 @@ func TestPlayerRequestsPipelinedPayloadVerification(t *testing.T) {
require.Falsef(t, pM.getTrace().Contains(verifyEvent), "Player should not verify payload from r + 1")
// now enter next round
+ pP, pV := helper.MakeRandomProposalPayload(t, r)
+ // send a payload
+ // store an arbitrary proposal/payload
+ vVote := helper.MakeVerifiedVote(t, 0, r, p, propose, *pV)
+ inMsg = messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: vVote,
+ UnauthenticatedVote: vVote.u(),
+ },
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
- votes[i] = helper.MakeVerifiedVote(t, i, r, p, cert, *pVTwo)
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p, cert, *pV)
}
bun := unauthenticatedBundle{
Round: r,
Period: p,
- Proposal: *pVTwo,
+ Proposal: *pV,
}
inMsg = messageEvent{
T: bundleVerified,
@@ -2198,7 +2335,7 @@ func TestPlayerRequestsPipelinedPayloadVerification(t *testing.T) {
require.Equalf(t, r+1, pWhite.Round, "player did not enter new round")
require.Equalf(t, period(0), pWhite.Period, "player did not enter period 0 in new round")
- commitEvent := ev(ensureAction{Certificate: Certificate(bun)})
+ commitEvent := ev(ensureAction{Certificate: Certificate(bun), Payload: *pP})
require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should try to ensure block/digest on ledger")
// make sure we sent out pipelined payload verify requests
@@ -2253,7 +2390,30 @@ func TestPlayerHandlesPipelinedThresholds(t *testing.T) {
}
// now, enter next round
- _, pVTwo := helper.MakeRandomProposalPayload(t, r)
+ pPTwo, pVTwo := helper.MakeRandomProposalPayload(t, r)
+ // store pPTwo
+ vVote := helper.MakeVerifiedVote(t, 0, r, p, propose, *pVTwo)
+ inMsg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: vVote,
+ UnauthenticatedVote: vVote.u(),
+ },
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pPTwo,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
votes = make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
votes[i] = helper.MakeVerifiedVote(t, i, r, p, cert, *pVTwo)
@@ -2263,7 +2423,7 @@ func TestPlayerHandlesPipelinedThresholds(t *testing.T) {
Period: p,
Proposal: *pVTwo,
}
- inMsg := messageEvent{
+ inMsg = messageEvent{
T: bundleVerified,
Input: message{
Bundle: bundle{
@@ -2274,7 +2434,7 @@ func TestPlayerHandlesPipelinedThresholds(t *testing.T) {
},
Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
}
- err, panicErr := pM.transition(inMsg)
+ err, panicErr = pM.transition(inMsg)
require.NoError(t, err)
require.NoError(t, panicErr)
require.Equalf(t, r+1, pWhite.Round, "player did not enter new round")
@@ -2304,7 +2464,7 @@ func TestPlayerRegression_EnsuresCertThreshFromOldPeriod_8ba23942(t *testing.T)
pWhite, pM, helper := setupP(t, r, p, cert)
// send a next threshold to send player into period 1
- pV := helper.MakeRandomProposalValue()
+ pP, pV := helper.MakeRandomProposalPayload(t, r)
votes := make([]vote, int(next.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
for i := 0; i < int(next.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
votes[i] = helper.MakeVerifiedVote(t, i, r, p, next, *pV)
@@ -2332,6 +2492,17 @@ func TestPlayerRegression_EnsuresCertThreshFromOldPeriod_8ba23942(t *testing.T)
require.Equalf(t, p+1, pWhite.Period, "player did not fast forward to new period")
// gen cert threshold in period 0, should move into next round
+ // store an arbitrary payload. It should be accepted since the next quorum pinned pV.
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
votes = make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
votes[i] = helper.MakeVerifiedVote(t, i, r, p, cert, *pV) // period 0
@@ -2355,8 +2526,340 @@ func TestPlayerRegression_EnsuresCertThreshFromOldPeriod_8ba23942(t *testing.T)
}
require.Equalf(t, r+1, pWhite.Round, "player did not enter new round")
require.Equalf(t, period(0), pWhite.Period, "player did not enter period 0 in new round")
- commitEvent := ev(ensureAction{Certificate: Certificate(bun)})
- require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should try to ensure block/digest on ledger")
+ commitEvent := ev(ensureAction{Certificate: Certificate(bun), Payload: *pP})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should try to ensure block on ledger")
+}
+
+func TestPlayer_RejectsCertThresholdFromPreviousRound(t *testing.T) {
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ _, pV := helper.MakeRandomProposalPayload(t, r)
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r-1, p+1, cert, *pV)
+ msg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: votes[i],
+ UnauthenticatedVote: votes[i].u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(msg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ }
+ bun := unauthenticatedBundle{
+ Round: r - 1,
+ Period: p + 1,
+ Step: cert,
+ Proposal: *pV,
+ }
+ require.Equalf(t, r, pWhite.Round, "player entered new round... bad!")
+ require.Equalf(t, p, pWhite.Period, "player changed periods... bad!")
+ commitEvent := ev(stageDigestAction{Certificate: Certificate(bun)})
+ require.Falsef(t, pM.getTrace().Contains(commitEvent), "Player should not try to stage anything")
+}
+
+func TestPlayer_CommitsCertThresholdWithoutPreStaging(t *testing.T) {
+ // if player has pinned a block, then sees a cert threshold, it should commit
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ // send a next threshold to send player into period 1
+ pP, pV := helper.MakeRandomProposalPayload(t, r)
+ votes := make([]vote, int(next.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(next.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p, next, *pV)
+ }
+ bun := unauthenticatedBundle{
+ Round: r,
+ Period: p,
+ Step: next,
+ Proposal: *pV,
+ }
+ inMsg := messageEvent{
+ T: bundleVerified,
+ Input: message{
+ Bundle: bundle{
+ U: bun,
+ Votes: votes,
+ },
+ UnauthenticatedBundle: bun,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ require.Equalf(t, p+1, pWhite.Period, "player did not fast forward to new period")
+
+ // store an arbitrary payload. It should be accepted since the next quorum pinned pV.
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
+ // generate a cert threshold for period 1. This should ensureBlock since we have the payload.
+ votes = make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p+1, cert, *pV) // period 1
+ msg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: votes[i],
+ UnauthenticatedVote: votes[i].u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(msg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ }
+ bun = unauthenticatedBundle{
+ Round: r,
+ Period: p + 1,
+ Step: cert,
+ Proposal: *pV,
+ }
+ require.Equalf(t, r+1, pWhite.Round, "player did not enter new round")
+ require.Equalf(t, period(0), pWhite.Period, "player did not enter period 0 in new round")
+ commitEvent := ev(ensureAction{Certificate: Certificate(bun), Payload: *pP})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should try to ensure block on ledger")
+}
+
+func TestPlayer_CertThresholdDoesNotBlock(t *testing.T) {
+ // check that ledger gets a hint to stage digest
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ _, pV := helper.MakeRandomProposalPayload(t, r)
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p, cert, *pV)
+ msg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: votes[i],
+ UnauthenticatedVote: votes[i].u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(msg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ }
+ bun := unauthenticatedBundle{
+ Round: r,
+ Period: p,
+ Step: cert,
+ Proposal: *pV,
+ }
+ require.Equalf(t, r, pWhite.Round, "player entered new round... bad!")
+ require.Equalf(t, p, pWhite.Period, "player changed periods... bad!")
+ commitEvent := ev(stageDigestAction{Certificate: Certificate(bun)})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should have staged something but didn't")
+}
+
+func TestPlayer_CertThresholdDoesNotBlockFuturePeriod(t *testing.T) {
+ // check that ledger gets a hint to stage digest
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ _, pV := helper.MakeRandomProposalPayload(t, r)
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p+1, cert, *pV)
+ msg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: votes[i],
+ UnauthenticatedVote: votes[i].u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(msg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ }
+ bun := unauthenticatedBundle{
+ Round: r,
+ Period: p + 1,
+ Step: cert,
+ Proposal: *pV,
+ }
+ require.Equalf(t, r, pWhite.Round, "player entered new round... bad!")
+ require.Equalf(t, p+1, pWhite.Period, "player should have changed periods but didn't")
+ commitEvent := ev(stageDigestAction{Certificate: Certificate(bun)})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should have staged something but didn't")
+}
+
+func TestPlayer_CertThresholdFastForwards(t *testing.T) {
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ _, pV := helper.MakeRandomProposalPayload(t, r)
+ // send a bundle - individual votes will get filtered.
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p+2, cert, *pV)
+ }
+ bun := unauthenticatedBundle{
+ Round: r,
+ Period: p + 2,
+ Step: cert,
+ Proposal: *pV,
+ }
+ inMsg := messageEvent{
+ T: bundleVerified,
+ Input: message{
+ Bundle: bundle{
+ U: bun,
+ Votes: votes,
+ },
+ UnauthenticatedBundle: bun,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
+ require.Equalf(t, r, pWhite.Round, "player entered new round... bad!")
+ require.Equalf(t, p+2, pWhite.Period, "player should have changed periods but didn't")
+ commitEvent := ev(stageDigestAction{Certificate: Certificate(bun)})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should have staged something but didn't")
+}
+
+func TestPlayer_CertThresholdCommitsFuturePeriodIfAlreadyHasBlock(t *testing.T) {
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ payload, pV := helper.MakeRandomProposalPayload(t, r)
+ // give player a proposal/payload.
+ proposalVote := helper.MakeVerifiedVote(t, 0, r, p, propose, *pV)
+ inMsg := messageEvent{
+ T: voteVerified,
+ Input: message{
+ Vote: proposalVote,
+ UnauthenticatedVote: proposalVote.u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *payload,
+ UnauthenticatedProposal: payload.u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
+ // send a bundle - individual votes will get filtered.
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p+2, cert, *pV)
+ }
+ bun := unauthenticatedBundle{
+ Round: r,
+ Period: p + 2,
+ Step: cert,
+ Proposal: *pV,
+ }
+ inMsg = messageEvent{
+ T: bundleVerified,
+ Input: message{
+ Bundle: bundle{
+ U: bun,
+ Votes: votes,
+ },
+ UnauthenticatedBundle: bun,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
+ require.Equalf(t, r+1, pWhite.Round, "player did not enter new round... bad!")
+ require.Equalf(t, period(0), pWhite.Period, "player should have entered period 0 of new round but didn't")
+ commitEvent := ev(ensureAction{Certificate: Certificate(bun), Payload: *payload})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should have committed a block but didn't")
+}
+
+func TestPlayer_PayloadAfterCertThresholdCommits(t *testing.T) {
+ const r = round(20)
+ const p = period(0)
+ pWhite, pM, helper := setupP(t, r, p, cert)
+
+ pP, pV := helper.MakeRandomProposalPayload(t, r)
+ // send a bundle - individual votes will get filtered.
+ votes := make([]vote, int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])))
+ for i := 0; i < int(cert.threshold(config.Consensus[protocol.ConsensusCurrentVersion])); i++ {
+ votes[i] = helper.MakeVerifiedVote(t, i, r, p+2, cert, *pV)
+ }
+ bun := unauthenticatedBundle{
+ Round: r,
+ Period: p + 2,
+ Step: cert,
+ Proposal: *pV,
+ }
+ inMsg := messageEvent{
+ T: bundleVerified,
+ Input: message{
+ Bundle: bundle{
+ U: bun,
+ Votes: votes,
+ },
+ UnauthenticatedBundle: bun,
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr := pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+
+ require.Equalf(t, r, pWhite.Round, "player entered new round... bad!")
+ require.Equalf(t, p+2, pWhite.Period, "player should have changed periods but didn't")
+ commitEvent := ev(stageDigestAction{Certificate: Certificate(bun)})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should have staged something but didn't")
+ pM.resetTrace()
+
+ // now, deliver payload, commit.
+ inMsg = messageEvent{
+ T: payloadVerified,
+ Input: message{
+ Proposal: *pP,
+ UnauthenticatedProposal: pP.u(),
+ },
+ Proto: ConsensusVersionView{Version: protocol.ConsensusCurrentVersion},
+ }
+ err, panicErr = pM.transition(inMsg)
+ require.NoError(t, err)
+ require.NoError(t, panicErr)
+ require.Equalf(t, r+1, pWhite.Round, "player did not enter new round... bad!")
+ require.Equalf(t, period(0), pWhite.Period, "player should have entered period 0 but didn't")
+ commitEvent = ev(ensureAction{Certificate: Certificate(bun), Payload: *pP})
+ require.Truef(t, pM.getTrace().Contains(commitEvent), "Player should have committed but didn't")
}
func TestPlayerAlwaysResynchsPinnedValue(t *testing.T) {
diff --git a/agreement/proposal.go b/agreement/proposal.go
index 84cbfe7fa8..1e305d0f23 100644
--- a/agreement/proposal.go
+++ b/agreement/proposal.go
@@ -62,7 +62,7 @@ type unauthenticatedProposal struct {
// ToBeHashed implements the Hashable interface.
func (p unauthenticatedProposal) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.Payload, protocol.Encode(p)
+ return protocol.Payload, protocol.Encode(&p)
}
// value returns the proposal-value associated with this proposal.
@@ -111,7 +111,7 @@ type proposerSeed struct {
// ToBeHashed implements the Hashable interface.
func (s proposerSeed) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.ProposerSeed, protocol.Encode(s)
+ return protocol.ProposerSeed, protocol.Encode(&s)
}
// A seedInput is a Hashable input to seed rerandomization.
@@ -124,7 +124,7 @@ type seedInput struct {
// ToBeHashed implements the Hashable interface.
func (i seedInput) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.ProposerSeed, protocol.Encode(i)
+ return protocol.ProposerSeed, protocol.Encode(&i)
}
func deriveNewSeed(address basics.Address, vrf *crypto.VRFSecrets, rnd round, period period, ledger LedgerReader) (newSeed committee.Seed, seedProof crypto.VRFProof, reterr error) {
@@ -133,13 +133,13 @@ func deriveNewSeed(address basics.Address, vrf *crypto.VRFSecrets, rnd round, pe
cparams, err := ledger.ConsensusParams(ParamsRound(rnd))
if err != nil {
- err = fmt.Errorf("failed to obtain consensus parameters in round %v: %v", ParamsRound(rnd), err)
+ err = fmt.Errorf("failed to obtain consensus parameters in round %d: %v", ParamsRound(rnd), err)
return
}
var alpha crypto.Digest
prevSeed, err := ledger.Seed(seedRound(rnd, cparams))
if err != nil {
- reterr = fmt.Errorf("failed read seed of round %v: %v", seedRound(rnd, cparams), err)
+ reterr = fmt.Errorf("failed read seed of round %d: %v", seedRound(rnd, cparams), err)
return
}
@@ -166,7 +166,7 @@ func deriveNewSeed(address basics.Address, vrf *crypto.VRFSecrets, rnd round, pe
digrnd := rnd.SubSaturate(basics.Round(cparams.SeedLookback * cparams.SeedRefreshInterval))
oldDigest, err := ledger.LookupDigest(digrnd)
if err != nil {
- reterr = fmt.Errorf("could not lookup old entry digest (for seed) from round %v: %v", digrnd, err)
+ reterr = fmt.Errorf("could not lookup old entry digest (for seed) from round %d: %v", digrnd, err)
return
}
input.History = oldDigest
@@ -180,19 +180,19 @@ func verifyNewSeed(p unauthenticatedProposal, ledger LedgerReader) error {
rnd := p.Round()
cparams, err := ledger.ConsensusParams(ParamsRound(rnd))
if err != nil {
- return fmt.Errorf("failed to obtain consensus parameters in round %v: %v", ParamsRound(rnd), err)
+ return fmt.Errorf("failed to obtain consensus parameters in round %d: %v", ParamsRound(rnd), err)
}
balanceRound := balanceRound(rnd, cparams)
proposerRecord, err := ledger.BalanceRecord(balanceRound, value.OriginalProposer)
if err != nil {
- return fmt.Errorf("failed to obtain balance record for address %v in round %v: %v", value.OriginalProposer, balanceRound, err)
+ return fmt.Errorf("failed to obtain balance record for address %v in round %d: %v", value.OriginalProposer, balanceRound, err)
}
var alpha crypto.Digest
prevSeed, err := ledger.Seed(seedRound(rnd, cparams))
if err != nil {
- return fmt.Errorf("failed read seed of round %v: %v", seedRound(rnd, cparams), err)
+ return fmt.Errorf("failed read seed of round %d: %v", seedRound(rnd, cparams), err)
}
if value.OriginalPeriod == 0 {
@@ -218,7 +218,7 @@ func verifyNewSeed(p unauthenticatedProposal, ledger LedgerReader) error {
digrnd := rnd.SubSaturate(basics.Round(cparams.SeedLookback * cparams.SeedRefreshInterval))
oldDigest, err := ledger.LookupDigest(digrnd)
if err != nil {
- return fmt.Errorf("could not lookup old entry digest (for seed) from round %v: %v", digrnd, err)
+ return fmt.Errorf("could not lookup old entry digest (for seed) from round %d: %v", digrnd, err)
}
input.History = oldDigest
}
diff --git a/agreement/proposalManager.go b/agreement/proposalManager.go
index 21013c438f..eb2276d7c4 100644
--- a/agreement/proposalManager.go
+++ b/agreement/proposalManager.go
@@ -45,12 +45,12 @@ func (m *proposalManager) underlying() listener {
// - It applies message relay rules to votePresent, voteVerified,
// payloadPresent, and payloadVerified events.
//
-// - It enters a new round given a roundInterruption or a certThreshold event.
+// - It enters a new round given a roundInterruption.
//
// - It enters a new period given a nextThreshold event. It also enters a new
-// period given a softThreshold event, if necessary.
-// - On entering a new period due to a softThreshold event, it dispatches
-// this event to the proposalMachineRound.
+// period given a softThreshold/certThreshold event, if necessary.
+// - On entering a new period due to a softThreshold/certThreshold, it
+// dispatches this event to the proposalMachineRound.
//
// For more details, see each method's respective documentation below.
func (m *proposalManager) handle(r routerHandle, p player, e event) event {
@@ -59,9 +59,7 @@ func (m *proposalManager) handle(r routerHandle, p player, e event) event {
return m.handleMessageEvent(r, p, e.(filterableMessageEvent))
case roundInterruption:
return m.handleNewRound(r, p, e.(roundInterruptionEvent).Round)
- case certThreshold:
- return m.handleNewRound(r, p, e.(thresholdEvent).Round+1)
- case softThreshold:
+ case softThreshold, certThreshold:
e := e.(thresholdEvent)
if p.Period < e.Period {
r = m.handleNewPeriod(r, p, e)
@@ -85,9 +83,8 @@ func (m *proposalManager) handleNewRound(r routerHandle, p player, round round)
return e
}
-// handleNewPeriod is called for nextThreshold events and softThreshold events
-// (when the softThreshold event is for a new period). These events are
-// dispatched to the proposalMachineRound, and an empty event is returned.
+// handleNewPeriod is called for threshold events that move the state machine into a new period.
+// These events are dispatched to the proposalMachineRound, and an empty event is returned.
func (m *proposalManager) handleNewPeriod(r routerHandle, p player, e thresholdEvent) routerHandle {
target := e.Period
if e.t() == nextThreshold {
@@ -227,7 +224,7 @@ func (m *proposalManager) filterProposalVote(p player, r routerHandle, uv unauth
qe := voteFilterRequestEvent{RawVote: uv.R}
sawVote := r.dispatch(p, qe, proposalMachinePeriod, uv.R.Round, uv.R.Period, 0)
if sawVote.t() == voteFiltered {
- return fmt.Errorf("proposalManager: filtered proposal-vote: sender %v had already sent a vote in round %v period %v", uv.R.Sender, uv.R.Round, uv.R.Period)
+ return fmt.Errorf("proposalManager: filtered proposal-vote: sender %v had already sent a vote in round %d period %d", uv.R.Sender, uv.R.Round, uv.R.Period)
}
return nil
}
@@ -237,17 +234,17 @@ func proposalFresh(freshData freshnessData, vote unauthenticatedVote) error {
switch vote.R.Round {
case freshData.PlayerRound:
if freshData.PlayerPeriod != 0 && freshData.PlayerPeriod-1 > vote.R.Period {
- return fmt.Errorf("filtered stale proposal: period %v - 1 > %v", freshData.PlayerPeriod, vote.R.Period)
+ return fmt.Errorf("filtered stale proposal: period %d - 1 > %d", freshData.PlayerPeriod, vote.R.Period)
}
if freshData.PlayerPeriod+1 < vote.R.Period {
- return fmt.Errorf("filtered premature proposal: period %v + 1 < %v", freshData.PlayerPeriod, vote.R.Period)
+ return fmt.Errorf("filtered premature proposal: period %d + 1 < %d", freshData.PlayerPeriod, vote.R.Period)
}
case freshData.PlayerRound + 1:
if vote.R.Period != 0 {
- return fmt.Errorf("filtered premature proposal from next round: period %v > 0", vote.R.Period)
+ return fmt.Errorf("filtered premature proposal from next round: period %d > 0", vote.R.Period)
}
default:
- return fmt.Errorf("filtered proposal from bad round: p.Round=%v, vote.Round=%v", freshData.PlayerRound, vote.R.Round)
+ return fmt.Errorf("filtered proposal from bad round: p.Round=%d, vote.Round=%d", freshData.PlayerRound, vote.R.Round)
}
return nil
}
diff --git a/agreement/proposalManager_test.go b/agreement/proposalManager_test.go
index eb38161578..5cc80d1067 100644
--- a/agreement/proposalManager_test.go
+++ b/agreement/proposalManager_test.go
@@ -105,8 +105,6 @@ func TestProposalManagerThresholdSoftStage(t *testing.T) {
}
func TestProposalManagerThresholdCert(t *testing.T) {
- // this event is only actually generated by adversaries.
- // check that manager tells store to enter new round
const p = 10
const r = 1
_, pM, helper := setupManager(t, r)
@@ -126,10 +124,19 @@ func TestProposalManagerThresholdCert(t *testing.T) {
require.NoError(t, err)
require.NoError(t, panicErr)
- // check that the inner trace contains a boost period message
- nxtPeriodEvent := newRoundEvent{}
- require.Truef(t, pM.getTraceVisible().Contains(nxtPeriodEvent),
- "Proposal Manager must tell lower level to increment round")
+ // check that the inner trace contains a forwarded cert threshold
+ // (this is very much white box testing)
+ count := 0
+ require.Truef(t, pM.getTraceVisible().ContainsFn(func(b event) bool {
+ if b.ComparableStr() == inMsg.ComparableStr() {
+ count++
+ }
+ if count > 1 {
+ return true
+ }
+ return false
+ }),
+ "Proposal Manager must forward cert threshold to proposal round machine")
}
func TestProposalManagerThresholdNext(t *testing.T) {
diff --git a/agreement/proposalStore.go b/agreement/proposalStore.go
index dff13095be..4f2d1e6c49 100644
--- a/agreement/proposalStore.go
+++ b/agreement/proposalStore.go
@@ -170,8 +170,8 @@ func (store *proposalStore) underlying() listener {
// credential it has seen and possibly a cached authenticator (if not, it
// returns an empty event).
//
-// - A softThreshold event is delivered when the player state has observed a
-// quorum of soft votes for the current round and period. The proposalStore
+// - A soft/certThreshold event is delivered when the player state has observed a
+// quorum of soft/cert votes for the current round and period. The proposalStore
// dispatches this event to the proposalMachinePeriod. If the proposalStore
// has the proposal payload corresponding to the proposal-value of the quorum,
// it returns a proposalCommittable event; otherwise, it propagates the
@@ -307,13 +307,13 @@ func (store *proposalStore) handle(r routerHandle, p player, e event) event {
}
return emptyEvent{}
- case softThreshold:
+ case softThreshold, certThreshold:
te := e.(thresholdEvent)
- // in particular, this will set te.Period.Staging = val(softThreshold)
- // as a consequence, only val(softThreshold) will generate proposalAccepted in the future
- // therefore store.Relevant[te.Period] will not be reset
+ // in particular, this will set te.Period.Staging = val(softThreshold/certThreshold)
+ // as a consequence, only val(softThreshold/certThreshold) will generate proposalAccepted in the future
+ // for this period, therefore store.Relevant[te.Period] will not be reset
e := r.dispatch(p, e, proposalMachinePeriod, te.Round, te.Period, 0).(proposalAcceptedEvent)
- // return commitableEvent if ready; else, return proposalAcceptedEvent
+ // return committableEvent if ready; else, return proposalAcceptedEvent
if store.Assemblers[e.Proposal].Assembled {
authVote := store.Assemblers[e.Proposal].authenticator(p.Period)
return committableEvent{
diff --git a/agreement/proposalStore_test.go b/agreement/proposalStore_test.go
index cf1fabae4d..4bc0903e95 100644
--- a/agreement/proposalStore_test.go
+++ b/agreement/proposalStore_test.go
@@ -61,7 +61,7 @@ func TestBlockAssemblerPipeline(t *testing.T) {
round := player.Round
period := player.Period
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
accountIndex := 0
proposal, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, period, ledger)
@@ -127,7 +127,7 @@ func TestBlockAssemblerBind(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
@@ -193,7 +193,7 @@ func TestBlockAssemblerAuthenticator(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -257,7 +257,7 @@ func TestBlockAssemblerTrim(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, _, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -329,7 +329,7 @@ func TestProposalStoreT(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, proposalV, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -401,7 +401,7 @@ func TestProposalStoreUnderlying(t *testing.T) {
player, _, accounts, factory, ledger := testSetup(0)
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
proposalPayload, proposalV, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
@@ -463,7 +463,7 @@ func TestProposalStoreHandle(t *testing.T) {
proposalVoteEventBatch, proposalPayloadEventBatch, _ := generateProposalEvents(t, player, accounts, factory, ledger)
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
_, proposalV0, _ := proposalForBlock(accounts.addresses[accountIndex], accounts.vrfs[accountIndex], testBlockFactory, player.Period, ledger)
accountIndex++
@@ -645,7 +645,7 @@ func TestProposalStoreGetPinnedValue(t *testing.T) {
// create proposal Store
player, router, accounts, factory, ledger := testPlayerSetup()
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", player.Round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", player.Round, err)
accountIndex := 0
// create a route handler for the proposal store
rHandle := routerHandle{
diff --git a/agreement/proposalTracker.go b/agreement/proposalTracker.go
index 1df9d106d3..add3c94480 100644
--- a/agreement/proposalTracker.go
+++ b/agreement/proposalTracker.go
@@ -19,6 +19,7 @@ package agreement
import (
"fmt"
+ "github.com/algorand/go-algorand/config" // TODO(upgrade): Please remove this line after the upgrade goes through
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
)
@@ -37,14 +38,23 @@ type proposalSeeker struct {
// accept compares a given vote with the current lowest-credentialled vote and
// sets it if freeze has not been called.
-func (s proposalSeeker) accept(v vote) (proposalSeeker, error) {
+// TODO(upgrade): Please remove the "useBuggyLowestOutput" argument as soon as the protocol upgrade goes through
+func (s proposalSeeker) accept(v vote, useBuggyLowestOutput bool) (proposalSeeker, error) {
if s.Frozen {
return s, errProposalSeekerFrozen{}
}
- if s.Filled && !v.Cred.Less(s.Lowest.Cred) {
- return s, errProposalSeekerNotLess{NewSender: v.R.Sender, LowestSender: s.Lowest.R.Sender}
- }
+ // TODO(upgrade): Please remove the lines below as soon as the upgrade goes through
+ if useBuggyLowestOutput {
+ if s.Filled && !v.Cred.LessBuggy(s.Lowest.Cred) {
+ return s, errProposalSeekerNotLess{NewSender: v.R.Sender, LowestSender: s.Lowest.R.Sender}
+ }
+ } else {
+ // TODO(upgrade): Please remove the lines above as soon as the upgrade goes through
+ if s.Filled && !v.Cred.Less(s.Lowest.Cred) {
+ return s, errProposalSeekerNotLess{NewSender: v.R.Sender, LowestSender: s.Lowest.R.Sender}
+ }
+ } // TODO(upgrade): Please remove this line when the upgrade goes through
s.Lowest = v
s.Filled = true
@@ -146,7 +156,7 @@ func (t *proposalTracker) handle(r routerHandle, p player, e event) event {
}
var err error
- t.Freezer, err = t.Freezer.accept(v)
+ t.Freezer, err = t.Freezer.accept(v, config.Consensus[e.Proto.Version].UseBuggyProposalLowestOutput) // TODO(upgrade): Please remove the second argument as soon as the upgrade goes through
if err != nil {
err := errProposalTrackerPS{Sub: err}
return filteredEvent{T: voteFiltered, Err: makeSerErr(err)}
@@ -164,7 +174,7 @@ func (t *proposalTracker) handle(r routerHandle, p player, e event) event {
t.Freezer = t.Freezer.freeze()
return e
- case softThreshold:
+ case softThreshold, certThreshold:
e := e.(thresholdEvent)
t.Staging = e.Proposal
@@ -208,7 +218,7 @@ type errProposalTrackerSenderDup struct {
}
func (err errProposalTrackerSenderDup) Error() string {
- return fmt.Sprintf("proposalTracker: filtered vote: sender %v had already sent a vote in round %v period %v", err.Sender, err.Round, err.Period)
+ return fmt.Sprintf("proposalTracker: filtered vote: sender %v had already sent a vote in round %d period %d", err.Sender, err.Round, err.Period)
}
diff --git a/agreement/proposalTrackerContract.go b/agreement/proposalTrackerContract.go
index cff7ed03e9..f61876b348 100644
--- a/agreement/proposalTrackerContract.go
+++ b/agreement/proposalTrackerContract.go
@@ -24,13 +24,13 @@ type proposalTrackerContract struct {
SawOneVote bool
Froze bool
SawSoftThreshold bool
- Expected proposalValue
+ SawCertThreshold bool
}
// TODO check concrete types of events
func (c *proposalTrackerContract) pre(p player, in event) (pre []error) {
switch in.t() {
- case voteVerified, proposalFrozen, softThreshold, voteFilterRequest, readStaging:
+ case voteVerified, proposalFrozen, softThreshold, certThreshold, voteFilterRequest, readStaging:
default:
pre = append(pre, fmt.Errorf("incoming event has invalid type: %v", in.t()))
}
@@ -72,7 +72,7 @@ func (c *proposalTrackerContract) post(p player, in, out event) (post []error) {
return
}
- if !c.SawOneVote && !c.Froze && !c.SawSoftThreshold {
+ if !c.SawOneVote && !c.Froze && !c.SawSoftThreshold && !c.SawCertThreshold {
if out.t() != proposalAccepted {
post = append(post, fmt.Errorf("expected first vote to have event type %v; had %v", proposalAccepted, out.t()))
} else if out.(proposalAcceptedEvent).Proposal != in.(messageEvent).Input.Vote.R.Proposal {
@@ -80,8 +80,8 @@ func (c *proposalTrackerContract) post(p player, in, out event) (post []error) {
}
}
- if (c.Froze || c.SawSoftThreshold) && out.t() != voteFiltered {
- post = append(post, fmt.Errorf("Frozen state = %v and soft threshold state = %v but got event type %v != voteFiltered", c.Froze, c.SawSoftThreshold, out.t()))
+ if (c.Froze || c.SawSoftThreshold || c.SawCertThreshold) && out.t() != voteFiltered {
+ post = append(post, fmt.Errorf("Frozen state = %v and soft threshold state = %v and cert threshold state = %v but got event type %v != voteFiltered", c.Froze, c.SawSoftThreshold, c.SawCertThreshold, out.t()))
}
if !c.SawOneVote {
@@ -103,7 +103,6 @@ func (c *proposalTrackerContract) post(p player, in, out event) (post []error) {
}
c.Froze = true
- c.Expected = out.(proposalFrozenEvent).Proposal
case softThreshold:
if out.t() != proposalAccepted {
post = append(post, fmt.Errorf("output event from proposalFrozen has bad type: %v", out.t()))
@@ -121,7 +120,19 @@ func (c *proposalTrackerContract) post(p player, in, out event) (post []error) {
}
c.SawSoftThreshold = true
- c.Expected = out.(proposalAcceptedEvent).Proposal
+ case certThreshold:
+ if out.t() != proposalAccepted {
+ post = append(post, fmt.Errorf("output event from certThreshold has bad type: %v", out.t()))
+ }
+ _, ok := out.(proposalAcceptedEvent)
+ if !ok {
+ post = append(post, fmt.Errorf("output event does not cast to proposalAcceptedEvent: output is %#v", out))
+ }
+ outProp := out.(proposalAcceptedEvent).Proposal
+ if outProp != in.(thresholdEvent).Proposal {
+			post = append(post, fmt.Errorf("expected proposal-value %v; instead got %v", in.(thresholdEvent).Proposal, outProp))
+ }
+ c.SawCertThreshold = true
}
return
}
diff --git a/agreement/proposalTracker_test.go b/agreement/proposalTracker_test.go
index b0f8348644..84b23fb237 100644
--- a/agreement/proposalTracker_test.go
+++ b/agreement/proposalTracker_test.go
@@ -62,19 +62,19 @@ func TestProposalTrackerProposalSeeker(t *testing.T) {
assert.False(t, s.Filled)
// issue events in the following order: 2, 3, 1, (freeze), 0
- s, err = s.accept(votes[2])
+ s, err = s.accept(votes[2], false) //TODO(upgrade) delete the ", false"
assert.NoError(t, err)
assert.False(t, s.Frozen)
assert.True(t, s.Filled)
assert.True(t, s.Lowest.equals(votes[2]))
- s, err = s.accept(votes[3])
+ s, err = s.accept(votes[3], false) //TODO(upgrade) delete the ", false"
assert.Error(t, err)
assert.False(t, s.Frozen)
assert.True(t, s.Filled)
assert.True(t, s.Lowest.equals(votes[2]))
- s, err = s.accept(votes[1])
+ s, err = s.accept(votes[1], false) //TODO(upgrade) delete the ", false"
assert.NoError(t, err)
assert.False(t, s.Frozen)
assert.True(t, s.Filled)
@@ -85,7 +85,7 @@ func TestProposalTrackerProposalSeeker(t *testing.T) {
assert.True(t, s.Filled)
assert.True(t, s.Lowest.equals(votes[1]))
- s, err = s.accept(votes[0])
+ s, err = s.accept(votes[0], false) //TODO(upgrade) delete the ", false"
assert.Error(t, err)
assert.True(t, s.Frozen)
assert.True(t, s.Filled)
@@ -266,6 +266,30 @@ func (s *proposalTrackerTestShadow) stage(pv proposalValue) {
s.outputs = append(s.outputs, res)
}
+func (s *proposalTrackerTestShadow) stageWithCert(pv proposalValue) {
+ var req, res event
+
+ // check staging
+ req = stagingValueEvent{}
+ res = stagingValueEvent{}
+ s.inputs = append(s.inputs, req)
+ s.outputs = append(s.outputs, res)
+
+ // deliver cert threshold
+ req = thresholdEvent{T: certThreshold, Proposal: pv}
+ res = proposalAcceptedEvent{Round: s.round, Period: s.period, Proposal: pv}
+ s.inputs = append(s.inputs, req)
+ s.outputs = append(s.outputs, res)
+ s.staged = true
+ s.staging = pv
+
+ // check staging
+ req = stagingValueEvent{}
+ res = stagingValueEvent{Proposal: pv}
+ s.inputs = append(s.inputs, req)
+ s.outputs = append(s.outputs, res)
+}
+
// create many proposal-votes, sorted in increasing credential-order.
func setupProposalTrackerTests(t *testing.T) (votes []vote) {
ledger, addrs, vrfs, ots := readOnlyFixture100()
@@ -406,6 +430,56 @@ func TestProposalTrackerBasic(t *testing.T) {
lowDelivery(shadow, "failed to track votes properly after staged")
midDelivery(shadow, "failed to track votes properly after staged")
})
+
+ t.Run("EarlyStagingCert", func(t *testing.T) {
+ targetCert := midvotes[0]
+ shadow := makeProposalTrackerTestShadow(votes[0].R.Round, votes[0].R.Period)
+
+ shadow.stageWithCert(targetCert.R.Proposal)
+ shadow.execute(t, "failed to deliver cert threshold properly")
+
+		highDelivery(shadow, "failed to track votes properly after staged")
+
+ shadow.freeze()
+ shadow.execute(t, "failed to freeze machine properly")
+
+ lowDelivery(shadow, "failed to track votes properly after staged")
+ midDelivery(shadow, "failed to track votes properly after staged")
+ })
+
+ t.Run("LateStagingCert", func(t *testing.T) {
+ targetCert := midvotes[0]
+ shadow := makeProposalTrackerTestShadow(votes[0].R.Round, votes[0].R.Period)
+
+ highDelivery(shadow, "failed to track votes properly at zero state")
+
+ shadow.freeze()
+ shadow.execute(t, "failed to freeze machine properly")
+
+ midDelivery(shadow, "failed to track votes properly after frozen (but not staged)")
+
+ shadow.stageWithCert(targetCert.R.Proposal)
+		shadow.execute(t, "failed to deliver cert threshold properly")
+
+ lowDelivery(shadow, "failed to track votes properly after staged")
+ })
+
+ t.Run("SynchronousCert", func(t *testing.T) {
+ targetCert := lowvotes[0]
+ shadow := makeProposalTrackerTestShadow(votes[0].R.Round, votes[0].R.Period)
+
+ midDelivery(shadow, "failed to track votes properly at zero state")
+ highDelivery(shadow, "failed to track votes properly at zero state")
+ lowDelivery(shadow, "failed to track votes properly at zero state")
+
+ shadow.freeze()
+ shadow.execute(t, "failed to freeze machine properly")
+
+ shadow.stageWithCert(targetCert.R.Proposal)
+ shadow.execute(t, "failed to deliver cert threshold properly")
+
+ })
+
}
// func TestProposalTrackerSenderSpam(t *testing.T) {
diff --git a/agreement/proposal_test.go b/agreement/proposal_test.go
index 8cb358bd8e..b54a4326c8 100644
--- a/agreement/proposal_test.go
+++ b/agreement/proposal_test.go
@@ -48,7 +48,7 @@ func testSetup(periodCount uint64) (player, rootRouter, testAccountData, testBlo
func createProposalsTesting(accs testAccountData, round basics.Round, period period, factory BlockFactory, ledger Ledger) (ps []proposal, vs []vote) {
ve, err := factory.AssembleBlock(round, time.Now().Add(time.Minute))
if err != nil {
- logging.Base().Errorf("Could not generate a proposal for round %v: %v", round, err)
+ logging.Base().Errorf("Could not generate a proposal for round %d: %v", round, err)
return nil, nil
}
@@ -119,7 +119,7 @@ func TestProposalFunctions(t *testing.T) {
round := player.Round
period := player.Period
ve, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
validator := testBlockValidator{}
@@ -157,7 +157,7 @@ func TestProposalUnauthenticated(t *testing.T) {
round := player.Round
period := player.Period
testBlockFactory, err := factory.AssembleBlock(player.Round, time.Now().Add(time.Minute))
- require.NoError(t, err, "Could not generate a proposal for round %v: %v", round, err)
+ require.NoError(t, err, "Could not generate a proposal for round %d: %v", round, err)
validator := testBlockValidator{}
diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go
index db7bf10ea2..eac9febc83 100644
--- a/agreement/pseudonode.go
+++ b/agreement/pseudonode.go
@@ -254,7 +254,7 @@ func (n asyncPseudonode) makeProposals(round basics.Round, period period, accoun
deadline := time.Now().Add(AssemblyTime)
ve, err := n.factory.AssembleBlock(round, deadline)
if err != nil {
- n.log.Errorf("pseudonode.makeProposals: could not generate a proposal for round %v: %v", round, err)
+ n.log.Errorf("pseudonode.makeProposals: could not generate a proposal for round %d: %v", round, err)
return nil, nil
}
@@ -362,31 +362,35 @@ func (t pseudonodeVotesTask) execute(verifier *AsyncVoteVerifier, quit chan stru
for _, result := range verifiedResults {
totalWeight += result.v.Cred.Weight
}
- for _, result := range verifiedResults {
- vote := result.v
- logEvent := logspec.AgreementEvent{
- Type: logspec.VoteBroadcast,
- Sender: vote.R.Sender.String(),
- Hash: vote.R.Proposal.BlockDigest.String(),
- ObjectRound: uint64(vote.R.Round),
- ObjectPeriod: uint64(vote.R.Period),
- ObjectStep: uint64(vote.R.Step),
- Weight: vote.Cred.Weight,
- WeightTotal: totalWeight,
+ if t.node.log.IsLevelEnabled(logging.Info) {
+ for _, result := range verifiedResults {
+ vote := result.v
+ logEvent := logspec.AgreementEvent{
+ Type: logspec.VoteBroadcast,
+ Sender: vote.R.Sender.String(),
+ Hash: vote.R.Proposal.BlockDigest.String(),
+ ObjectRound: uint64(vote.R.Round),
+ ObjectPeriod: uint64(vote.R.Period),
+ ObjectStep: uint64(vote.R.Step),
+ Weight: vote.Cred.Weight,
+ WeightTotal: totalWeight,
+ }
+ t.node.log.with(logEvent).Infof("vote created for broadcast (weight %d, total weight %d)", vote.Cred.Weight, totalWeight)
+ if !t.node.log.GetTelemetryEnabled() {
+ continue
+ }
+ t.node.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.VoteSentEvent, telemetryspec.VoteEventDetails{
+ Address: vote.R.Sender.String(),
+ Hash: vote.R.Proposal.BlockDigest.String(),
+ Round: uint64(vote.R.Round),
+ Period: uint64(vote.R.Period),
+ Step: uint64(vote.R.Step),
+ Weight: vote.Cred.Weight,
+ // Recovered: false,
+ })
}
- t.node.log.with(logEvent).Infof("vote created for broadcast (weight %v, total weight %v)", vote.Cred.Weight, totalWeight)
- t.node.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.VoteSentEvent, telemetryspec.VoteEventDetails{
- Address: vote.R.Sender.String(),
- Hash: vote.R.Proposal.BlockDigest.String(),
- Round: uint64(vote.R.Round),
- Period: uint64(vote.R.Period),
- Step: uint64(vote.R.Step),
- Weight: vote.Cred.Weight,
- // Recovered: false,
- })
- }
- t.node.log.Infof("pseudonode.makeVotes: %v votes created for %v at (%v, %v, %v), total weight %v", len(verifiedResults), t.prop, t.round, t.period, t.step, totalWeight)
-
+ t.node.log.Infof("pseudonode.makeVotes: %v votes created for %v at (%v, %v, %v), total weight %v", len(verifiedResults), t.prop, t.round, t.period, t.step, totalWeight)
+ }
if len(verifiedResults) > 0 {
// wait until the persist state is flushed, as we don't want to send any vote unless we've completed flushing it to disk.
// at this point, the error was already logged.
@@ -477,15 +481,17 @@ func (t pseudonodeProposalsTask) execute(verifier *AsyncVoteVerifier, quit chan
ObjectRound: uint64(vote.R.Round),
ObjectPeriod: uint64(vote.R.Period),
}
- t.node.log.with(logEvent).Infof("pseudonode.makeProposals: proposal created for (%v, %v)", vote.R.Round, vote.R.Period)
- t.node.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.BlockProposedEvent, telemetryspec.BlockProposedEventDetails{
- Hash: vote.R.Proposal.BlockDigest.String(),
- Address: vote.R.Sender.String(),
- Round: uint64(vote.R.Round),
- Period: uint64(vote.R.Period),
- })
- }
- t.node.log.Infof("pseudonode.makeProposals: %v proposals created for round %v, period %v", len(verifiedVotes), t.round, t.period)
+ t.node.log.with(logEvent).Infof("pseudonode.makeProposals: proposal created for (%d, %d)", vote.R.Round, vote.R.Period)
+ if t.node.log.GetTelemetryEnabled() {
+ t.node.log.EventWithDetails(telemetryspec.Agreement, telemetryspec.BlockProposedEvent, telemetryspec.BlockProposedEventDetails{
+ Hash: vote.R.Proposal.BlockDigest.String(),
+ Address: vote.R.Sender.String(),
+ Round: uint64(vote.R.Round),
+ Period: uint64(vote.R.Period),
+ })
+ }
+ }
+ t.node.log.Infof("pseudonode.makeProposals: %d proposals created for round %d, period %d", len(verifiedVotes), t.round, t.period)
for range verifiedVotes {
t.node.monitor.inc(pseudonodeCoserviceType)
diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go
index 71645abac2..c8192c22ab 100644
--- a/agreement/pseudonode_test.go
+++ b/agreement/pseudonode_test.go
@@ -114,7 +114,7 @@ func compareEventChannels(t *testing.T, ch1, ch2 <-chan externalEvent) bool {
if !compareUnauthenticatedProposal(t, uo, up2) {
return false
}
- if !assert.Equal(t, protocol.Encode(uo), protocol.Encode(up2)) {
+ if !assert.Equal(t, protocol.Encode(&uo), protocol.Encode(&up2)) {
return false
}
if !assert.Equal(t, uo.Digest(), up2.Digest()) {
@@ -298,7 +298,7 @@ func (n serializedPseudonode) MakeProposals(ctx context.Context, r round, p peri
}
for i, proposal := range proposals {
- verifier.Verify(ctx, cryptoRequest{message: message{Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: proposal.u()}, Round: r})
+ verifier.VerifyProposal(ctx, cryptoProposalRequest{message: message{Tag: protocol.ProposalPayloadTag, UnauthenticatedProposal: proposal.u()}, Round: r})
select {
case cryptoResult, ok := <-verifier.Verified(protocol.ProposalPayloadTag):
if !ok {
diff --git a/agreement/selector.go b/agreement/selector.go
index 9d7c535c0d..f52bb58163 100644
--- a/agreement/selector.go
+++ b/agreement/selector.go
@@ -38,7 +38,7 @@ type selector struct {
// ToBeHashed implements the crypto.Hashable interface.
func (sel selector) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.AgreementSelector, protocol.Encode(sel)
+ return protocol.AgreementSelector, protocol.Encode(&sel)
}
// CommitteeSize returns the size of the committee, which is determined by
@@ -66,19 +66,19 @@ func membership(l LedgerReader, addr basics.Address, r basics.Round, p period, s
record, err := l.BalanceRecord(balanceRound, addr)
if err != nil {
- err = fmt.Errorf("Service.initializeVote (r=%v): Failed to obtain balance record for address %v in round %v: %v", r, addr, balanceRound, err)
+ err = fmt.Errorf("Service.initializeVote (r=%d): Failed to obtain balance record for address %v in round %d: %v", r, addr, balanceRound, err)
return
}
total, err := l.Circulation(balanceRound)
if err != nil {
- err = fmt.Errorf("Service.initializeVote (r=%v): Failed to obtain total circulation in round %v: %v", r, balanceRound, err)
+ err = fmt.Errorf("Service.initializeVote (r=%d): Failed to obtain total circulation in round %d: %v", r, balanceRound, err)
return
}
seed, err := l.Seed(seedRound)
if err != nil {
- err = fmt.Errorf("Service.initializeVote (r=%v): Failed to obtain seed in round %v: %v", r, seedRound, err)
+ err = fmt.Errorf("Service.initializeVote (r=%d): Failed to obtain seed in round %d: %v", r, seedRound, err)
return
}
diff --git a/agreement/service_test.go b/agreement/service_test.go
index 0de42e5005..85221d6ed4 100644
--- a/agreement/service_test.go
+++ b/agreement/service_test.go
@@ -152,6 +152,7 @@ type testingNetwork struct {
compoundPocket chan<- multicastParams
partitionedNodes map[nodeID]bool
crownedNodes map[nodeID]bool
+ relayNodes map[nodeID]bool
interceptFn multicastInterceptFn
}
@@ -219,7 +220,7 @@ func (n *testingNetwork) multicast(tag protocol.Tag, data []byte, source nodeID,
tag, data, source, exclude = out.tag, out.data, out.source, out.exclude
}
- if n.dropSoftVotes || n.dropSlowNextVotes || n.dropVotes || n.certVotePocket != nil || n.softVotePocket != nil || n.compoundPocket != nil || n.crownedNodes != nil {
+ if n.dropSoftVotes || n.dropSlowNextVotes || n.dropVotes || n.certVotePocket != nil || n.softVotePocket != nil || n.compoundPocket != nil {
if tag == protocol.ProposalPayloadTag {
r := bytes.NewBuffer(data)
@@ -300,13 +301,18 @@ func (n *testingNetwork) multicast(tag protocol.Tag, data []byte, source nodeID,
continue
}
if n.partitionedNodes != nil {
- if n.partitionedNodes[source] != n.partitionedNodes[nodeID(i)] {
+ if n.partitionedNodes[source] != n.partitionedNodes[peerid] {
continue
}
}
if n.crownedNodes != nil {
- if !n.crownedNodes[nodeID(i)] {
- return
+ if !n.crownedNodes[peerid] {
+ continue
+ }
+ }
+ if n.relayNodes != nil {
+ if !n.relayNodes[source] && !n.relayNodes[peerid] {
+ continue
}
}
@@ -377,6 +383,7 @@ func (n *testingNetwork) repairAll() {
n.compoundPocket = nil
n.partitionedNodes = nil
n.crownedNodes = nil
+ n.relayNodes = nil
n.interceptFn = nil
}
@@ -406,7 +413,17 @@ func (n *testingNetwork) crown(prophets ...nodeID) {
defer n.mu.Unlock()
n.crownedNodes = make(map[nodeID]bool)
for i := 0; i < len(prophets); i++ {
- n.crownedNodes[nodeID(i)] = true
+ n.crownedNodes[prophets[i]] = true
+ }
+}
+
+// Star topology with the given nodes at the center; to revert, call repairAll
+func (n *testingNetwork) makeRelays(relays ...nodeID) {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ n.relayNodes = make(map[nodeID]bool)
+ for i := 0; i < len(relays); i++ {
+ n.relayNodes[relays[i]] = true
}
}
@@ -443,16 +460,16 @@ func (n *testingNetwork) testingNetworkEndpoint(id nodeID) *testingNetworkEndpoi
func (n *testingNetwork) prepareAllMulticast() {
n.mu.Lock()
defer n.mu.Unlock()
- for i := 0; i < len(n.monitors); i++ {
- n.monitors[nodeID(i)].inc(networkCoserviceType)
+ for _, monitor := range n.monitors {
+ monitor.inc(networkCoserviceType)
}
}
func (n *testingNetwork) finishAllMulticast() {
n.mu.Lock()
defer n.mu.Unlock()
- for i := 0; i < len(n.monitors); i++ {
- n.monitors[nodeID(i)].dec(networkCoserviceType)
+ for _, monitor := range n.monitors {
+ monitor.dec(networkCoserviceType)
}
}
@@ -549,7 +566,7 @@ func (m *activityMonitor) waitForActivity() {
func (m *activityMonitor) waitForQuiet() {
select {
case <-m.quiet:
- case <-time.After(5 * time.Second):
+ case <-time.After(10 * time.Second):
m.dump()
var buf [1000000]byte
@@ -779,8 +796,8 @@ func setupAgreementWithValidator(t *testing.T, numNodes int, traceLevel traceLev
}
cleanupFn := func() {
- for _, accessor := range dbAccessors {
- defer accessor.Close()
+ for idx := 0; idx < len(dbAccessors); idx++ {
+ dbAccessors[idx].Close()
}
if r := recover(); r != nil {
@@ -1999,10 +2016,10 @@ func TestAgreementRegression_WrongPeriodPayloadVerificationCancellation_8ba23942
baseNetwork, baseLedger, cleanupFn, services, clocks, ledgers, activityMonitor := setupAgreementWithValidator(t, numNodes, disabled, validator, makeTestLedger)
startRound := baseLedger.NextRound()
defer cleanupFn()
+
for i := 0; i < numNodes; i++ {
services[i].Start()
}
-
activityMonitor.waitForActivity()
activityMonitor.waitForQuiet()
zeroes := expectNewPeriod(clocks, 0)
@@ -2101,7 +2118,7 @@ func TestAgreementRegression_WrongPeriodPayloadVerificationCancellation_8ba23942
}
// resume block verification, replay potentially cancelled blocks to ensure good caching
- // then wait for network to converge (round should terminate at this point)
+ // then wait for network to converge (round should terminate at this point)
activityMonitor.setCallback(nil)
close(ch)
@@ -2140,3 +2157,118 @@ func TestAgreementRegression_WrongPeriodPayloadVerificationCancellation_8ba23942
}
}
}
+
+// Receiving a certificate should not cause a node to stop relaying important messages
+// (such as blocks and pipelined messages for the next round)
+// Note that the stall will be resolved by catchup even if the relay blocks.
+func TestAgreementCertificateDoesNotStallSingleRelay(t *testing.T) {
+ numNodes := 5 // single relay, four leaf nodes
+ relayID := nodeID(0)
+ baseNetwork, baseLedger, cleanupFn, services, clocks, ledgers, activityMonitor := setupAgreement(t, numNodes, disabled, makeTestLedger)
+
+ startRound := baseLedger.NextRound()
+ defer cleanupFn()
+ for i := 0; i < numNodes; i++ {
+ services[i].Start()
+ }
+ activityMonitor.waitForActivity()
+ activityMonitor.waitForQuiet()
+ zeroes := expectNewPeriod(clocks, 0)
+ // run two rounds
+ zeroes = runRound(clocks, activityMonitor, zeroes)
+ // make sure relay does not see block proposal for round 3
+ baseNetwork.intercept(func(params multicastParams) multicastParams {
+ if params.tag == protocol.ProposalPayloadTag {
+ var tp transmittedPayload
+ err := protocol.DecodeStream(bytes.NewBuffer(params.data), &tp)
+ if err != nil {
+ panic(err)
+ }
+ if tp.Round() == basics.Round(startRound+2) {
+ params.exclude = relayID
+ }
+ }
+ if params.source == relayID {
+ // must also drop relay's proposal so it cannot win leadership
+ r := bytes.NewBuffer(params.data)
+ if params.tag == protocol.AgreementVoteTag {
+ var uv unauthenticatedVote
+ err := protocol.DecodeStream(r, &uv)
+ if err != nil {
+ panic(err)
+ }
+ if uv.R.Step != propose {
+ return params
+ }
+ }
+ params.tag = protocol.UnknownMsgTag
+ }
+
+ return params
+ })
+ zeroes = runRound(clocks, activityMonitor, zeroes)
+
+ // Round 3:
+ // First partition the relay to prevent it from seeing certificate or block
+ baseNetwork.repairAll()
+ baseNetwork.partition(relayID)
+ // Get a copy of the certificate
+ pocketCert := make(chan multicastParams, 100)
+ baseNetwork.intercept(func(params multicastParams) multicastParams {
+ if params.tag == protocol.AgreementVoteTag {
+ r := bytes.NewBuffer(params.data)
+ var uv unauthenticatedVote
+ err := protocol.DecodeStream(r, &uv)
+ if err != nil {
+ panic(err)
+ }
+ if uv.R.Step == cert {
+ pocketCert <- params
+ }
+ }
+ return params
+ })
+ // And with some hypothetical second relay the network achieves consensus on a certificate and block.
+ triggerGlobalTimeout(filterTimeout, clocks, activityMonitor)
+ zeroes = expectNewPeriod(clocks[1:], zeroes)
+ require.Equal(t, uint(3), clocks[0].(*testingClock).zeroes)
+ close(pocketCert)
+
+ // Round 4:
+ // Return to the relay topology
+ baseNetwork.repairAll()
+ baseNetwork.makeRelays(relayID)
+ // Trigger ensureDigest on the relay
+ baseNetwork.prepareAllMulticast()
+ for p := range pocketCert {
+ baseNetwork.multicast(p.tag, p.data, p.source, p.exclude)
+ }
+ baseNetwork.finishAllMulticast()
+ activityMonitor.waitForActivity()
+ activityMonitor.waitForQuiet()
+ // this relay must still relay initial messages. Note that payloads were already relayed with
+ // the previous global timeout.
+ triggerGlobalTimeout(filterTimeout, clocks[1:], activityMonitor)
+ zeroes = expectNewPeriod(clocks[1:], zeroes)
+ require.Equal(t, uint(3), clocks[0].(*testingClock).zeroes)
+
+ for i := 0; i < numNodes; i++ {
+ services[i].Shutdown()
+ }
+ const expectNumRounds = 4
+ for i := 1; i < numNodes; i++ {
+ if ledgers[i].NextRound() != startRound+round(expectNumRounds) {
+ panic("did not progress 4 rounds")
+ }
+ }
+ for j := 0; j < expectNumRounds; j++ {
+ ledger := ledgers[1].(*testLedger)
+ reference := ledger.entries[startRound+round(j)].Digest()
+ for i := 1; i < numNodes; i++ {
+ ledger := ledgers[i].(*testLedger)
+ if ledger.entries[startRound+round(j)].Digest() != reference {
+ panic("wrong block confirmed")
+ }
+ }
+ }
+}
diff --git a/agreement/vote.go b/agreement/vote.go
index 277f736845..e38c588e98 100644
--- a/agreement/vote.go
+++ b/agreement/vote.go
@@ -100,14 +100,14 @@ func (uv unauthenticatedVote) verify(l LedgerReader) (vote, error) {
}
// The following check could apply to all steps, but it's sufficient to only check in the propose step.
if rv.Proposal.OriginalPeriod > rv.Period {
- return vote{}, fmt.Errorf("unauthenticatedVote.verify: proposal-vote in period %v claims to repropose block from future period %v", rv.Period, rv.Proposal.OriginalPeriod)
+ return vote{}, fmt.Errorf("unauthenticatedVote.verify: proposal-vote in period %d claims to repropose block from future period %d", rv.Period, rv.Proposal.OriginalPeriod)
}
fallthrough
case soft:
fallthrough
case cert:
if rv.Proposal == bottom {
- return vote{}, fmt.Errorf("unauthenticatedVote.verify: votes from step %v cannot validate bottom", rv.Step)
+ return vote{}, fmt.Errorf("unauthenticatedVote.verify: votes from step %d cannot validate bottom", rv.Step)
}
}
@@ -156,18 +156,18 @@ func makeVote(rv rawVote, voting crypto.OneTimeSigner, selection *crypto.VRFSecr
switch rv.Step {
case propose, soft, cert, late, redo:
if rv.Proposal == bottom {
- logging.Base().Panicf("makeVote: votes from step %v cannot validate bottom", rv.Step)
+ logging.Base().Panicf("makeVote: votes from step %d cannot validate bottom", rv.Step)
}
case down:
if rv.Proposal != bottom {
- logging.Base().Panicf("makeVote: votes from step %v must validate bottom", rv.Step)
+ logging.Base().Panicf("makeVote: votes from step %d must validate bottom", rv.Step)
}
}
} else {
switch rv.Step {
case propose, soft, cert:
if rv.Proposal == bottom {
- logging.Base().Panicf("makeVote: votes from step %v cannot validate bottom", rv.Step)
+ logging.Base().Panicf("makeVote: votes from step %d cannot validate bottom", rv.Step)
}
}
}
@@ -184,7 +184,7 @@ func makeVote(rv rawVote, voting crypto.OneTimeSigner, selection *crypto.VRFSecr
// ToBeHashed implements the Hashable interface.
func (rv rawVote) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.Vote, protocol.Encode(rv)
+ return protocol.Vote, protocol.Encode(&rv)
}
func (v vote) u() unauthenticatedVote {
diff --git a/agreement/voteAggregator.go b/agreement/voteAggregator.go
index 1e4d309fb4..710bd5d346 100644
--- a/agreement/voteAggregator.go
+++ b/agreement/voteAggregator.go
@@ -197,7 +197,7 @@ func (agg *voteAggregator) filterVote(proto protocol.ConsensusVersion, p player,
switch filterRes.t() {
case voteFilteredStep:
// we'll rebuild the filtered event later
- return fmt.Errorf("voteAggregator: rejected vote: sender %v had already sent a vote in round %v period %v step %v", uv.R.Sender, uv.R.Round, uv.R.Period, uv.R.Step)
+ return fmt.Errorf("voteAggregator: rejected vote: sender %v had already sent a vote in round %d period %d step %d", uv.R.Sender, uv.R.Round, uv.R.Period, uv.R.Step)
case none:
return nil
}
@@ -205,8 +205,7 @@ func (agg *voteAggregator) filterVote(proto protocol.ConsensusVersion, p player,
panic("not reached")
}
-// filterBundle filters a bundle, checking if it is both fresh and is neither a
-// duplicate nor equivocates twice.
+// filterBundle filters a bundle, checking if it is fresh.
// TODO consider optimizing recovery by filtering bundles for some value if we
// have already seen the threshold met for that value. This will filter
// repeated bundles sent by honest peers.
@@ -234,10 +233,10 @@ func voteStepFresh(descr string, proto protocol.ConsensusVersion, mine, vote ste
}
if mine != 0 && mine-1 > vote {
- return fmt.Errorf("filtered stale vote %s: step %v - 1 > %v", descr, mine, vote)
+ return fmt.Errorf("filtered stale vote %s: step %d - 1 > %d", descr, mine, vote)
}
if mine+1 < vote {
- return fmt.Errorf("filtered premature vote %s: step %v + 1 < %v", descr, mine, vote)
+ return fmt.Errorf("filtered premature vote %s: step %d + 1 < %d", descr, mine, vote)
}
return nil
@@ -276,11 +275,15 @@ func voteFresh(proto protocol.ConsensusVersion, freshData freshnessData, vote un
// bundleFresh determines whether a bundle satisfies freshness rules.
func bundleFresh(freshData freshnessData, b unauthenticatedBundle) error {
if freshData.PlayerRound != b.Round {
- return fmt.Errorf("filtered bundle from different round: round %v != %v", freshData.PlayerRound, b.Round)
+ return fmt.Errorf("filtered bundle from different round: round %d != %d", freshData.PlayerRound, b.Round)
+ }
+
+ if b.Step == cert {
+ return nil
}
if freshData.PlayerPeriod != 0 && freshData.PlayerPeriod-1 > b.Period {
- return fmt.Errorf("filtered stale bundle: period %v >= %v", freshData.PlayerPeriod, b.Period)
+ return fmt.Errorf("filtered stale bundle: period %d >= %d", freshData.PlayerPeriod, b.Period)
}
return nil
diff --git a/agreement/voteAuxiliary.go b/agreement/voteAuxiliary.go
index c224b815c5..7fd64ec486 100644
--- a/agreement/voteAuxiliary.go
+++ b/agreement/voteAuxiliary.go
@@ -39,7 +39,7 @@ func (t *voteTrackerPeriod) underlying() listener {
// A voteTrackerPeriod handles:
// - voteAcceptedEvent, which it forwards to the vote tracker. This generates either
// a threshold event or an empty event (forwarded to the sender)
-// - nextThresholds: It updates its next threshodl cache if this
+// - nextThresholds: It updates its next threshold cache if this
// is a new next vote bundle for this period. Emits empty event. (We split this out
// so that we can unit test the voteTrackerPeriod trace without depending on the
// voteTrackerStep.)
@@ -92,7 +92,8 @@ func (t *voteTrackerPeriod) handle(r routerHandle, p player, e event) event {
//
// Bundle "freshness" is an ordering relation defined on thresholdEvents. The
// relation is defined as follows:
-// - thresholdEvents are fresher than thresholdEvents from older periods.
+// - certThresholds are fresher than other kinds of thresholdEvent.
+// - other thresholdEvents are fresher than thresholdEvents from older periods.
// - nextThresholds are fresher than softThreshold in the same period.
// - nextThresholds for the bottom proposal-value are fresher than
// nextThresholds for another proposal-value.
diff --git a/agreement/voteTracker.go b/agreement/voteTracker.go
index 5aba4c9509..9d27feb14b 100644
--- a/agreement/voteTracker.go
+++ b/agreement/voteTracker.go
@@ -183,7 +183,7 @@ func (tracker *voteTracker) handle(r routerHandle, p player, e0 event) event {
// In order for this to be triggered, more than 75% of the vote for the given step need to vote for more than
// a single proposal. In that state, all the proposals become "above threshold". That's a serious issue, since
// it would compromise the honest node core assumption.
- logging.Base().Panicf("too many equivocators for step %v: %v", e.Vote.R.Step, tracker.EquivocatorsCount)
+ logging.Base().Panicf("too many equivocators for step %d: %d", e.Vote.R.Step, tracker.EquivocatorsCount)
}
// decrease their weight from any block proposal they already
diff --git a/agreement/voteTrackerContract.go b/agreement/voteTrackerContract.go
index 9555be8058..3df30cde48 100644
--- a/agreement/voteTrackerContract.go
+++ b/agreement/voteTrackerContract.go
@@ -61,7 +61,7 @@ func (c *voteTrackerContract) pre(p player, in0 event) (pre []error) {
c.Step = in.Vote.R.Step
} else {
if c.Step != in.Vote.R.Step {
- pre = append(pre, fmt.Errorf("incoming event has step %v but expected step %v", in.Vote.R.Step, c.Step))
+ pre = append(pre, fmt.Errorf("incoming event has step %d but expected step %d", in.Vote.R.Step, c.Step))
}
}
return
@@ -82,15 +82,15 @@ func (c *voteTrackerContract) post(p player, in0, out0 event) (post []error) {
case none:
case softThreshold:
if in.Vote.R.Step != soft {
- post = append(post, fmt.Errorf("incoming event has step %v but outgoing event has type softThreshold", in.Vote.R.Step))
+ post = append(post, fmt.Errorf("incoming event has step %d but outgoing event has type softThreshold", in.Vote.R.Step))
}
case certThreshold:
if in.Vote.R.Step != cert {
- post = append(post, fmt.Errorf("incoming event has step %v but outgoing event has type certThreshold", in.Vote.R.Step))
+ post = append(post, fmt.Errorf("incoming event has step %d but outgoing event has type certThreshold", in.Vote.R.Step))
}
case nextThreshold:
if in.Vote.R.Step <= cert {
- post = append(post, fmt.Errorf("incoming event has step %v but outgoing event has type nextThreshold", in.Vote.R.Step))
+ post = append(post, fmt.Errorf("incoming event has step %d but outgoing event has type nextThreshold", in.Vote.R.Step))
}
default:
post = append(post, fmt.Errorf("outgoing event has invalid type: %v", out0.t()))
@@ -115,10 +115,10 @@ func (c *voteTrackerContract) post(p player, in0, out0 event) (post []error) {
emptyBundle := len(out.Bundle.Votes) == 0
if (out.T == none) != emptyBundle {
- post = append(post, fmt.Errorf("out.T must be none if and only if out.Bundle is empty, but out.T = %v while len(out.Bundle.Votes) = %v", out.T, len(out.Bundle.Votes)))
+ post = append(post, fmt.Errorf("out.T must be none if and only if out.Bundle is empty, but out.T = %v while len(out.Bundle.Votes) = %d", out.T, len(out.Bundle.Votes)))
}
if out.T != none && out.Proposal == bottom && out.Step < next {
- post = append(post, fmt.Errorf("outgoing event has bottom proposal but step %v", out.Step))
+ post = append(post, fmt.Errorf("outgoing event has bottom proposal but step %d", out.Step))
}
return
case voteFilterRequest:
diff --git a/auction/msgp_gen.go b/auction/msgp_gen.go
index f20489843e..2c8afc131a 100644
--- a/auction/msgp_gen.go
+++ b/auction/msgp_gen.go
@@ -2245,6 +2245,9 @@ func (z NoteFieldType) MarshalMsg(b []byte) (o []byte, err error) {
func (_ NoteFieldType) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(NoteFieldType)
+ if !ok {
+ _, ok = (z).(*NoteFieldType)
+ }
return ok
}
diff --git a/buildnumber.dat b/buildnumber.dat
index b4de394767..48082f72f0 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-11
+12
diff --git a/rpcs/fetcher.go b/catchup/fetcher.go
similarity index 95%
rename from rpcs/fetcher.go
rename to catchup/fetcher.go
index 56b30da5d4..15e4b009a0 100644
--- a/rpcs/fetcher.go
+++ b/catchup/fetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see .
-package rpcs
+package catchup
import (
"context"
@@ -31,6 +31,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
)
// DefaultFetchTimeout is the default time a fetcher should wait for a block
@@ -61,9 +62,9 @@ type FetcherFactory interface {
// NetworkFetcherFactory creates network fetchers
type NetworkFetcherFactory struct {
- net PeerSource
+ net network.GossipNode
peerLimit int
- fs *WsFetcherService
+ fs *rpcs.WsFetcherService
log logging.Logger
}
@@ -71,7 +72,7 @@ type NetworkFetcherFactory struct {
func (factory NetworkFetcherFactory) makeHTTPFetcherFromPeer(log logging.Logger, peer network.Peer) FetcherClient {
hp, ok := peer.(network.HTTPPeer)
if ok {
- return MakeHTTPFetcher(log, hp)
+ return MakeHTTPFetcher(log, hp, factory.net)
}
log.Errorf("%T %#v is not HTTPPeer", peer, peer)
return nil
@@ -79,7 +80,7 @@ func (factory NetworkFetcherFactory) makeHTTPFetcherFromPeer(log logging.Logger,
// MakeNetworkFetcherFactory returns a network fetcher factory, that associates fetchers with no more than peerLimit peers from the aggregator.
// WSClientSource can be nil, if no network exists to create clients from (defaults to http clients)
-func MakeNetworkFetcherFactory(net PeerSource, peerLimit int, fs *WsFetcherService) NetworkFetcherFactory {
+func MakeNetworkFetcherFactory(net network.GossipNode, peerLimit int, fs *rpcs.WsFetcherService) NetworkFetcherFactory {
var factory NetworkFetcherFactory
factory.net = net
factory.peerLimit = peerLimit
@@ -116,7 +117,9 @@ func (factory NetworkFetcherFactory) New() Fetcher {
}
}
-// NewOverGossip returns a gossip fetcher using the given message tag.
+// NewOverGossip returns a fetcher using the given message tag.
+// If there are gossip peers, then it returns a fetcher over gossip
+// Otherwise, it returns an HTTP fetcher
// We should never build two fetchers utilising the same tag. Why?
func (factory NetworkFetcherFactory) NewOverGossip(tag protocol.Tag) Fetcher {
gossipPeers := factory.net.GetPeers(network.PeersConnectedIn)
@@ -295,7 +298,7 @@ func (cf *ComposedFetcher) Close() {
/* Utils */
func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) {
- var decodedEntry EncodedBlockCert
+ var decodedEntry rpcs.EncodedBlockCert
err = protocol.Decode(fetchedBuf, &decodedEntry)
if err != nil {
err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err)
diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go
new file mode 100644
index 0000000000..ba153e9911
--- /dev/null
+++ b/catchup/fetcher_test.go
@@ -0,0 +1,907 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package catchup
+
+import (
+ "context"
+ "errors"
+ "net"
+ "net/http"
+ "net/rpc"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/gorilla/mux"
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/components/mocks"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/util/bloom"
+)
+
+type mockRunner struct {
+ ran bool
+ done chan *rpc.Call
+ failWithNil bool
+ failWithError bool
+ txgroups [][]transactions.SignedTxn
+}
+
+type mockRPCClient struct {
+ client *mockRunner
+ closed bool
+ rootURL string
+ log logging.Logger
+}
+
+func (client *mockRPCClient) Close() error {
+ client.closed = true
+ return nil
+}
+
+func (client *mockRPCClient) Address() string {
+ return "mock.address."
+}
+func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups [][]transactions.SignedTxn, err error) {
+ client.log.Info("MockRPCClient.Sync")
+ select {
+ case <-ctx.Done():
+ return nil, errors.New("cancelled")
+ default:
+ }
+ if client.client.failWithNil {
+ return nil, errors.New("old failWithNil")
+ }
+ if client.client.failWithError {
+ return nil, errors.New("failing call")
+ }
+ return client.client.txgroups, nil
+}
+func (client *mockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
+ return nil, nil
+}
+
+// network.HTTPPeer interface
+func (client *mockRPCClient) GetAddress() string {
+ return client.rootURL
+}
+func (client *mockRPCClient) GetHTTPClient() *http.Client {
+ return nil
+}
+func (client *mockRPCClient) PrepareURL(x string) string {
+ return strings.Replace(x, "{genesisID}", "test genesisID", -1)
+}
+
+type mockClientAggregator struct {
+ mocks.MockNetwork
+ peers []network.Peer
+}
+
+func (mca *mockClientAggregator) GetPeers(options ...network.PeerOption) []network.Peer {
+ return mca.peers
+}
+
+const numberOfPeers = 10
+
+func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool) *mockClientAggregator {
+ clients := make([]network.Peer, 0)
+ for i := 0; i < numberOfPeers; i++ {
+ runner := mockRunner{failWithNil: failWithNil, failWithError: failWithError, done: make(chan *rpc.Call)}
+ clients = append(clients, &mockRPCClient{client: &runner, log: logging.TestingLog(t)})
+ }
+ t.Logf("len(mca.clients) = %d", len(clients))
+ return &mockClientAggregator{peers: clients}
+}
+
+func getAllClientsSelectedForRound(t *testing.T, fetcher *NetworkFetcher, round basics.Round) map[FetcherClient]basics.Round {
+ selected := make(map[FetcherClient]basics.Round, 0)
+ for i := 0; i < 1000; i++ {
+ c, err := fetcher.selectClient(round)
+ if err != nil {
+ return selected
+ }
+ selected[c.(FetcherClient)] = fetcher.roundUpperBound[c]
+ }
+ return selected
+}
+
+func TestSelectValidRemote(t *testing.T) {
+ network := makeMockClientAggregator(t, false, false)
+ factory := MakeNetworkFetcherFactory(network, numberOfPeers, nil)
+ factory.log = logging.TestingLog(t)
+ fetcher := factory.New()
+ require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).peers))
+
+ var oldClient FetcherClient
+ var newClient FetcherClient
+ i := 0
+ for _, client := range fetcher.(*NetworkFetcher).peers {
+ if i == 0 {
+ oldClient = client
+ r := basics.Round(2)
+ fetcher.(*NetworkFetcher).roundUpperBound[client] = r
+ } else if i == 1 {
+ newClient = client
+ r := basics.Round(4)
+ fetcher.(*NetworkFetcher).roundUpperBound[client] = r
+ } else if i > 2 {
+ r := basics.Round(3)
+ fetcher.(*NetworkFetcher).roundUpperBound[client] = r
+ } // skip i == 2
+ i++
+ }
+
+ require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).availablePeers(1)))
+ selected := getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 1)
+ require.Equal(t, numberOfPeers, len(selected))
+ _, hasOld := selected[oldClient]
+ require.True(t, hasOld)
+
+ _, hasNew := selected[newClient]
+ require.True(t, hasNew)
+
+ require.Equal(t, numberOfPeers-1, len(fetcher.(*NetworkFetcher).availablePeers(2)))
+ selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 2)
+ require.Equal(t, numberOfPeers-1, len(selected))
+ _, hasOld = selected[oldClient]
+ require.False(t, hasOld)
+ _, hasNew = selected[newClient]
+ require.True(t, hasNew)
+
+ require.Equal(t, 2, len(fetcher.(*NetworkFetcher).availablePeers(3)))
+ selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 3)
+ require.Equal(t, 2, len(selected))
+ _, hasOld = selected[oldClient]
+ require.False(t, hasOld)
+ _, hasNew = selected[newClient]
+ require.True(t, hasNew)
+
+ require.Equal(t, 1, len(fetcher.(*NetworkFetcher).availablePeers(4)))
+ selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 4)
+ require.Equal(t, 1, len(selected))
+ _, hasOld = selected[oldClient]
+ require.False(t, hasOld)
+ _, hasNew = selected[newClient]
+ require.False(t, hasNew)
+}
+
+type dummyFetcher struct {
+ failWithNil bool
+ failWithError bool
+ fetchTimeout time.Duration
+}
+
+// FetcherClient interface
+func (df *dummyFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
+ if df.failWithNil {
+ return nil, nil
+ }
+ if df.failWithError {
+ return nil, errors.New("failing call")
+ }
+
+ timer := time.NewTimer(df.fetchTimeout)
+ defer timer.Stop()
+
+ // Fill in the dummy response with the correct round
+ dummyBlock := rpcs.EncodedBlockCert{
+ Block: bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: r,
+ },
+ },
+ Certificate: agreement.Certificate{
+ Round: r,
+ },
+ }
+
+ encodedData := protocol.Encode(&dummyBlock)
+
+ select {
+ case <-timer.C:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+
+ return encodedData, nil
+}
+
+// FetcherClient interface
+func (df *dummyFetcher) Address() string {
+ //logging.Base().Debug("dummyFetcher Address")
+ return "dummyFetcher address"
+}
+
+// FetcherClient interface
+func (df *dummyFetcher) Close() error {
+ //logging.Base().Debug("dummyFetcher Close")
+ return nil
+}
+
+func makeDummyFetchers(failWithNil bool, failWithError bool, timeout time.Duration) []FetcherClient {
+ out := make([]FetcherClient, numberOfPeers)
+ for i := range out {
+ out[i] = &dummyFetcher{failWithNil, failWithError, timeout}
+ }
+ return out
+}
+
+func TestFetchBlock(t *testing.T) {
+ fetcher := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(false, false, 100*time.Millisecond),
+ log: logging.TestingLog(t),
+ }
+
+ var err error
+ var block *bookkeeping.Block
+ var cert *agreement.Certificate
+ var client FetcherClient
+
+ fetched := false
+ for i := 0; i < numberOfPeers; i++ {
+ start := time.Now()
+ block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.NoError(t, err)
+ require.NotNil(t, client)
+ end := time.Now()
+ require.True(t, end.Sub(start) > 100*time.Millisecond)
+ require.True(t, end.Sub(start) < 100*time.Millisecond+5*time.Second) // we want to have a higher margin here, as the machine we're running on might be slow.
+ if err == nil {
+ require.NotEqual(t, nil, block)
+ require.NotEqual(t, nil, cert)
+ _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.NotNil(t, client)
+ require.NoError(t, err)
+ fetched = true
+ }
+ }
+ require.True(t, fetched)
+}
+
+func TestFetchBlockFail(t *testing.T) {
+ fetcher := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(true, false, 100*time.Millisecond),
+ log: logging.TestingLog(t),
+ }
+
+ for i := 0; i < numberOfPeers; i++ {
+ require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
+ _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.Error(t, err)
+ }
+ require.True(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
+}
+
+func TestFetchBlockAborted(t *testing.T) {
+ fetcher := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(false, false, 2*time.Second),
+ log: logging.TestingLog(t),
+ }
+
+ ctx, cf := context.WithCancel(context.Background())
+ defer cf()
+ go func() {
+ cf()
+ }()
+ start := time.Now()
+ _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1))
+ end := time.Now()
+ require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
+ require.Nil(t, client)
+ require.True(t, end.Sub(start) < 10*time.Second)
+}
+
+func TestFetchBlockTimeout(t *testing.T) {
+ fetcher := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(false, false, 10*time.Second),
+ log: logging.TestingLog(t),
+ }
+ start := time.Now()
+ ctx, cf := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cf()
+ _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1))
+ end := time.Now()
+ require.True(t, strings.Contains(err.Error(), context.DeadlineExceeded.Error()))
+ require.Nil(t, client)
+ require.True(t, end.Sub(start) >= 500*time.Millisecond)
+ require.True(t, end.Sub(start) < 10*time.Second)
+}
+
+func TestFetchBlockErrorCall(t *testing.T) {
+ fetcher := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(false, true, 10*time.Millisecond),
+ log: logging.TestingLog(t),
+ }
+
+ require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
+ _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.Error(t, err)
+ require.Nil(t, client)
+}
+
+func TestFetchBlockComposedNoOp(t *testing.T) {
+ f := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(false, false, 1*time.Millisecond),
+ log: logging.TestingLog(t),
+ }
+ fetcher := &ComposedFetcher{fetchers: []Fetcher{f, nil}}
+
+ var err error
+ var block *bookkeeping.Block
+ var cert *agreement.Certificate
+ var client FetcherClient
+
+ fetched := false
+ for i := 0; i < numberOfPeers; i++ {
+ start := time.Now()
+ block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.NoError(t, err)
+ require.NotNil(t, client)
+ end := time.Now()
+ require.True(t, end.Sub(start) >= 1*time.Millisecond)
+ require.True(t, end.Sub(start) < 1*time.Millisecond+10*time.Second) // we take a very high margin here for the fetcher to complete.
+ if err == nil {
+ require.NotEqual(t, nil, block)
+ require.NotEqual(t, nil, cert)
+ _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.NotNil(t, client)
+ require.NoError(t, err)
+ fetched = true
+ }
+ }
+ require.True(t, fetched)
+}
+
+// Make sure composed fetchers are hit in priority order
+func TestFetchBlockComposedFail(t *testing.T) {
+ f := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(true, false, 1*time.Millisecond),
+ log: logging.TestingLog(t),
+ }
+ f2 := &NetworkFetcher{
+ roundUpperBound: make(map[FetcherClient]basics.Round),
+ activeFetches: make(map[FetcherClient]int),
+ peers: makeDummyFetchers(false, false, 1*time.Millisecond),
+ log: logging.TestingLog(t),
+ }
+ fetcher := &ComposedFetcher{fetchers: []Fetcher{f, f2}}
+
+ for i := 0; i < numberOfPeers; i++ {
+ require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
+ _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.Error(t, err)
+ }
+ require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
+ for i := 0; i < numberOfPeers; i++ {
+ require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
+ _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
+ require.NotNil(t, client)
+ require.NoError(t, err)
+ }
+}
+
+func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) {
+ var user basics.Address
+ user[0] = 123
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ genesis := make(map[basics.Address]basics.AccountData)
+ genesis[user] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+ genesis[sinkAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+
+ log := logging.TestingLog(t)
+ genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ genHash := crypto.Digest{0x42}
+ const inMem = true
+ const archival = true
+ ledger, err = data.LoadLedger(
+ log, t.Name(), inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash,
+ nil, archival,
+ )
+ if err != nil {
+ t.Fatal("couldn't build ledger", err)
+ return
+ }
+ next = ledger.NextRound()
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: user,
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee},
+ FirstValid: next,
+ LastValid: next,
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: user,
+ Amount: basics.MicroAlgos{Raw: 2},
+ },
+ }
+ signedtx := transactions.SignedTxn{
+ Txn: tx,
+ }
+
+ prev, err := ledger.Block(ledger.LastRound())
+ require.NoError(t, err)
+ b.RewardsLevel = prev.RewardsLevel
+ b.BlockHeader.Round = next
+ b.BlockHeader.GenesisHash = genHash
+ b.CurrentProtocol = protocol.ConsensusCurrentVersion
+ txib, err := b.EncodeSignedTxn(signedtx, transactions.ApplyData{})
+ require.NoError(t, err)
+ b.Payset = []transactions.SignedTxnInBlock{
+ txib,
+ }
+
+ require.NoError(t, ledger.AddBlock(b, agreement.Certificate{Round: next}))
+ return
+}
+
+type basicRPCNode struct {
+ listener net.Listener
+ server http.Server
+ rmux *mux.Router
+ peers []network.Peer
+ mocks.MockNetwork
+}
+
+func (b *basicRPCNode) RegisterHTTPHandler(path string, handler http.Handler) {
+ if b.rmux == nil {
+ b.rmux = mux.NewRouter()
+ }
+ b.rmux.Handle(path, handler)
+}
+
+func (b *basicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler) {
+}
+
+func (b *basicRPCNode) start() bool {
+ var err error
+ b.listener, err = net.Listen("tcp", "")
+ if err != nil {
+ logging.Base().Error("tcp listen", err)
+ return false
+ }
+ if b.rmux == nil {
+ b.rmux = mux.NewRouter()
+ }
+ b.server.Handler = b.rmux
+ go b.server.Serve(b.listener)
+ return true
+}
+func (b *basicRPCNode) rootURL() string {
+ addr := b.listener.Addr().String()
+ rootURL := url.URL{Scheme: "http", Host: addr, Path: ""}
+ return rootURL.String()
+}
+
+func (b *basicRPCNode) stop() {
+ b.server.Close()
+}
+
+func (b *basicRPCNode) GetPeers(options ...network.PeerOption) []network.Peer {
+ return b.peers
+}
+
+type httpTestPeerSource struct {
+ peers []network.Peer
+ mocks.MockNetwork
+ dispatchHandlers []network.TaggedMessageHandler
+}
+
+func (s *httpTestPeerSource) GetPeers(options ...network.PeerOption) []network.Peer {
+ return s.peers
+}
+
+func (s *httpTestPeerSource) RegisterHandlers(dispatch []network.TaggedMessageHandler) {
+ s.dispatchHandlers = append(s.dispatchHandlers, dispatch...)
+}
+
+// implement network.HTTPPeer
+type testHTTPPeer string
+
+func (p *testHTTPPeer) GetAddress() string {
+ return string(*p)
+}
+func (p *testHTTPPeer) PrepareURL(x string) string {
+ return strings.Replace(x, "{genesisID}", "test genesisID", -1)
+}
+func (p *testHTTPPeer) GetHTTPClient() *http.Client {
+ return &http.Client{}
+}
+func (p *testHTTPPeer) GetHTTPPeer() network.HTTPPeer {
+ return p
+}
+
+func buildTestHTTPPeerSource(rootURLs ...string) *httpTestPeerSource {
+ peers := []network.Peer{}
+ for url := range rootURLs {
+ peer := testHTTPPeer(url)
+ peers = append(peers, &peer)
+ }
+ return &httpTestPeerSource{peers: peers}
+}
+func (s *httpTestPeerSource) addPeer(rootURL string) {
+ peer := testHTTPPeer(rootURL)
+ s.peers = append(s.peers, &peer)
+}
+
+// Build a ledger with genesis and one block, start an HTTPServer around it, use NetworkFetcher to fetch the block.
+// For smaller test, see ledgerService_test.go TestGetBlockHTTP
+// todo - fix this one
+func TestGetBlockHTTP(t *testing.T) {
+ // start server
+ ledger, next, b, err := buildTestLedger(t)
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ net := buildTestHTTPPeerSource()
+ ls := rpcs.RegisterLedgerService(config.GetDefaultLocal(), ledger, net, "test genesisID")
+
+ nodeA := basicRPCNode{}
+ nodeA.RegisterHTTPHandler(rpcs.LedgerServiceBlockPath, ls)
+ nodeA.start()
+ defer nodeA.stop()
+ rootURL := nodeA.rootURL()
+
+ // run fetcher
+ net.addPeer(rootURL)
+ _, ok := net.GetPeers(network.PeersConnectedOut)[0].(network.HTTPPeer)
+ require.True(t, ok)
+ factory := MakeNetworkFetcherFactory(net, numberOfPeers, nil)
+ factory.log = logging.TestingLog(t)
+ fetcher := factory.New()
+ // we have one peer, the HTTP block server
+ require.Equal(t, len(fetcher.(*NetworkFetcher).peers), 1)
+
+ var block *bookkeeping.Block
+ var cert *agreement.Certificate
+ var client FetcherClient
+
+ start := time.Now()
+ block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
+ end := time.Now()
+ require.NotNil(t, client)
+ require.NoError(t, err)
+
+ require.True(t, end.Sub(start) < 10*time.Second)
+ require.Equal(t, &b, block)
+ if err == nil {
+ require.NotEqual(t, nil, block)
+ require.NotEqual(t, nil, cert)
+ }
+}
+
+func nodePair() (*basicRPCNode, *basicRPCNode) {
+ nodeA := &basicRPCNode{}
+ nodeA.start()
+ nodeB := &basicRPCNode{}
+ nodeB.start()
+ httpPeerA := testHTTPPeer(nodeA.rootURL())
+ httpPeerB := testHTTPPeer(nodeB.rootURL())
+ nodeB.peers = []network.Peer{&httpPeerA}
+ nodeA.peers = []network.Peer{&httpPeerB}
+ return nodeA, nodeB
+}
+
+func TestGetBlockMocked(t *testing.T) {
+ var user basics.Address
+ user[0] = 123
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ genesis := make(map[basics.Address]basics.AccountData)
+ genesis[user] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+ genesis[sinkAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+
+ log := logging.TestingLog(t)
+ // A network with two nodes, A and B
+ nodeA, nodeB := nodePair()
+ defer nodeA.stop()
+ defer nodeB.stop()
+
+ // A is running the ledger service and will respond to fetch requests
+ genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ const inMem = true
+ const archival = true
+ ledgerA, err := data.LoadLedger(
+ log.With("name", "A"), t.Name(), inMem,
+ protocol.ConsensusCurrentVersion, genBal, "", crypto.Digest{},
+ nil, archival,
+ )
+ if err != nil {
+ t.Errorf("Couldn't make ledger: %v", err)
+ }
+ rpcs.RegisterLedgerService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID")
+
+ next := ledgerA.NextRound()
+ genHash := crypto.Digest{0x42}
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: user,
+ Fee: basics.MicroAlgos{Raw: proto.MinTxnFee},
+ FirstValid: next,
+ LastValid: next,
+ GenesisHash: genHash,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: user,
+ Amount: basics.MicroAlgos{Raw: 2},
+ },
+ }
+ signedtx := transactions.SignedTxn{
+ Txn: tx,
+ }
+
+ var b bookkeeping.Block
+ prev, err := ledgerA.Block(ledgerA.LastRound())
+ require.NoError(t, err)
+ b.RewardsLevel = prev.RewardsLevel
+ b.BlockHeader.Round = next
+ b.BlockHeader.GenesisHash = genHash
+ b.CurrentProtocol = protocol.ConsensusCurrentVersion
+ txib, err := b.EncodeSignedTxn(signedtx, transactions.ApplyData{})
+ require.NoError(t, err)
+ b.Payset = []transactions.SignedTxnInBlock{
+ txib,
+ }
+ require.NoError(t, ledgerA.AddBlock(b, agreement.Certificate{Round: next}))
+
+ // B tries to fetch block
+ factory := MakeNetworkFetcherFactory(nodeB, 10, nil)
+ factory.log = logging.TestingLog(t)
+ nodeBRPC := factory.New()
+ ctx, cf := context.WithTimeout(context.Background(), time.Second)
+ defer cf()
+ eblock, _, _, err := nodeBRPC.FetchBlock(ctx, next)
+ if err != nil {
+ require.Failf(t, "Error fetching block", "%v", err)
+ }
+ block, err := ledgerA.Block(next)
+ require.NoError(t, err)
+ if eblock.Hash() != block.Hash() {
+ t.Errorf("FetchBlock returned wrong block: expected %v; got %v", block.Hash(), eblock)
+ }
+}
+
+func TestGetFutureBlock(t *testing.T) {
+ log := logging.TestingLog(t)
+ // A network with two nodes, A and B
+ nodeA, nodeB := nodePair()
+ defer nodeA.stop()
+ defer nodeB.stop()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ genesis := make(map[basics.Address]basics.AccountData)
+ genesis[sinkAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+ genesis[poolAddr] = basics.AccountData{
+ Status: basics.Online,
+ MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ }
+
+ gen := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
+ // A is running the ledger service and will respond to fetch requests
+ const inMem = true
+ const archival = true
+ ledgerA, err := data.LoadLedger(
+ log.With("name", "A"), t.Name(), inMem,
+ protocol.ConsensusCurrentVersion, gen, "", crypto.Digest{},
+ nil, archival,
+ )
+ if err != nil {
+ t.Errorf("Couldn't make ledger: %v", err)
+ }
+ rpcs.RegisterLedgerService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID")
+
+ // B tries to fetch block 4
+ factory := MakeNetworkFetcherFactory(nodeB, 10, nil)
+ factory.log = logging.TestingLog(t)
+ nodeBRPC := factory.New()
+ ctx, cf := context.WithTimeout(context.Background(), time.Second)
+ defer cf()
+ _, _, client, err := nodeBRPC.FetchBlock(ctx, ledgerA.NextRound())
+ require.Error(t, err)
+ require.Nil(t, client)
+}
+
+// implement network.UnicastPeer
+type testUnicastPeer struct {
+ gn network.GossipNode
+ version string
+ responseChannels map[uint64]chan *network.Response
+ t *testing.T
+}
+
+func (p *testUnicastPeer) GetAddress() string {
+ return "test"
+}
+
+func (p *testUnicastPeer) Request(ctx context.Context, tag protocol.Tag, topics network.Topics) (resp *network.Response, e error) {
+
+ responseChannel := make(chan *network.Response, 1)
+ p.responseChannels[0] = responseChannel
+
+ ps := p.gn.(*httpTestPeerSource)
+ var dispather network.MessageHandler
+ for _, v := range ps.dispatchHandlers {
+ if v.Tag == tag {
+ dispather = v.MessageHandler
+ break
+ }
+ }
+ require.NotNil(p.t, dispather)
+ dispather.Handle(network.IncomingMessage{Tag: tag, Data: topics.MarshallTopics(), Sender: p, Net: p.gn})
+
+ // wait for the channel.
+ select {
+ case resp = <-responseChannel:
+ return resp, nil
+ case <-ctx.Done():
+ return resp, ctx.Err()
+ }
+}
+
+func (p *testUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, responseTopics network.Topics) (e error) {
+
+ hashKey := uint64(0)
+ channel, found := p.responseChannels[hashKey]
+ if !found {
+ }
+
+ select {
+ case channel <- &network.Response{Topics: responseTopics}:
+ default:
+ }
+
+ return nil
+}
+
+func (p *testUnicastPeer) Version() string {
+ return p.version
+}
+
+func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error {
+ ps := p.gn.(*httpTestPeerSource)
+ var dispather network.MessageHandler
+ for _, v := range ps.dispatchHandlers {
+ if v.Tag == tag {
+ dispather = v.MessageHandler
+ break
+ }
+ }
+ require.NotNil(p.t, dispather)
+ dispather.Handle(network.IncomingMessage{Tag: tag, Data: msg, Sender: p, Net: p.gn})
+ return nil
+}
+
+func makeTestUnicastPeer(gn network.GossipNode, version string, t *testing.T) network.UnicastPeer {
+ wsp := testUnicastPeer{}
+ wsp.gn = gn
+ wsp.t = t
+ wsp.version = version
+ wsp.responseChannels = make(map[uint64]chan *network.Response)
+ return &wsp
+}
+
+// A quick GetBlock over websockets test hitting a mocked websocket server (no actual connection)
+func TestGetBlockWS(t *testing.T) {
+ // test the WS fetcher:
+ // 1. fetcher sends UniCatchupReqTag to http peer
+ // 2. peer send message to gossip node
+ // 3. gossip node send message to ledger service
+ // 4. ledger service responds with UniCatchupResTag sending it back to the http peer
+ // 5. the http peer send it to the network
+ // 6. the network send it back to the fetcher
+
+ // start server
+ ledger, next, b, err := buildTestLedger(t)
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ versions := []string{"1", "2.1"}
+ for _, version := range versions { // range network.SupportedProtocolVersions {
+
+ net := buildTestHTTPPeerSource()
+ ledgerServiceConfig := config.GetDefaultLocal()
+ ledgerServiceConfig.CatchupParallelBlocks = 5
+ ls := rpcs.RegisterLedgerService(ledgerServiceConfig, ledger, net, "test genesisID")
+
+ ls.Start()
+
+ up := makeTestUnicastPeer(net, version, t)
+ net.peers = append(net.peers, up)
+
+ fs := rpcs.RegisterWsFetcherService(logging.TestingLog(t), net)
+
+ _, ok := net.GetPeers(network.PeersConnectedIn)[0].(network.UnicastPeer)
+ require.True(t, ok)
+ factory := MakeNetworkFetcherFactory(net, numberOfPeers, fs)
+ factory.log = logging.TestingLog(t)
+ fetcher := factory.NewOverGossip(protocol.UniCatchupReqTag)
+ // we have one peer, the Ws block server
+ require.Equal(t, fetcher.NumPeers(), 1)
+
+ var block *bookkeeping.Block
+ var cert *agreement.Certificate
+ var client FetcherClient
+
+ // start := time.Now()
+ block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
+ require.NotNil(t, client)
+ require.NoError(t, err)
+ // end := time.Now()
+ // require.True(t, end.Sub(start) < 10*time.Second)
+ require.Equal(t, &b, block)
+ if err == nil {
+ require.NotEqual(t, nil, block)
+ require.NotEqual(t, nil, cert)
+ }
+ fetcher.Close()
+ }
+}
diff --git a/rpcs/httpFetcher.go b/catchup/httpFetcher.go
similarity index 90%
rename from rpcs/httpFetcher.go
rename to catchup/httpFetcher.go
index 18bd05e4a9..e09d7d0587 100644
--- a/rpcs/httpFetcher.go
+++ b/catchup/httpFetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see .
-package rpcs
+package catchup
import (
"context"
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/rpcs"
)
// set max fetcher size to 5MB, this is enough to fit the block and certificate
@@ -45,6 +46,7 @@ type FetcherClient interface {
type HTTPFetcher struct {
peer network.HTTPPeer
rootURL string
+ net network.GossipNode
client *http.Client
@@ -52,8 +54,8 @@ type HTTPFetcher struct {
}
// MakeHTTPFetcher wraps an HTTPPeer so that we can get blocks from it
-func MakeHTTPFetcher(log logging.Logger, peer network.HTTPPeer) (fc FetcherClient) {
- fc = &HTTPFetcher{peer, peer.GetAddress(), peer.GetHTTPClient(), log}
+func MakeHTTPFetcher(log logging.Logger, peer network.HTTPPeer, net network.GossipNode) (fc FetcherClient) {
+ fc = &HTTPFetcher{peer, peer.GetAddress(), net, peer.GetHTTPClient(), log}
return
}
@@ -104,13 +106,13 @@ func (hf *HTTPFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data
// TODO: Temporarily allow old and new content types so we have time for lazy upgrades
// Remove this 'old' string after next release.
const ledgerResponseContentTypeOld = "application/algorand-block-v1"
- if contentTypes[0] != LedgerResponseContentType && contentTypes[0] != ledgerResponseContentTypeOld {
+ if contentTypes[0] != rpcs.LedgerResponseContentType && contentTypes[0] != ledgerResponseContentTypeOld {
hf.log.Warnf("http block fetcher response has an invalid content type : %s", contentTypes[0])
response.Body.Close()
return nil, fmt.Errorf("http block fetcher invalid content type '%s'", contentTypes[0])
}
- return responseBytes(response, hf.log, fetcherMaxBlockBytes)
+ return rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes)
}
// Address is part of FetcherClient interface.
diff --git a/catchup/service.go b/catchup/service.go
index d906f6b3b1..fabcc5f565 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -63,7 +63,7 @@ type Service struct {
syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops
cfg config.Local
ledger Ledger
- fetcherFactory rpcs.FetcherFactory
+ fetcherFactory FetcherFactory
ctx context.Context
cancel func()
done chan struct{}
@@ -81,7 +81,7 @@ type Service struct {
lastSupportedRound basics.Round
unmatchedPendingCertificates <-chan PendingUnmatchedCertificate
- latestRoundFetcherFactory rpcs.FetcherFactory
+ latestRoundFetcherFactory FetcherFactory
}
// A BlockAuthenticator authenticates blocks given a certificate.
@@ -102,13 +102,13 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode
s = &Service{}
s.ctx, s.cancel = context.WithCancel(context.Background())
s.cfg = config
- s.fetcherFactory = rpcs.MakeNetworkFetcherFactory(net, catchupPeersForSync, wsf)
+ s.fetcherFactory = MakeNetworkFetcherFactory(net, catchupPeersForSync, wsf)
s.ledger = ledger
s.net = net
s.auth = auth
s.unmatchedPendingCertificates = unmatchedPendingCertificates
- s.latestRoundFetcherFactory = rpcs.MakeNetworkFetcherFactory(net, blockQueryPeerLimit, wsf)
+ s.latestRoundFetcherFactory = MakeNetworkFetcherFactory(net, blockQueryPeerLimit, wsf)
s.log = log.With("Context", "sync")
s.InitialSyncDone = make(chan struct{})
@@ -153,8 +153,8 @@ func (s *Service) SynchronizingTime() time.Duration {
}
// function scope to make a bunch of defer statements better
-func (s *Service) innerFetch(fetcher rpcs.Fetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc rpcs.FetcherClient, err error) {
- ctx, cf := context.WithTimeout(s.ctx, rpcs.DefaultFetchTimeout)
+func (s *Service) innerFetch(fetcher Fetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) {
+ ctx, cf := context.WithTimeout(s.ctx, DefaultFetchTimeout)
defer cf()
stopWaitingForLedgerRound := make(chan struct{})
defer close(stopWaitingForLedgerRound)
@@ -170,7 +170,7 @@ func (s *Service) innerFetch(fetcher rpcs.Fetcher, r basics.Round) (blk *bookkee
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
// Returns false if we couldn't fetch or write (i.e., if we failed even after a given number of retries or if we were told to abort.)
-func (s *Service) fetchAndWrite(fetcher rpcs.Fetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool {
+func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool {
i := 0
hasLookback := false
for !fetcher.OutOfPeers(r) {
@@ -287,7 +287,7 @@ func (s *Service) fetchAndWrite(fetcher rpcs.Fetcher, r basics.Round, prevFetchC
type task func() basics.Round
-func (s *Service) pipelineCallback(fetcher rpcs.Fetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round {
+func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round {
return func() basics.Round {
fetchResult := s.fetchAndWrite(fetcher, r, prevFetchCompleteChan, lookbackChan)
@@ -483,7 +483,7 @@ func (s *Service) sync(cert *PendingUnmatchedCertificate) {
seedLookback := uint64(2)
proto, err := s.ledger.ConsensusParams(pr)
if err != nil {
- s.log.Errorf("catchup: could not get consensus parameters for round %v: $%v", pr, err)
+ s.log.Errorf("catchup: could not get consensus parameters for round %v: %v", pr, err)
} else {
seedLookback = proto.SeedLookback
}
@@ -578,9 +578,9 @@ func (s *Service) nextRoundIsNotSupported(nextRound basics.Round) bool {
lastLedgerRound := s.ledger.LastRound()
supportedUpgrades := config.Consensus
- block, error := s.ledger.Block(lastLedgerRound)
- if error != nil {
- s.log.Errorf("nextRoundIsNotSupported: could not retrieve last block (%d) from the ledger.", lastLedgerRound)
+ block, err := s.ledger.Block(lastLedgerRound)
+ if err != nil {
+ s.log.Errorf("nextRoundIsNotSupported: could not retrieve last block (%d) from the ledger : %v", lastLedgerRound, err)
return false
}
bh := block.BlockHeader
diff --git a/catchup/service_test.go b/catchup/service_test.go
index d12d313550..bed670a148 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -36,7 +36,6 @@ import (
"github.com/algorand/go-algorand/data/committee"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/rpcs"
)
var defaultConfig = config.Local{
@@ -62,13 +61,13 @@ func makeMockFactory(fetcher *MockedFetcher) *MockedFetcherFactory {
return &factory
}
-func (factory *MockedFetcherFactory) New() rpcs.Fetcher {
+func (factory *MockedFetcherFactory) New() Fetcher {
factory.mu.Lock()
defer factory.mu.Unlock()
return factory.fetcher
}
-func (factory *MockedFetcherFactory) NewOverGossip(tag protocol.Tag) rpcs.Fetcher {
+func (factory *MockedFetcherFactory) NewOverGossip(tag protocol.Tag) Fetcher {
return factory.New()
}
@@ -107,9 +106,9 @@ type MockedFetcher struct {
mu deadlock.Mutex
}
-func (m *MockedFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bookkeeping.Block, *agreement.Certificate, rpcs.FetcherClient, error) {
+func (m *MockedFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error) {
if m.timeout {
- time.Sleep(rpcs.DefaultFetchTimeout + time.Second)
+ time.Sleep(DefaultFetchTimeout + time.Second)
}
time.Sleep(m.latency)
diff --git a/rpcs/wsFetcher.go b/catchup/wsFetcher.go
similarity index 96%
rename from rpcs/wsFetcher.go
rename to catchup/wsFetcher.go
index 7005a1bb34..ef3cc3f4a8 100644
--- a/rpcs/wsFetcher.go
+++ b/catchup/wsFetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-package rpcs
+package catchup
import (
"context"
@@ -28,6 +28,7 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/rpcs"
)
// Buffer messages from the network to have fewer drops.
@@ -43,7 +44,7 @@ type WsFetcher struct {
clients map[network.Peer]*wsFetcherClient
// service
- service *WsFetcherService
+ service *rpcs.WsFetcherService
// metadata
log logging.Logger
@@ -53,7 +54,7 @@ type WsFetcher struct {
// MakeWsFetcher creates a fetcher that fetches over the gossip network.
// It instantiates a NetworkFetcher under the hood, registers as a handler for the given message tag,
// and demuxes messages appropriately to the corresponding fetcher clients.
-func MakeWsFetcher(log logging.Logger, tag protocol.Tag, peers []network.Peer, service *WsFetcherService) Fetcher {
+func MakeWsFetcher(log logging.Logger, tag protocol.Tag, peers []network.Peer, service *rpcs.WsFetcherService) Fetcher {
f := &WsFetcher{
log: log,
tag: tag,
@@ -104,7 +105,7 @@ func (wsf *WsFetcher) Close() {
type wsFetcherClient struct {
target network.UnicastPeer // the peer where we're going to send the request.
tag protocol.Tag // the tag that is associated with the request/
- service *WsFetcherService // the fetcher service. This is where we perform the actual request and waiting for the response.
+ service *rpcs.WsFetcherService // the fetcher service. This is where we perform the actual request and waiting for the response.
pendingCtxs map[context.Context]context.CancelFunc // a map of all the current pending contexts.
closed bool // a flag indicating that the fetcher will not perform additional block retrivals.
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index 189581847f..f09ecbd2c0 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -159,6 +159,12 @@ func main() {
log.Fatalf("Cannot load config: %v", err)
}
+ err = config.LoadConfigurableConsensusProtocols(absolutePath)
+ if err != nil {
+ // log is not setup yet, this will log to stderr
+ log.Fatalf("Unable to load optional consensus protocols file: %v", err)
+ }
+
// Enable telemetry hook in daemon to send logs to cloud
// If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests
isTest := os.Getenv("ALGOTEST") != ""
@@ -243,7 +249,23 @@ func main() {
log.Fatalf("DefaultDeadlock is somehow not set to an expected value (enable / disable): %s", config.DefaultDeadlock)
}
- err = s.Initialize(cfg)
+ var phonebookAddresses []string
+ if peerOverrideArray != nil {
+ phonebookAddresses = peerOverrideArray
+ } else {
+ ex, err := os.Executable()
+ if err != nil {
+ log.Errorf("cannot locate node executable: %s", err)
+ } else {
+ phonebookDir := filepath.Dir(ex)
+ phonebookAddresses, err = config.LoadPhonebook(phonebookDir)
+ if err != nil {
+ log.Debugf("Cannot load static phonebook: %v", err)
+ }
+ }
+ }
+
+ err = s.Initialize(cfg, phonebookAddresses)
if err != nil {
fmt.Fprintln(os.Stderr, err)
log.Error(err)
@@ -254,10 +276,6 @@ func main() {
return
}
- if peerOverrideArray != nil {
- s.OverridePhonebook(peerOverrideArray...)
- }
-
deadlockState := "enabled"
if deadlock.Opts.Disable {
deadlockState = "disabled"
diff --git a/cmd/auctionbank/main.go b/cmd/auctionbank/main.go
index 2677345599..96bc25e0e5 100644
--- a/cmd/auctionbank/main.go
+++ b/cmd/auctionbank/main.go
@@ -402,7 +402,7 @@ func depositAuction(w http.ResponseWriter, r *http.Request) {
var status depositStatus
status.Success = true
- status.SignedDepositNote = protocol.Encode(auction.NoteField{
+ status.SignedDepositNote = protocol.Encode(&auction.NoteField{
Type: auction.NoteDeposit,
SignedDeposit: sigDep,
})
diff --git a/cmd/auctionmaster/main.go b/cmd/auctionmaster/main.go
index 9072e1e023..bf2e9d0620 100644
--- a/cmd/auctionmaster/main.go
+++ b/cmd/auctionmaster/main.go
@@ -97,7 +97,7 @@ func readFile(filename string) ([]byte, error) {
// atomicEncode writes the encoding of [obj] using atomicWrite
func atomicEncode(filename string, obj interface{}) {
- atomicWrite(filename, protocol.Encode(obj))
+ atomicWrite(filename, protocol.EncodeReflect(obj))
}
// readAndDecode reads data from [filename] using readFile, and
@@ -108,7 +108,7 @@ func readAndDecode(filename string, obj interface{}) {
panic(fmt.Sprintf("reading %s: %v", filename, err))
}
- err = protocol.Decode(data, obj)
+ err = protocol.DecodeReflect(data, obj)
if err != nil {
panic(fmt.Sprintf("decoding from %s: %v", filename, err))
}
@@ -140,7 +140,7 @@ func noteTxn(masterKey *crypto.SignatureSecrets, note auction.NoteField) transac
Fee: basics.MicroAlgos{Raw: *notesFee},
FirstValid: basics.Round(*txnRound),
LastValid: basics.Round(*txnRound + maxTxnLife),
- Note: protocol.Encode(note),
+ Note: protocol.Encode(&note),
GenesisHash: genHash,
},
PaymentTxnFields: transactions.PaymentTxnFields{
@@ -372,7 +372,7 @@ func settleAuction() {
Msig: msigBase,
}
- paymentData = append(paymentData, protocol.Encode(signedTx)...)
+ paymentData = append(paymentData, protocol.Encode(&signedTx)...)
}
atomicWrite(fmt.Sprintf("auction%d.paymenttx", auctionID), paymentData)
diff --git a/cmd/auctionminion/main.go b/cmd/auctionminion/main.go
index 9d7b3e1c99..9bf4892398 100644
--- a/cmd/auctionminion/main.go
+++ b/cmd/auctionminion/main.go
@@ -235,7 +235,7 @@ func main() {
fmt.Printf("Collected %d auctionmaster inputs\n", len(results))
outfile := fmt.Sprintf("auction%d.inputs", cfg.AuctionID)
- err = ioutil.WriteFile(outfile, protocol.Encode(results), 0666)
+ err = ioutil.WriteFile(outfile, protocol.EncodeReflect(results), 0666)
if err != nil {
fmt.Printf("Cannot write to %s: %v\n", outfile, err)
os.Exit(1)
diff --git a/cmd/catchupsrv/main.go b/cmd/catchupsrv/main.go
index e5f1129830..261a30d212 100644
--- a/cmd/catchupsrv/main.go
+++ b/cmd/catchupsrv/main.go
@@ -19,8 +19,11 @@ package main
import (
"encoding/base64"
"flag"
+ "fmt"
"io/ioutil"
+ "math/rand"
"net/http"
+ "os"
"path"
"strconv"
@@ -35,6 +38,7 @@ import (
var addrFlag = flag.String("addr", "127.0.0.1:4160", "Address to listen on")
var dirFlag = flag.String("dir", "", "Directory containing catchup blocks")
+var tarDirFlag = flag.String("tardir", "", "Directory containing catchup blocks in M_N.tar.bz2")
func main() {
flag.Parse()
@@ -42,8 +46,18 @@ func main() {
log := logging.Base()
log.SetLevel(logging.Info)
- if *dirFlag == "" {
- panic("Must specify -dir")
+ if *dirFlag == "" && *tarDirFlag == "" {
+ panic("Must specify -dir or -tardir")
+ }
+
+ var blocktars *tarBlockSet
+ if *tarDirFlag != "" {
+ var err error
+ blocktars, err = openTarBlockDir(*tarDirFlag)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%s: error opening block tar dir, %v\n", *tarDirFlag, err)
+ os.Exit(1)
+ }
}
if *downloadFlag {
@@ -94,22 +108,33 @@ func main() {
roundStr := pathVars["round"]
genesisID := pathVars["genesisID"]
- blkPath, err := stringBlockToPath(roundStr)
+ roundNumber, err := stringToBlock(roundStr)
if err != nil {
log.Infof("%s %s: %v", r.Method, r.URL, err)
http.NotFound(w, r)
return
}
- data, err := ioutil.ReadFile(
- path.Join(
- *dirFlag,
- "v"+versionStr,
- genesisID,
- "block",
- blkPath,
- ),
- )
+ var data []byte
+ if *dirFlag != "" {
+ blkPath := blockToPath(roundNumber)
+ data, err = ioutil.ReadFile(
+ path.Join(
+ *dirFlag,
+ "v"+versionStr,
+ genesisID,
+ "block",
+ blkPath,
+ ),
+ )
+ } else if blocktars != nil {
+ data, err = blocktars.getBlock(roundNumber)
+ } else {
+ fmt.Fprintf(os.Stderr, "config err, no block dir and no block tar dir\n")
+ defer os.Exit(1)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
if err != nil {
log.Infof("%s %s: %v", r.Method, r.URL, err)
http.NotFound(w, r)
@@ -120,8 +145,12 @@ func main() {
w.Header().Set("Content-Length", strconv.Itoa(len(data)))
w.WriteHeader(http.StatusOK)
w.Write(data)
+ if rand.Intn(20) == 0 {
+ log.Infof("OK %d", roundNumber)
+ }
})
+ log.Infof("serving %s", srv.Addr)
err := srv.ListenAndServe()
if err != nil {
panic(err)
diff --git a/cmd/catchupsrv/tarblocks.go b/cmd/catchupsrv/tarblocks.go
new file mode 100644
index 0000000000..726add15c3
--- /dev/null
+++ b/cmd/catchupsrv/tarblocks.go
@@ -0,0 +1,228 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "archive/tar"
+ "compress/bzip2"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-deadlock"
+)
+
+type tarBlockSet struct {
+ // known tarfiles
+ entries []*tarBlockFile
+
+ // tarfiles with an open handle
+ // a cache with random replacement
+ open []*tarBlockFile
+
+ // replacement index
+ nextOpen int
+
+ l deadlock.Mutex
+}
+
+const maxOpenTars = 3
+
+func openTarBlockDir(path string) (tars *tarBlockSet, err error) {
+ out := &tarBlockSet{}
+ matches, err := filepath.Glob(filepath.Join(path, "*_*.tar.bz2"))
+ if err != nil {
+ return nil, err
+ }
+ out.entries = make([]*tarBlockFile, 0, len(matches))
+ for _, path := range matches {
+ tbf := parseTarPathname(path)
+ if tbf != nil {
+ out.entries = append(out.entries, tbf)
+ }
+ }
+ logging.Base().Infof("found %d block tarfiles", len(out.entries))
+ out.open = make([]*tarBlockFile, 0, maxOpenTars)
+ return out, nil
+}
+
+func (tars *tarBlockSet) getBlock(round uint64) (data []byte, err error) {
+ tars.l.Lock()
+ defer tars.l.Unlock()
+ for _, tbf := range tars.open {
+ if tbf.first <= round && round <= tbf.last {
+ return tbf.getBlock(round)
+ }
+ }
+ for _, tbf := range tars.entries {
+ if tbf.first <= round && round <= tbf.last {
+ if len(tars.open) >= maxOpenTars {
+ tars.open[tars.nextOpen].close()
+ tars.open[tars.nextOpen] = tbf
+ tars.nextOpen = (tars.nextOpen + 1) % len(tars.open)
+ } else {
+ tars.open = append(tars.open, tbf)
+ }
+ return tbf.getBlock(round)
+ }
+ }
+ return nil, nil
+}
+
+type tarBlockFile struct {
+ path string
+ first uint64
+ last uint64
+
+ // fields valid when tarfile is open
+ rawFile io.ReadCloser
+ bz2Stream io.Reader
+ tarfile *tar.Reader
+ current *tar.Header
+ currentRound uint64
+
+ l deadlock.Mutex
+
+ blocks map[uint64][]byte
+}
+
+func parseTarPathname(path string) (tbf *tarBlockFile) {
+ fname := filepath.Base(path)
+ underscore := strings.IndexRune(fname, '_')
+ if underscore < 0 {
+ return nil
+ }
+ dottar := strings.Index(fname, ".tar")
+ if dottar < 0 {
+ return nil
+ }
+ first, err := strconv.ParseUint(fname[:underscore], 10, 64)
+ if err != nil {
+ return nil
+ }
+ last, err := strconv.ParseUint(fname[underscore+1:dottar], 10, 64)
+ if err != nil {
+ return nil
+ }
+ return &tarBlockFile{
+ path: path,
+ first: first,
+ last: last,
+ }
+}
+
+func (tbf *tarBlockFile) _open() (err error) {
+ if tbf.tarfile != nil {
+ logging.Base().Infof("%s already open", tbf.path)
+ return nil
+ }
+ tbf.rawFile, err = os.Open(tbf.path)
+ if err != nil {
+ err = fmt.Errorf("%s: os.open %v", tbf.path, err)
+ tbf.rawFile = nil
+ return
+ }
+ logging.Base().Infof("open %p %s", tbf, tbf.path)
+ if strings.HasSuffix(tbf.path, ".bz2") {
+ tbf.bz2Stream = bzip2.NewReader(tbf.rawFile)
+ tbf.tarfile = tar.NewReader(tbf.bz2Stream)
+ } else {
+ tbf.tarfile = tar.NewReader(tbf.rawFile)
+ }
+ tbf.blocks = make(map[uint64][]byte, 1000)
+ return nil
+}
+
+func (tbf *tarBlockFile) close() (err error) {
+ tbf.l.Lock()
+ defer tbf.l.Unlock()
+ return tbf._close()
+}
+
+func (tbf *tarBlockFile) _close() (err error) {
+ if tbf.rawFile != nil {
+ err = tbf.rawFile.Close()
+ logging.Base().Infof("close %p %s, %v", tbf, tbf.path, err)
+ tbf.rawFile = nil
+ tbf.bz2Stream = nil
+ tbf.tarfile = nil
+ tbf.current = nil
+ tbf.blocks = nil
+ } else {
+ logging.Base().Infof("close %p %s", tbf, tbf.path)
+ }
+ return
+}
+
+func (tbf *tarBlockFile) getBlock(round uint64) (data []byte, err error) {
+ tbf.l.Lock()
+ defer tbf.l.Unlock()
+ if tbf.blocks != nil {
+ var ok bool
+ data, ok = tbf.blocks[round]
+ if ok {
+ return
+ }
+ }
+ //logging.Base().Infof("get block %d", round)
+ //defer logging.Base().Infof("get block %d done %v", round, err)
+ if tbf.tarfile == nil {
+ err = tbf._open()
+ if err != nil {
+ err = fmt.Errorf("%s: open, %v", tbf.path, err)
+ return
+ }
+ if tbf.tarfile == nil {
+ err = fmt.Errorf("%s: tarfile didn't open", tbf.path)
+ return
+ }
+ }
+ err = nil
+ for true {
+ tbf.current, err = tbf.tarfile.Next()
+ if err == io.EOF {
+ tbf._close()
+ // we don't have it
+ return nil, nil
+ }
+ if err != nil {
+ err = fmt.Errorf("%s: next, %v", tbf.path, err)
+ tbf._close()
+ return nil, err
+ }
+ tbf.currentRound, err = strconv.ParseUint(tbf.current.Name, 10, 64)
+ if err != nil {
+ err = fmt.Errorf("%s: could not parse block file name %#v, %v", tbf.path, tbf.current.Name, err)
+ return nil, err
+ }
+ data = make([]byte, tbf.current.Size)
+ _, err = io.ReadFull(tbf.tarfile, data)
+ if err != nil {
+ err = fmt.Errorf("%s: read %s, %v", tbf.path, tbf.current.Name, err)
+ }
+ tbf.blocks[tbf.currentRound] = data
+ if tbf.currentRound == round {
+ return
+ }
+ }
+ return nil, errors.New("this should be unreachable")
+}
diff --git a/cmd/genesis/newgenesis.go b/cmd/genesis/newgenesis.go
index bba3d8d3b7..0b9fe51467 100644
--- a/cmd/genesis/newgenesis.go
+++ b/cmd/genesis/newgenesis.go
@@ -21,6 +21,7 @@ import (
"fmt"
"log"
+ "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/util"
)
@@ -53,7 +54,7 @@ func main() {
genesisData.NetworkName = *netName
}
- err = gen.GenerateGenesisFiles(genesisData, *outDir, !*quiet)
+ err = gen.GenerateGenesisFiles(genesisData, config.Consensus, *outDir, !*quiet)
if err != nil {
reportErrorf("Cannot write genesis files: %s", err)
}
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 157ca64367..7553a6c4b2 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -1018,6 +1018,7 @@ var importRootKeysCmd = &cobra.Command{
// Fetch an account.Participation from the database
root, err := algodAcct.RestoreRoot(handle)
+ handle.Close()
if err != nil {
// Couldn't read it, skip it
err = nil
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 9159aad897..f2fb2c0213 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -207,7 +207,7 @@ func writeTxnToFile(client libgoal.Client, signTx bool, dataDir string, walletNa
return err
}
// Write the SignedTxn to the output file
- return writeFile(filename, protocol.Encode(stxn), 0600)
+ return writeFile(filename, protocol.Encode(&stxn), 0600)
}
func getProgramArgs() [][]byte {
@@ -392,7 +392,7 @@ var sendCmd = &cobra.Command{
}
}
} else {
- err = writeFile(txFilename, protocol.Encode(stx), 0600)
+ err = writeFile(txFilename, protocol.Encode(&stx), 0600)
if err != nil {
reportErrorf(err.Error())
}
@@ -514,7 +514,7 @@ var rawsendCmd = &cobra.Command{
}
fmt.Printf(" %s: %s\n", txid, errmsg)
- rejectsData = append(rejectsData, protocol.Encode(txn)...)
+ rejectsData = append(rejectsData, protocol.Encode(&txn)...)
}
f, err := os.OpenFile(rejectsFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
@@ -669,7 +669,7 @@ var signCmd = &cobra.Command{
}
}
- outData = append(outData, protocol.Encode(signedTxn)...)
+ outData = append(outData, protocol.Encode(&signedTxn)...)
count++
}
err = writeFile(outFilename, outData, 0600)
@@ -715,7 +715,7 @@ var groupCmd = &cobra.Command{
var outData []byte
for _, txn := range txns {
txn.Txn.Group = crypto.HashObj(group)
- outData = append(outData, protocol.Encode(txn)...)
+ outData = append(outData, protocol.Encode(&txn)...)
}
err = writeFile(outFilename, outData, 0600)
@@ -756,7 +756,7 @@ var splitCmd = &cobra.Command{
outBase := outFilename[:len(outFilename)-len(outExt)]
for idx, txn := range txns {
fn := fmt.Sprintf("%s-%d%s", outBase, idx, outExt)
- err = writeFile(fn, protocol.Encode(txn), 0600)
+ err = writeFile(fn, protocol.Encode(&txn), 0600)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
@@ -850,7 +850,7 @@ var compileCmd = &cobra.Command{
reportErrorf(errorSigningTX, err)
}
ls := transactions.LogicSig{Logic: program, Sig: signature}
- outblob = protocol.Encode(ls)
+ outblob = protocol.Encode(&ls)
}
if !noProgramOutput {
fout, err := os.Create(outname)
diff --git a/cmd/goal/inspect.go b/cmd/goal/inspect.go
index 8b81568a4d..f00b189c22 100644
--- a/cmd/goal/inspect.go
+++ b/cmd/goal/inspect.go
@@ -106,7 +106,7 @@ func inspectTxn(stxn transactions.SignedTxn) (sti inspectSignedTxn, err error) {
err = fmt.Errorf("non-idempotent transformation to inspectSignedTxn (DeepEqual)")
return
}
- if !reflect.DeepEqual(protocol.Encode(sti), protocol.Encode(stxn)) {
+ if !reflect.DeepEqual(protocol.EncodeReflect(sti), protocol.Encode(&stxn)) {
err = fmt.Errorf("non-idempotent transformation to inspectSignedTxn (protocol.Encode)")
return
}
diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go
index 5e96e58268..2bc7b221ca 100644
--- a/cmd/goal/messages.go
+++ b/cmd/goal/messages.go
@@ -66,6 +66,7 @@ const (
errorNodeRunning = "Node must be stopped before writing APIToken"
errorNodeFailGenToken = "Cannot generate API token: %s"
errorNodeCreation = "Error during node creation: %v"
+ errorNodeManagedBySystemd = "This node is managed by systemd, you must run the following command to make your desired state change to your node:\n\nsystemctl %s algorand.service"
errorKill = "Cannot kill node: %s"
errorCloningNode = "Error cloning the node: %s"
infoNodeCloned = "Node cloned successfully to: %s"
@@ -116,6 +117,22 @@ const (
multisigProgramCollision = "should have at most one of --program/-p | --program-bytes/-P | --lsig/-L"
+ tealsignMutKeyArgs = "--keyfile and --account are mutually exclusive"
+ tealsignMutLsigArgs = "Need exactly one of --contract-addr or --lsig-txn"
+ tealsignKeyfileFail = "Failed to read keyfile: %v"
+ tealsignNoWithAcct = "--account is not yet supported"
+ tealsignEmptyLogic = "LogicSig must have non-empty program"
+ tealsignParseAddr = "Failed to parse contract addr: %v"
+ tealsignParseData = "Failed to parse data to sign: %v"
+ tealsignParseb64 = "failed to base64 decode data to sign: %v"
+ tealsignParseb32 = "failed to base32 decode data to sign: %v"
+ tealsignTxIDLsigReq = "--sign-txid requires --lsig-txn"
+ tealsignSetArgLsigReq = "--set-lsig-arg-idx requires --lsig-txn"
+ tealsignDataReq = "need exactly one of --sign-txid, --data-file, --data-b64, or --data-b32"
+ tealsignInfoSig = "Generated signature: %s"
+ tealsignTooManyArg = "--set-lsig-arg-idx too large, maximum of %d arguments"
+ tealsignInfoWroteSig = "Wrote signature for %s to LSig.Args[%d]"
+
// Wallet
infoRecoveryPrompt = "Please type your recovery mnemonic below, and hit return when you are done: "
infoChoosePasswordPrompt = "Please choose a password for wallet '%s': "
diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go
index d58f733761..5198e549ad 100644
--- a/cmd/goal/multisig.go
+++ b/cmd/goal/multisig.go
@@ -126,7 +126,7 @@ var addSigCmd = &cobra.Command{
// The following line makes stxn.cachedEncodingLen incorrect, but it's okay because we're just serializing it to a file
stxn.Msig = msig
- outData = append(outData, protocol.Encode(stxn)...)
+ outData = append(outData, protocol.Encode(&stxn)...)
}
err = writeFile(txFilename, outData, 0600)
@@ -214,7 +214,7 @@ var signProgramCmd = &cobra.Command{
reportErrorf(errorSigningTX, err)
}
lsig.Msig = msig
- lsigblob := protocol.Encode(lsig)
+ lsigblob := protocol.Encode(&lsig)
err = writeFile(outname, lsigblob, 0600)
if err != nil {
reportErrorf("%s: %s", outname, err)
@@ -284,7 +284,7 @@ var mergeSigCmd = &cobra.Command{
// Write out the transactions to the output file
var mergedData []byte
for _, txn := range mergedTxns {
- mergedData = append(mergedData, protocol.Encode(txn)...)
+ mergedData = append(mergedData, protocol.Encode(&txn)...)
}
err := writeFile(txFilename, mergedData, 0600)
diff --git a/cmd/goal/network.go b/cmd/goal/network.go
index 75b2153525..3b34a23265 100644
--- a/cmd/goal/network.go
+++ b/cmd/goal/network.go
@@ -93,7 +93,7 @@ var networkCreateCmd = &cobra.Command{
panic(err)
}
- network, err := netdeploy.CreateNetworkFromTemplate(networkName, networkRootDir, networkTemplateFile, binDir, !noImportKeys, nil)
+ network, err := netdeploy.CreateNetworkFromTemplate(networkName, networkRootDir, networkTemplateFile, binDir, !noImportKeys, nil, nil)
if err != nil {
if noClean {
reportInfof(" ** failed ** - Preserving network rootdir '%s'", networkRootDir)
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index eeaf7844b2..d44dba2d53 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -107,6 +107,10 @@ var startCmd = &cobra.Command{
panic(err)
}
onDataDirs(func(dataDir string) {
+ if libgoal.AlgorandDaemonSystemdManaged(dataDir) {
+ reportErrorf(errorNodeManagedBySystemd, "start")
+ }
+
nc := nodecontrol.MakeNodeController(binDir, dataDir)
nodeArgs := nodecontrol.AlgodStartArgs{
PeerAddress: peerDial,
@@ -153,6 +157,10 @@ var stopCmd = &cobra.Command{
panic(err)
}
onDataDirs(func(dataDir string) {
+ if libgoal.AlgorandDaemonSystemdManaged(dataDir) {
+ reportErrorf(errorNodeManagedBySystemd, "stop")
+ }
+
nc := nodecontrol.MakeNodeController(binDir, dataDir)
log.Info(infoTryingToStopNode)
@@ -177,6 +185,10 @@ var restartCmd = &cobra.Command{
panic(err)
}
onDataDirs(func(dataDir string) {
+ if libgoal.AlgorandDaemonSystemdManaged(dataDir) {
+ reportErrorf(errorNodeManagedBySystemd, "restart")
+ }
+
nc := nodecontrol.MakeNodeController(binDir, dataDir)
_, err = nc.GetAlgodPID()
diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go
new file mode 100644
index 0000000000..d0a6e7af30
--- /dev/null
+++ b/cmd/goal/tealsign.go
@@ -0,0 +1,229 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package main
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "io/ioutil"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/protocol"
+
+ "github.com/spf13/cobra"
+)
+
+var (
+ keyFilename string
+ signerAcct string
+ lsigTxnFilename string
+ contractAddr string
+ signTxID bool
+ dataFile string
+ datab64 string
+ datab32 string
+ setLsigArg int
+)
+
+func init() {
+ clerkCmd.AddCommand(tealsignCmd)
+
+ tealsignCmd.Flags().StringVar(&keyFilename, "keyfile", "", "algokey private key file to sign with")
+ tealsignCmd.Flags().StringVar(&signerAcct, "account", "", "Address of account to sign with")
+ tealsignCmd.Flags().StringVar(&lsigTxnFilename, "lsig-txn", "", "Transaction with logicsig to sign data for")
+ tealsignCmd.Flags().StringVar(&contractAddr, "contract-addr", "", "Contract address to sign data for. not necessary if --lsig-txn is provided")
+ tealsignCmd.Flags().BoolVar(&signTxID, "sign-txid", false, "Use the txid of --lsig-txn as the data to sign")
+ tealsignCmd.Flags().StringVar(&dataFile, "data-file", "", "Data file to sign")
+ tealsignCmd.Flags().StringVar(&datab64, "data-b64", "", "base64 data to sign")
+ tealsignCmd.Flags().StringVar(&datab32, "data-b32", "", "base32 data to sign")
+ tealsignCmd.Flags().IntVar(&setLsigArg, "set-lsig-arg-idx", -1, "If --lsig-txn is also specified, set the lsig arg at this index to the raw signature bytes. Overwrites any existing argument at this index. Updates --lsig-txn file in place. nil args will be appended until index is valid.")
+}
+
+var tealsignCmd = &cobra.Command{
+ Use: "tealsign",
+ Short: "Sign data to be verified in a TEAL program",
+ Long: `Sign data to be verified in a TEAL program.
+
+Data verified by the ed25519verify TEAL opcode must be domain separated. As part of this process, the signed payload includes the hash of the program logic. This hash must be specified. To do this, provide a transaction whose logic sig contains the program via --lsig-txn, or provide a contract address directly with --contract-addr. These options are mutually exclusive.
+
+Next, you must specify the data to be signed. When using --lsig-txn, you can use the --sign-txid flag to sign that transaction's txid. Alternatively, arbitrary data can be signed with the --data-file, --data-b64, or --data-b32 options. These options are mutually exclusive.
+
+The base64 encoding of the signature will always be printed to stdout. Optionally, when using --lsig-txn, you may specify that the signature be used as a TEAL argument for that transaction. Specify the argument index with the --set-lsig-arg-idx flag. The --lsig-txn file will be updated in place, and any existing argument at that index will be overwritten.`,
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ /*
+ * First, fetch the key for signing
+ */
+
+ if keyFilename != "" && signerAcct != "" {
+ reportErrorf(tealsignMutKeyArgs)
+ }
+
+ var kdata []byte
+ var err error
+ if keyFilename != "" {
+ kdata, err = ioutil.ReadFile(keyFilename)
+ if err != nil {
+ reportErrorf(tealsignKeyfileFail, err)
+ }
+ }
+
+ // --account not yet supported, coming in another PR
+ // (need to add kmd support for signing logicsig data)
+ if signerAcct != "" {
+ reportErrorf(tealsignNoWithAcct)
+ }
+
+ // Create signature secrets from the seed
+ var seed crypto.Seed
+ copy(seed[:], kdata)
+ sec := crypto.GenerateSignatureSecrets(seed)
+
+ /*
+ * Next, fetch the hash of the program for use in the domain
+ * separated signature payload
+ */
+
+ var lsigHashArgs int
+ if lsigTxnFilename != "" {
+ lsigHashArgs++
+ }
+ if contractAddr != "" {
+ lsigHashArgs++
+ }
+
+ // Ensure there is one unambiguous source of program hash
+ if lsigHashArgs != 1 {
+ reportErrorf(tealsignMutLsigArgs)
+ }
+
+ var progHash crypto.Digest
+ var stxn transactions.SignedTxn
+ if lsigTxnFilename != "" {
+ // If passed a SignedTxn with a logic sig, compute
+ // the hash of the program within the logic sig
+ stxnBytes, err := ioutil.ReadFile(lsigTxnFilename)
+ if err != nil {
+ reportErrorf(fileReadError, lsigTxnFilename, err)
+ }
+
+ err = protocol.Decode(stxnBytes, &stxn)
+ if err != nil {
+ reportErrorf(txDecodeError, lsigTxnFilename, err)
+ }
+
+ // Ensure signed transaction has a logic sig with a
+ // program
+ if len(stxn.Lsig.Logic) == 0 {
+ reportErrorf(tealsignEmptyLogic)
+ }
+
+ progHash = crypto.HashObj(logic.Program(stxn.Lsig.Logic))
+ } else {
+ // Otherwise, the contract address is the logic hash
+ parsedAddr, err := basics.UnmarshalChecksumAddress(contractAddr)
+ if err != nil {
+ reportErrorf(tealsignParseAddr, err)
+ }
+
+ // Copy parsed address as program hash
+ copy(progHash[:], parsedAddr[:])
+ }
+
+ /*
+ * Next, fetch the data to sign
+ */
+
+ var dataArgs int
+ var dataToSign []byte
+
+ if dataFile != "" {
+ dataToSign, err = ioutil.ReadFile(dataFile)
+ if err != nil {
+ reportErrorf(tealsignParseData, err)
+ }
+ dataArgs++
+ }
+ if datab64 != "" {
+ dataToSign, err = base64.StdEncoding.DecodeString(datab64)
+ if err != nil {
+ reportErrorf(tealsignParseb64, err)
+ }
+ dataArgs++
+ }
+ if datab32 != "" {
+ dataToSign, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(datab32)
+ if err != nil {
+ reportErrorf(tealsignParseb32, err)
+ }
+ dataArgs++
+ }
+ if signTxID {
+ if lsigTxnFilename == "" {
+ reportErrorf(tealsignTxIDLsigReq)
+ }
+ txid := stxn.Txn.ID()
+ dataToSign = txid[:]
+ dataArgs++
+ }
+
+ // Ensure there is one unambiguous source of data
+ if dataArgs != 1 {
+ reportErrorf(tealsignDataReq)
+ }
+
+ /*
+ * Sign the payload
+ */
+
+ signature := sec.Sign(logic.Msg{
+ ProgramHash: progHash,
+ Data: dataToSign,
+ })
+
+ /*
+ * If requested, fill in logic sig arg
+ */
+
+ if setLsigArg >= 0 {
+ if lsigTxnFilename == "" {
+ reportErrorf(tealsignSetArgLsigReq)
+ }
+ if setLsigArg > transactions.EvalMaxArgs-1 {
+ reportErrorf(tealsignTooManyArg, transactions.EvalMaxArgs)
+ }
+ for len(stxn.Lsig.Args) < setLsigArg+1 {
+ stxn.Lsig.Args = append(stxn.Lsig.Args, nil)
+ }
+ stxn.Lsig.Args[setLsigArg] = signature[:]
+
+ // Write out the modified stxn
+ err = writeFile(lsigTxnFilename, protocol.Encode(&stxn), 0600)
+ if err != nil {
+ reportErrorf(fileWriteError, lsigTxnFilename, err)
+ }
+ reportInfof(tealsignInfoWroteSig, lsigTxnFilename, setLsigArg)
+ }
+
+ // Always print signature to stdout
+ signatureb64 := base64.StdEncoding.EncodeToString(signature[:])
+ reportInfof(tealsignInfoSig, signatureb64)
+ },
+}
diff --git a/cmd/kmd/codes/codes.go b/cmd/kmd/codes/codes.go
index ba8d86992c..752c05a193 100644
--- a/cmd/kmd/codes/codes.go
+++ b/cmd/kmd/codes/codes.go
@@ -17,8 +17,6 @@
package codes
const (
- // ExitCodeKMDInvalidArgs is returned if any cli arguments are invalid
- ExitCodeKMDInvalidArgs = 1
// ExitCodeKMDLogError is returned if we can't open the log file
ExitCodeKMDLogError = 2
// ExitCodeKMDError is the catch-all exit code for most kmd errors
diff --git a/cmd/kmd/main.go b/cmd/kmd/main.go
index 60f49cc77a..27cf2f7202 100644
--- a/cmd/kmd/main.go
+++ b/cmd/kmd/main.go
@@ -17,13 +17,15 @@
package main
import (
- "flag"
+ "fmt"
"os"
"os/signal"
"path/filepath"
"time"
"golang.org/x/sys/unix"
+ "github.com/spf13/cobra"
+ "github.com/spf13/cobra/doc"
"github.com/algorand/go-algorand/cmd/kmd/codes"
"github.com/algorand/go-algorand/daemon/kmd"
@@ -36,30 +38,44 @@ const (
kmdLogFilePerm = 0640
)
-func main() {
- dataDir := flag.String("d", "", "kmd data directory")
- timeoutSecs := flag.Uint("t", 0, "number of seconds after which to kill kmd if there are no requests. 0 means no timeout.")
- flag.Parse()
+var (
+ dataDir string
+ timeoutSecs uint64
+)
+
+func init() {
+ kmdCmd.Flags().StringVarP(&dataDir, "data-dir", "d", "", "kmd data directory.")
+ kmdCmd.Flags().Uint64VarP(&timeoutSecs, "timout-secs", "t", 0, "Number of seconds that kmd will run for before termination.")
+ kmdCmd.MarkFlagRequired("data-dir")
+}
+var kmdCmd = &cobra.Command{
+ Use: "kmd",
+ Short: "Key Management Daemon (kmd)",
+ Long: `The Key Management Daemon (kmd) is a low level wallet and key management
+tool. It works in conjunction with algod and goal to keep secrets safe. An
+optional timeout flag will automatically terminate kmd after a number of
+seconds has elapsed, allowing a simple way to ensure kmd will be shutdown in
+a timely manner. This is a blocking command.`,
+ Run: func(cmd *cobra.Command, args []string) {
+ runKmd(dataDir, timeoutSecs)
+ },
+}
+
+func runKmd(dataDir string, timeoutSecs uint64) {
// Use logging package instead of stdin/stdout
log := logging.NewLogger()
log.SetLevel(logging.Info)
- // Validate flags
- if *dataDir == "" {
- log.Errorf("dataDir (-d) is a required argument")
- os.Exit(codes.ExitCodeKMDInvalidArgs)
- }
-
// Parse timeout duration. 0 timeout -> nil timeout
var timeout *time.Duration
- if *timeoutSecs != 0 {
- t := time.Duration(*timeoutSecs) * time.Second
+ if timeoutSecs != 0 {
+ t := time.Duration(timeoutSecs) * time.Second
timeout = &t
}
// We have a dataDir now, so use log files
- kmdLogFilePath := filepath.Join(*dataDir, kmdLogFileName)
+ kmdLogFilePath := filepath.Join(dataDir, kmdLogFileName)
kmdLogFileMode := os.O_CREATE | os.O_WRONLY | os.O_APPEND
logFile, err := os.OpenFile(kmdLogFilePath, kmdLogFileMode, kmdLogFilePerm)
if err != nil {
@@ -82,7 +98,7 @@ func main() {
// Build a kmd StartConfig
startConfig := kmd.StartConfig{
- DataDir: *dataDir,
+ DataDir: dataDir,
Kill: kill,
Log: log,
Timeout: timeout,
@@ -105,3 +121,21 @@ func main() {
<-died
log.Infof("kmd server died. exiting...")
}
+
+func main() {
+ // Hidden command to generate docs in a given directory
+ // kmd generate-docs [path]
+ if len(os.Args) == 3 && os.Args[1] == "generate-docs" {
+ err := doc.GenMarkdownTree(kmdCmd, os.Args[2])
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ os.Exit(0)
+ }
+
+ if err := kmdCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/cmd/netdummy/main.go b/cmd/netdummy/main.go
index e12cbc1118..648482dfa6 100644
--- a/cmd/netdummy/main.go
+++ b/cmd/netdummy/main.go
@@ -47,12 +47,13 @@ func main() {
log.SetLevel(logging.Debug)
log.SetOutput(os.Stderr)
- addrs := network.MakeArrayPhonebook()
- addrs.Entries.ReplacePeerList([]string{*serverAddress})
-
var nodes []network.GossipNode
for i := 0; i < *numClients; i++ {
- n, _ := network.NewWebsocketGossipNode(log, conf, addrs, *genesisID, protocol.NetworkID(*networkID))
+ n, _ := network.NewWebsocketGossipNode(log,
+ conf,
+ []string{*serverAddress},
+ *genesisID,
+ protocol.NetworkID(*networkID))
n.Start()
nodes = append(nodes, n)
}
diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go
index 6042a9329e..5f845dfd4b 100644
--- a/cmd/netgoal/generate.go
+++ b/cmd/netgoal/generate.go
@@ -19,6 +19,7 @@ package main
import (
"encoding/json"
"errors"
+ "math/big"
"math/rand"
"os"
"regexp"
@@ -28,6 +29,7 @@ import (
"github.com/spf13/cobra"
"github.com/algorand/go-algorand/gen"
+ "github.com/algorand/go-algorand/netdeploy"
"github.com/algorand/go-algorand/netdeploy/remote"
"github.com/algorand/go-algorand/util/codecs"
)
@@ -67,7 +69,8 @@ func init() {
}
var generateTemplateLines = []string{
- "net => network template according to -R -N -n -w options",
+ "net => network template according to -R -N -n -w options. Suitable for 'netgoal build'",
+ "goalnet => goal network template according to -R -n -w options. Suitable for 'goal network'",
"genesis => genesis.json according to -w option",
"otwt => OneThousandWallets network template",
"otwg => OneThousandWallets genesis data",
@@ -123,13 +126,14 @@ template modes for -t:`,
} else {
baseRelay = baseNode
}
- switch strings.ToLower(templateToGenerate) {
+ templateType := strings.ToLower(templateToGenerate)
+ switch templateType {
case "genesis", "wallets":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
err = generateWalletGenesis(outputFilename, walletsToGenerate, nonPartnodesHostsToGenerate)
- case "net", "network":
+ case "net", "network", "goalnet":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
@@ -142,8 +146,11 @@ template modes for -t:`,
if relaysToGenerate < 0 {
reportErrorf("must specify number of relays with -R")
}
-
- err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate, baseNode, baseNonParticipatingNode, baseRelay)
+ if templateType == "goalnet" {
+ err = generateNetworkGoalTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate)
+ } else {
+ err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, nonPartnodesHostsToGenerate, baseNode, baseNonParticipatingNode, baseRelay)
+ }
case "otwt":
err = generateNetworkTemplate(outputFilename, 1000, 10, 20, 100, 0, baseNode, baseNonParticipatingNode, baseRelay)
case "otwg":
@@ -202,6 +209,82 @@ func pickNodeConfig(alt []remote.NodeConfig, name string) remote.NodeConfig {
return alt[0]
}
+func generateNetworkGoalTemplate(templateFilename string, wallets, relays, nodes, npnHosts int) error {
+ template := netdeploy.NetworkTemplate{}
+ template.Nodes = make([]remote.NodeConfigGoal, 0, relays+nodes+npnHosts)
+ template.Genesis = generateWalletGenesisData(walletsToGenerate, 0)
+ for i := 0; i < relays; i++ {
+ name := "relay" + strconv.Itoa(i+1)
+ newNode := remote.NodeConfigGoal{
+ Name: name,
+ IsRelay: true,
+ Wallets: nil,
+ }
+ template.Nodes = append(template.Nodes, newNode)
+ }
+
+ for i := 0; i < nodes; i++ {
+ name := "node" + strconv.Itoa(i+1)
+ newNode := remote.NodeConfigGoal{
+ Name: name,
+ Wallets: make([]remote.NodeWalletData, 0),
+ }
+ template.Nodes = append(template.Nodes, newNode)
+ }
+
+ for i := 0; i < npnHosts; i++ {
+ name := "nonParticipatingNode" + strconv.Itoa(i+1)
+ newNode := remote.NodeConfigGoal{
+ Name: name,
+ Wallets: make([]remote.NodeWalletData, 0),
+ }
+ template.Nodes = append(template.Nodes, newNode)
+ }
+ walletIndex := 0
+ for walletIndex < wallets {
+ for nodei, node := range template.Nodes {
+ if node.Name[0:4] != "node" {
+ continue
+ }
+ wallet := remote.NodeWalletData{
+ Name: "Wallet" + strconv.Itoa(walletIndex+1),
+ ParticipationOnly: false,
+ }
+ template.Nodes[nodei].Wallets = append(template.Nodes[nodei].Wallets, wallet)
+ walletIndex++
+ if walletIndex >= wallets {
+ break
+ }
+ }
+ if walletIndex >= wallets {
+ break
+ }
+ }
+
+ if npnHosts > 0 {
+ for walletIndex < wallets {
+ for nodei, node := range template.Nodes {
+ if node.Name[0:4] != "nonP" {
+ continue
+ }
+ wallet := remote.NodeWalletData{
+ Name: "Wallet" + strconv.Itoa(walletIndex+1),
+ ParticipationOnly: false,
+ }
+ template.Nodes[nodei].Wallets = append(template.Nodes[nodei].Wallets, wallet)
+ walletIndex++
+ if walletIndex >= wallets {
+ break
+ }
+ }
+ if walletIndex >= wallets {
+ break
+ }
+ }
+ }
+ return saveGoalTemplateToDisk(template, templateFilename)
+}
+
func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts, nodes, npnHosts int, baseNode, baseNonPartNode, baseRelay remote.NodeConfig) error {
network := remote.DeployedNetworkConfig{}
@@ -332,30 +415,50 @@ func saveTemplateToDisk(template remote.DeployedNetworkConfig, filename string)
return err
}
-func generateWalletGenesis(filename string, wallets, npnHosts int) error {
+func saveGoalTemplateToDisk(template netdeploy.NetworkTemplate, filename string) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+ if err == nil {
+ defer f.Close()
+
+ enc := codecs.NewFormattedJSONEncoder(f)
+ err = enc.Encode(template)
+ }
+ return err
+}
+
+func generateWalletGenesisData(wallets, npnHosts int) gen.GenesisData {
data := gen.DefaultGenesis
if npnHosts > 0 {
wallets = 2 * wallets
}
data.Wallets = make([]gen.WalletData, wallets)
- stake := 100.0 / float64(wallets)
+ stake := big.NewRat(int64(100), int64(wallets))
+
+ ratZero := big.NewRat(int64(0), int64(1))
+ ratHundred := big.NewRat(int64(100), int64(1))
- stakeSum := float64(0)
+ stakeSum := new(big.Rat).Set(ratZero)
for i := 0; i < wallets; i++ {
if i == (wallets - 1) {
// use the last wallet to workaround roundoff and get back to 1.0
- stake = 100.0 - stakeSum
+ stake = stake.Sub(new(big.Rat).Set(ratHundred), stakeSum)
}
+ floatStake, _ := stake.Float64()
w := gen.WalletData{
Name: "Wallet" + strconv.Itoa(i+1), // Wallet names are 1-based for this template
- Stake: stake,
+ Stake: floatStake,
}
if (i < (wallets / 2)) || (npnHosts == 0) {
w.Online = true
}
- stakeSum += stake
+ stakeSum = stakeSum.Add(stakeSum, stake)
data.Wallets[i] = w
}
+ return data
+}
+
+func generateWalletGenesis(filename string, wallets, npnHosts int) error {
+ data := generateWalletGenesisData(wallets, npnHosts)
return saveGenesisDataToDisk(data, filename)
}
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index 9b4ed5d2a8..14ec51b924 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -138,8 +138,88 @@ function determine_current_version() {
echo Current Version = ${CURRENTVER}
}
+function get_updater_url() {
+ local UNAME
+ local OS
+ local ARCH
+ UNAME=$(uname)
+ if [[ "${UNAME}" = "Darwin" ]]; then
+ OS="darwin"
+ UNAME=$(uname -m)
+ if [[ "${UNAME}" = "x86_64" ]]; then
+ ARCH="amd64"
+ else
+ echo "This platform ${UNAME} is not supported by updater."
+ exit 1
+ fi
+ elif [[ "${UNAME}" = "Linux" ]]; then
+ OS="linux"
+ UNAME=$(uname -m)
+ if [[ "${UNAME}" = "x86_64" ]]; then
+ ARCH="amd64"
+ elif [[ "${UNAME}" = "armv6l" ]]; then
+ ARCH="arm"
+ elif [[ "${UNAME}" = "armv7l" ]]; then
+ ARCH="arm"
+ elif [[ "${UNAME}" = "aarch64" ]]; then
+ ARCH="arm64"
+ else
+ echo "This platform ${UNAME} is not supported by updater."
+ exit 1
+ fi
+ else
+ echo "This operation system ${UNAME} is not supported by updater."
+ exit 1
+ fi
+ UPDATER_FILENAME="install_master_${OS}-${ARCH}.tar.gz"
+ UPDATER_URL="https://github.com/algorand/go-algorand-doc/raw/master/downloads/installers/${OS}_${ARCH}/${UPDATER_FILENAME}"
+}
+
+# check to see if the binary updater exists. if not, it will automatically the correct updater binary for the current platform
+function check_for_updater() {
+ # check if the updater binary exist.
+ if [ -f "${SCRIPTPATH}/updater" ]; then
+ return 0
+ fi
+ get_updater_url
+
+ # check the curl is available.
+ CURL_VER=$(curl -V 2>/dev/null || true)
+ if [ "${CURL_VER}" = "" ]; then
+ # no curl is installed.
+ echo "updater binary is missing and cannot be downloaded since curl is missing."
+ if [[ "$(uname)" = "Linux" ]]; then
+ echo "To install curl, run the following command:"
+ echo "apt-get update; apt-get install -y curl"
+ fi
+ exit 1
+ fi
+
+ CURL_OUT=$(curl -LJO --silent ${UPDATER_URL})
+ if [ "$?" != "0" ]; then
+ echo "failed to download updater binary from ${UPDATER_URL} using curl."
+ echo "${CURL_OUT}"
+ exit 1
+ fi
+
+ if [ ! -f "${SCRIPTPATH}/${UPDATER_FILENAME}" ]; then
+ echo "downloaded file ${SCRIPTPATH}/${UPDATER_FILENAME} is missing."
+ exit
+ fi
+
+ tar -zxvf "${SCRIPTPATH}/${UPDATER_FILENAME}" updater
+ if [ "$?" != "0" ]; then
+ echo "failed to extract updater binary from ${SCRIPTPATH}/${UPDATER_FILENAME}"
+ exit 1
+ fi
+
+ rm -f "${SCRIPTPATH}/${UPDATER_FILENAME}"
+ echo "updater binary was downloaded"
+}
+
function check_for_update() {
determine_current_version
+ check_for_updater
LATEST="$(${SCRIPTPATH}/updater ver check -c ${CHANNEL} ${BUCKET} | sed -n '2 p')"
if [ $? -ne 0 ]; then
echo "No remote updates found"
diff --git a/components/mocks/mockNetwork.go b/components/mocks/mockNetwork.go
index 92ee3af726..3076693be1 100644
--- a/components/mocks/mockNetwork.go
+++ b/components/mocks/mockNetwork.go
@@ -56,6 +56,10 @@ func (network *MockNetwork) Stop() {
func (network *MockNetwork) RequestConnectOutgoing(replace bool, quit <-chan struct{}) {
}
+// Disconnect - unused function
+func (network *MockNetwork) Disconnect(badpeer network.Peer) {
+}
+
// DisconnectPeers - unused function
func (network *MockNetwork) DisconnectPeers() {
}
@@ -69,6 +73,11 @@ func (network *MockNetwork) GetPeers(options ...network.PeerOption) []network.Pe
return nil
}
+// GetRoundTripper -- returns the network round tripper
+func (network *MockNetwork) GetRoundTripper() http.RoundTripper {
+ return http.DefaultTransport
+}
+
// Ready - always ready
func (network *MockNetwork) Ready() chan struct{} {
c := make(chan struct{})
@@ -87,3 +96,6 @@ func (network *MockNetwork) ClearHandlers() {
// RegisterHTTPHandler - empty implementation
func (network *MockNetwork) RegisterHTTPHandler(path string, handler http.Handler) {
}
+
+// OnNetworkAdvance - empty implementation
+func (network *MockNetwork) OnNetworkAdvance() {}
diff --git a/config/config.go b/config/config.go
index 7d19108d54..20543b57c4 100644
--- a/config/config.go
+++ b/config/config.go
@@ -23,7 +23,6 @@ import (
"os"
"os/user"
"path/filepath"
- "strconv"
"strings"
"time"
@@ -46,577 +45,6 @@ const Mainnet protocol.NetworkID = "mainnet"
// GenesisJSONFile is the name of the genesis.json file
const GenesisJSONFile = "genesis.json"
-// Global defines global Algorand protocol parameters which should not be overriden.
-type Global struct {
- SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential)
- BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block)
-}
-
-// Protocol holds the global configuration settings for the agreement protocol,
-// initialized with our current defaults. This is used across all nodes we create.
-var Protocol = Global{
- SmallLambda: 2000 * time.Millisecond,
- BigLambda: 15000 * time.Millisecond,
-}
-
-// ConsensusParams specifies settings that might vary based on the
-// particular version of the consensus protocol.
-type ConsensusParams struct {
- // Consensus protocol upgrades. Votes for upgrades are collected for
- // UpgradeVoteRounds. If the number of positive votes is over
- // UpgradeThreshold, the proposal is accepted.
- //
- // UpgradeVoteRounds needs to be long enough to collect an
- // accurate sample of participants, and UpgradeThreshold needs
- // to be high enough to ensure that there are sufficient participants
- // after the upgrade.
- //
- // A consensus protocol upgrade may specify the delay between its
- // acceptance and its execution. This gives clients time to notify
- // users. This delay is specified by the upgrade proposer and must
- // be between MinUpgradeWaitRounds and MaxUpgradeWaitRounds (inclusive)
- // in the old protocol's parameters. Note that these parameters refer
- // to the representation of the delay in a block rather than the actual
- // delay: if the specified delay is zero, it is equivalent to
- // DefaultUpgradeWaitRounds.
- //
- // The maximum length of a consensus version string is
- // MaxVersionStringLen.
- UpgradeVoteRounds uint64
- UpgradeThreshold uint64
- DefaultUpgradeWaitRounds uint64
- MinUpgradeWaitRounds uint64
- MaxUpgradeWaitRounds uint64
- MaxVersionStringLen int
-
- // MaxTxnBytesPerBlock determines the maximum number of bytes
- // that transactions can take up in a block. Specifically,
- // the sum of the lengths of encodings of each transaction
- // in a block must not exceed MaxTxnBytesPerBlock.
- MaxTxnBytesPerBlock int
-
- // MaxTxnBytesPerBlock is the maximum size of a transaction's Note field.
- MaxTxnNoteBytes int
-
- // MaxTxnLife is how long a transaction can be live for:
- // the maximum difference between LastValid and FirstValid.
- //
- // Note that in a protocol upgrade, the ledger must first be upgraded
- // to hold more past blocks for this value to be raised.
- MaxTxnLife uint64
-
- // ApprovedUpgrades describes the upgrade proposals that this protocol
- // implementation will vote for, along with their delay value
- // (in rounds). A delay value of zero is the same as a delay of
- // DefaultUpgradeWaitRounds.
- ApprovedUpgrades map[protocol.ConsensusVersion]uint64
-
- // SupportGenesisHash indicates support for the GenesisHash
- // fields in transactions (and requires them in blocks).
- SupportGenesisHash bool
-
- // RequireGenesisHash indicates that GenesisHash must be present
- // in every transaction.
- RequireGenesisHash bool
-
- // DefaultKeyDilution specifies the granularity of top-level ephemeral
- // keys. KeyDilution is the number of second-level keys in each batch,
- // signed by a top-level "batch" key. The default value can be
- // overriden in the account state.
- DefaultKeyDilution uint64
-
- // MinBalance specifies the minimum balance that can appear in
- // an account. To spend money below MinBalance requires issuing
- // an account-closing transaction, which transfers all of the
- // money from the account, and deletes the account state.
- MinBalance uint64
-
- // MinTxnFee specifies the minimum fee allowed on a transaction.
- // A minimum fee is necessary to prevent DoS. In some sense this is
- // a way of making the spender subsidize the cost of storing this transaction.
- MinTxnFee uint64
-
- // RewardUnit specifies the number of MicroAlgos corresponding to one reward
- // unit.
- //
- // Rewards are received by whole reward units. Fractions of
- // RewardUnits do not receive rewards.
- RewardUnit uint64
-
- // RewardsRateRefreshInterval is the number of rounds after which the
- // rewards level is recomputed for the next RewardsRateRefreshInterval rounds.
- RewardsRateRefreshInterval uint64
-
- // seed-related parameters
- SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec
- SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. delta_r in the spec
-
- // ledger retention policy
- MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for
-
- // sortition threshold factors
- NumProposers uint64
- SoftCommitteeSize uint64
- SoftCommitteeThreshold uint64
- CertCommitteeSize uint64
- CertCommitteeThreshold uint64
- NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant
- NextCommitteeThreshold uint64
- LateCommitteeSize uint64
- LateCommitteeThreshold uint64
- RedoCommitteeSize uint64
- RedoCommitteeThreshold uint64
- DownCommitteeSize uint64
- DownCommitteeThreshold uint64
-
- FastRecoveryLambda time.Duration // time between fast recovery attempts
- FastPartitionRecovery bool // set when fast partition recovery is enabled
-
- // commit to payset using a hash of entire payset,
- // instead of txid merkle tree
- PaysetCommitFlat bool
-
- MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks
-
- // support for the efficient encoding in SignedTxnInBlock
- SupportSignedTxnInBlock bool
-
- // force the FeeSink address to be non-participating in the genesis balances.
- ForceNonParticipatingFeeSink bool
-
- // support for ApplyData in SignedTxnInBlock
- ApplyData bool
-
- // track reward distributions in ApplyData
- RewardsInApplyData bool
-
- // domain-separated credentials
- CredentialDomainSeparationEnabled bool
-
- // support for transactions that mark an account non-participating
- SupportBecomeNonParticipatingTransactions bool
-
- // fix the rewards calculation by avoiding subtracting too much from the rewards pool
- PendingResidueRewards bool
-
- // asset support
- Asset bool
-
- // max number of assets per account
- MaxAssetsPerAccount int
-
- // max length of asset name
- MaxAssetNameBytes int
-
- // max length of asset unit name
- MaxAssetUnitNameBytes int
-
- // max length of asset url
- MaxAssetURLBytes int
-
- // support sequential transaction counter TxnCounter
- TxnCounter bool
-
- // transaction groups
- SupportTxGroups bool
-
- // max group size
- MaxTxGroupSize int
-
- // support for transaction leases
- SupportTransactionLeases bool
-
- // 0 for no support, otherwise highest version supported
- LogicSigVersion uint64
-
- // len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this
- LogicSigMaxSize uint64
-
- // sum of estimated op cost must be less than this
- LogicSigMaxCost uint64
-
- // max decimal precision for assets
- MaxAssetDecimals uint32
-}
-
-// Consensus tracks the protocol-level settings for different versions of the
-// consensus protocol.
-var Consensus map[protocol.ConsensusVersion]ConsensusParams
-
-// MaxVoteThreshold is the largest threshold for a bundle over all supported
-// consensus protocols, used for decoding purposes.
-var MaxVoteThreshold int
-
-func maybeMaxVoteThreshold(t uint64) {
- if int(t) > MaxVoteThreshold {
- MaxVoteThreshold = int(t)
- }
-}
-
-func init() {
- Consensus = make(map[protocol.ConsensusVersion]ConsensusParams)
-
- initConsensusProtocols()
- initConsensusTestProtocols()
-
- // This must appear last, since it depends on all of the other
- // versions to already be registered (by the above calls).
- initConsensusTestFastUpgrade()
-
- // Allow tuning SmallLambda for faster consensus in single-machine e2e
- // tests. Useful for development. This might make sense to fold into
- // a protocol-version-specific setting, once we move SmallLambda into
- // ConsensusParams.
- algoSmallLambda, err := strconv.ParseInt(os.Getenv("ALGOSMALLLAMBDAMSEC"), 10, 64)
- if err == nil {
- Protocol.SmallLambda = time.Duration(algoSmallLambda) * time.Millisecond
- }
-
- for _, p := range Consensus {
- maybeMaxVoteThreshold(p.SoftCommitteeThreshold)
- maybeMaxVoteThreshold(p.CertCommitteeThreshold)
- maybeMaxVoteThreshold(p.NextCommitteeThreshold)
- maybeMaxVoteThreshold(p.LateCommitteeThreshold)
- maybeMaxVoteThreshold(p.RedoCommitteeThreshold)
- maybeMaxVoteThreshold(p.DownCommitteeThreshold)
- }
-}
-
-func initConsensusProtocols() {
- // WARNING: copying a ConsensusParams by value into a new variable
- // does not copy the ApprovedUpgrades map. Make sure that each new
- // ConsensusParams structure gets a fresh ApprovedUpgrades map.
-
- // Base consensus protocol version, v7.
- v7 := ConsensusParams{
- UpgradeVoteRounds: 10000,
- UpgradeThreshold: 9000,
- DefaultUpgradeWaitRounds: 10000,
- MaxVersionStringLen: 64,
-
- MinBalance: 10000,
- MinTxnFee: 1000,
- MaxTxnLife: 1000,
- MaxTxnNoteBytes: 1024,
- MaxTxnBytesPerBlock: 1000000,
- DefaultKeyDilution: 10000,
-
- MaxTimestampIncrement: 25,
-
- RewardUnit: 1e6,
- RewardsRateRefreshInterval: 5e5,
-
- ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{},
-
- NumProposers: 30,
- SoftCommitteeSize: 2500,
- SoftCommitteeThreshold: 1870,
- CertCommitteeSize: 1000,
- CertCommitteeThreshold: 720,
- NextCommitteeSize: 10000,
- NextCommitteeThreshold: 7750,
- LateCommitteeSize: 10000,
- LateCommitteeThreshold: 7750,
- RedoCommitteeSize: 10000,
- RedoCommitteeThreshold: 7750,
- DownCommitteeSize: 10000,
- DownCommitteeThreshold: 7750,
-
- FastRecoveryLambda: 5 * time.Minute,
-
- SeedLookback: 2,
- SeedRefreshInterval: 100,
-
- MaxBalLookback: 320,
-
- MaxTxGroupSize: 1,
- }
-
- v7.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV7] = v7
-
- // v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis
- v8 := v7
-
- v8.SeedRefreshInterval = 80
- v8.NumProposers = 9
- v8.SoftCommitteeSize = 2990
- v8.SoftCommitteeThreshold = 2267
- v8.CertCommitteeSize = 1500
- v8.CertCommitteeThreshold = 1112
- v8.NextCommitteeSize = 5000
- v8.NextCommitteeThreshold = 3838
- v8.LateCommitteeSize = 5000
- v8.LateCommitteeThreshold = 3838
- v8.RedoCommitteeSize = 5000
- v8.RedoCommitteeThreshold = 3838
- v8.DownCommitteeSize = 5000
- v8.DownCommitteeThreshold = 3838
-
- v8.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV8] = v8
-
- // v7 can be upgraded to v8.
- v7.ApprovedUpgrades[protocol.ConsensusV8] = 0
-
- // v9 increases the minimum balance to 100,000 microAlgos.
- v9 := v8
- v9.MinBalance = 100000
- v9.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV9] = v9
-
- // v8 can be upgraded to v9.
- v8.ApprovedUpgrades[protocol.ConsensusV9] = 0
-
- // v10 introduces fast partition recovery (and also raises NumProposers).
- v10 := v9
- v10.FastPartitionRecovery = true
- v10.NumProposers = 20
- v10.LateCommitteeSize = 500
- v10.LateCommitteeThreshold = 320
- v10.RedoCommitteeSize = 2400
- v10.RedoCommitteeThreshold = 1768
- v10.DownCommitteeSize = 6000
- v10.DownCommitteeThreshold = 4560
- v10.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV10] = v10
-
- // v9 can be upgraded to v10.
- v9.ApprovedUpgrades[protocol.ConsensusV10] = 0
-
- // v11 introduces SignedTxnInBlock.
- v11 := v10
- v11.SupportSignedTxnInBlock = true
- v11.PaysetCommitFlat = true
- v11.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV11] = v11
-
- // v10 can be upgraded to v11.
- v10.ApprovedUpgrades[protocol.ConsensusV11] = 0
-
- // v12 increases the maximum length of a version string.
- v12 := v11
- v12.MaxVersionStringLen = 128
- v12.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV12] = v12
-
- // v11 can be upgraded to v12.
- v11.ApprovedUpgrades[protocol.ConsensusV12] = 0
-
- // v13 makes the consensus version a meaningful string.
- v13 := v12
- v13.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV13] = v13
-
- // v12 can be upgraded to v13.
- v12.ApprovedUpgrades[protocol.ConsensusV13] = 0
-
- // v14 introduces tracking of closing amounts in ApplyData, and enables
- // GenesisHash in transactions.
- v14 := v13
- v14.ApplyData = true
- v14.SupportGenesisHash = true
- v14.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV14] = v14
-
- // v13 can be upgraded to v14.
- v13.ApprovedUpgrades[protocol.ConsensusV14] = 0
-
- // v15 introduces tracking of reward distributions in ApplyData.
- v15 := v14
- v15.RewardsInApplyData = true
- v15.ForceNonParticipatingFeeSink = true
- v15.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV15] = v15
-
- // v14 can be upgraded to v15.
- v14.ApprovedUpgrades[protocol.ConsensusV15] = 0
-
- // v16 fixes domain separation in credentials.
- v16 := v15
- v16.CredentialDomainSeparationEnabled = true
- v16.RequireGenesisHash = true
- v16.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV16] = v16
-
- // v15 can be upgraded to v16.
- v15.ApprovedUpgrades[protocol.ConsensusV16] = 0
-
- // ConsensusV17 points to 'final' spec commit
- v17 := v16
- v17.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusV17] = v17
-
- // v16 can be upgraded to v17.
- v16.ApprovedUpgrades[protocol.ConsensusV17] = 0
-
- // ConsensusV18 points to reward calculation spec commit
- v18 := v17
- v18.PendingResidueRewards = true
- v18.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- v18.TxnCounter = true
- v18.Asset = true
- v18.LogicSigVersion = 1
- v18.LogicSigMaxSize = 1000
- v18.LogicSigMaxCost = 20000
- v18.MaxAssetsPerAccount = 1000
- v18.SupportTxGroups = true
- v18.MaxTxGroupSize = 16
- v18.SupportTransactionLeases = true
- v18.SupportBecomeNonParticipatingTransactions = true
- v18.MaxAssetNameBytes = 32
- v18.MaxAssetUnitNameBytes = 8
- v18.MaxAssetURLBytes = 32
- Consensus[protocol.ConsensusV18] = v18
-
- // ConsensusV19 is the official spec commit ( teal, assets, group tx )
- v19 := v18
- v19.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
-
- Consensus[protocol.ConsensusV19] = v19
-
- // v18 can be upgraded to v19.
- v18.ApprovedUpgrades[protocol.ConsensusV19] = 0
- // v17 can be upgraded to v19.
- v17.ApprovedUpgrades[protocol.ConsensusV19] = 0
-
- // v20 points to adding the precision to the assets.
- v20 := v19
- v20.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- v20.MaxAssetDecimals = 19
- // we want to adjust the upgrade time to be roughly one week.
- // one week, in term of rounds would be:
- // 140651 = (7 * 24 * 60 * 60 / 4.3)
- // for the sake of future manual calculations, we'll round that down
- // a bit :
- v20.DefaultUpgradeWaitRounds = 140000
- Consensus[protocol.ConsensusV20] = v20
-
- // v19 can be upgraded to v20.
- v19.ApprovedUpgrades[protocol.ConsensusV20] = 0
-
- // ConsensusFuture is used to test features that are implemented
- // but not yet released in a production protocol version.
- vFuture := v20
- vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- vFuture.MinUpgradeWaitRounds = 10000
- vFuture.MaxUpgradeWaitRounds = 150000
- Consensus[protocol.ConsensusFuture] = vFuture
-}
-
-func initConsensusTestProtocols() {
- // Various test protocol versions
- Consensus[protocol.ConsensusTest0] = ConsensusParams{
- UpgradeVoteRounds: 2,
- UpgradeThreshold: 1,
- DefaultUpgradeWaitRounds: 2,
- MaxVersionStringLen: 64,
-
- MaxTxnBytesPerBlock: 1000000,
- DefaultKeyDilution: 10000,
-
- ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{
- protocol.ConsensusTest1: 0,
- },
- }
-
- Consensus[protocol.ConsensusTest1] = ConsensusParams{
- UpgradeVoteRounds: 10,
- UpgradeThreshold: 8,
- DefaultUpgradeWaitRounds: 10,
- MaxVersionStringLen: 64,
-
- MaxTxnBytesPerBlock: 1000000,
- DefaultKeyDilution: 10000,
-
- ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{},
- }
-
- testBigBlocks := Consensus[protocol.ConsensusCurrentVersion]
- testBigBlocks.MaxTxnBytesPerBlock = 100000000
- testBigBlocks.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusTestBigBlocks] = testBigBlocks
-
- rapidRecalcParams := Consensus[protocol.ConsensusCurrentVersion]
- rapidRecalcParams.RewardsRateRefreshInterval = 10
- //because rapidRecalcParams is based on ConsensusCurrentVersion,
- //it *shouldn't* have any ApprovedUpgrades
- //but explicitly mark "no approved upgrades" just in case
- rapidRecalcParams.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusTestRapidRewardRecalculation] = rapidRecalcParams
-
- // Setting the testShorterLookback parameters derived from ConsensusCurrentVersion
- // Will result in MaxBalLookback = 32
- // Used to run tests faster where past MaxBalLookback values are checked
- testShorterLookback := Consensus[protocol.ConsensusCurrentVersion]
- testShorterLookback.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
-
- // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
- // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
- testShorterLookback.SeedLookback = 2
- testShorterLookback.SeedRefreshInterval = 8
- testShorterLookback.MaxBalLookback = 2 * testShorterLookback.SeedLookback * testShorterLookback.SeedRefreshInterval // 32
- Consensus[protocol.ConsensusTestShorterLookback] = testShorterLookback
-
- // The following two protocols: testUnupgradedProtocol and testUnupgradedToProtocol
- // are used to test the case when some nodes in the network do not make progress.
-
- // testUnupgradedToProtocol is derived from ConsensusCurrentVersion and upgraded
- // from testUnupgradedProtocol.
- testUnupgradedToProtocol := Consensus[protocol.ConsensusCurrentVersion]
- testUnupgradedToProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
- Consensus[protocol.ConsensusTestUnupgradedToProtocol] = testUnupgradedToProtocol
-
- // testUnupgradedProtocol is used to control the upgrade of a node. This is used
- // to construct and run a network where some node is upgraded, and some other
- // node is not upgraded.
- // testUnupgradedProtocol is derived from ConsensusCurrentVersion and upgrades to
- // testUnupgradedToProtocol.
- testUnupgradedProtocol := Consensus[protocol.ConsensusCurrentVersion]
- testUnupgradedProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
-
- testUnupgradedProtocol.UpgradeVoteRounds = 3
- testUnupgradedProtocol.UpgradeThreshold = 2
- testUnupgradedProtocol.DefaultUpgradeWaitRounds = 3
- b, err := strconv.ParseBool(os.Getenv("ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE"))
- // Do not upgrade to the next version if
- // ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE is set to true (e.g. 1, TRUE)
- if err == nil && b {
- // Configure as if testUnupgradedToProtocol is not supported by the binary
- delete(Consensus, protocol.ConsensusTestUnupgradedToProtocol)
- } else {
- // Direct upgrade path from ConsensusTestUnupgradedProtocol to ConsensusTestUnupgradedToProtocol
- // This is needed for the voting nodes vote to upgrade to the next protocol
- testUnupgradedProtocol.ApprovedUpgrades[protocol.ConsensusTestUnupgradedToProtocol] = 0
- }
- Consensus[protocol.ConsensusTestUnupgradedProtocol] = testUnupgradedProtocol
-}
-
-func initConsensusTestFastUpgrade() {
- fastUpgradeProtocols := make(map[protocol.ConsensusVersion]ConsensusParams)
-
- for proto, params := range Consensus {
- fastParams := params
- fastParams.UpgradeVoteRounds = 5
- fastParams.UpgradeThreshold = 3
- fastParams.DefaultUpgradeWaitRounds = 5
- fastParams.MaxVersionStringLen += len(protocol.ConsensusTestFastUpgrade(""))
- fastParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
-
- for ver := range params.ApprovedUpgrades {
- fastParams.ApprovedUpgrades[protocol.ConsensusTestFastUpgrade(ver)] = 0
- }
-
- fastUpgradeProtocols[protocol.ConsensusTestFastUpgrade(proto)] = fastParams
- }
-
- // Put the test protocols into the Consensus struct; this
- // is done as a separate step so we don't recurse forever.
- for proto, params := range fastUpgradeProtocols {
- Consensus[proto] = params
- }
-}
-
// Local holds the per-node-instance configuration settings for the protocol.
type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
@@ -825,6 +253,25 @@ type Local struct {
// TelemetryToLog records messages to node.log that are normally sent to remote event monitoring
TelemetryToLog bool
+
+ // DNSSecurityFlags instructs algod to validate DNS responses.
+ // Possible flag values
+ // 0x00 - disabled
+ // 0x01 (dnssecSRV) - validate SRV response
+ // 0x02 (dnssecRelayAddr) - validate relays' names to addresses resolution
+ // 0x04 (dnssecTelemetryAddr) - validate telemetry and metrics names to addresses resolution
+ // ...
+ DNSSecurityFlags uint32
+
+ // EnablePingHandler controls whether the gossip node would respond to ping messages with a pong message.
+ EnablePingHandler bool
+
+ // DisableOutgoingConnectionThrottling disables the connection throttling of the network library, which
+ // allows the network library to continuously disconnect relays based on their relative ( and absolute ) performance.
+ DisableOutgoingConnectionThrottling bool
+
+ // NetworkProtocolVersion overrides network protocol version ( if present )
+ NetworkProtocolVersion string
}
// Filenames of config files within the configdir (e.g. ~/.algorand)
@@ -842,6 +289,11 @@ const LedgerFilenamePrefix = "ledger"
// It is used to recover from node crashes.
const CrashFilename = "crash.sqlite"
+// ConfigurableConsensusProtocolsFilename defines a set of consensus protocols that
+// are to be loaded from the data directory ( if present ), to override the
+// built-in supported consensus protocols.
+const ConfigurableConsensusProtocolsFilename = "consensus.json"
+
// LoadConfigFromDisk returns a Local config structure based on merging the defaults
// with settings loaded from the config file from the custom dir. If the custom file
// cannot be loaded, the default config is returned (with the error from loading the
@@ -1028,3 +480,24 @@ func GetDefaultConfigFilePath() (string, error) {
}
return filepath.Join(currentUser.HomeDir, ".algorand"), nil
}
+
+const (
+ dnssecSRV = 1 << iota
+ dnssecRelayAddr
+ dnssecTelemetryAddr
+)
+
+// DNSSecuritySRVEnforced returns true if SRV response verification enforced
+func (cfg Local) DNSSecuritySRVEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecSRV != 0
+}
+
+// DNSSecurityRelayAddrEnforced returns true if relay name to ip addr resolution enforced
+func (cfg Local) DNSSecurityRelayAddrEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecRelayAddr != 0
+}
+
+// DNSSecurityTelemeryAddrEnforced returns true if telemetry name to ip addr resolution enforced
+func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool {
+ return cfg.DNSSecurityFlags&dnssecTelemetryAddr != 0
+}
diff --git a/config/config_test.go b/config/config_test.go
index 273b88eed0..b273e842e4 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -314,7 +314,7 @@ func TestConfigMigrateFromDisk(t *testing.T) {
func TestConfigInvariant(t *testing.T) {
a := require.New(t)
- a.Equal(uint32(5), configVersion, "If you bump Config Version, please update this test (and consider if you should be adding more)")
+ a.Equal(uint32(6), configVersion, "If you bump Config Version, please update this test (and consider if you should be adding more)")
ourPath, err := os.Getwd()
a.NoError(err)
@@ -349,6 +349,11 @@ func TestConfigInvariant(t *testing.T) {
err = codecs.LoadObjectFromFile(filepath.Join(configsPath, "config-v5.json"), &c5)
a.NoError(err)
a.Equal(defaultLocalV5, c5)
+
+ c6 := Local{}
+ err = codecs.LoadObjectFromFile(filepath.Join(configsPath, "config-v6.json"), &c6)
+ a.NoError(err)
+ a.Equal(defaultLocalV6, c6)
}
func TestConfigLatestVersion(t *testing.T) {
diff --git a/config/consensus.go b/config/consensus.go
new file mode 100644
index 0000000000..663b1f664d
--- /dev/null
+++ b/config/consensus.go
@@ -0,0 +1,589 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package config
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "time"
+
+ "github.com/algorand/go-algorand/protocol"
+)
+
+// ConsensusParams specifies settings that might vary based on the
+// particular version of the consensus protocol.
+type ConsensusParams struct {
+ // Consensus protocol upgrades. Votes for upgrades are collected for
+ // UpgradeVoteRounds. If the number of positive votes is over
+ // UpgradeThreshold, the proposal is accepted.
+ //
+ // UpgradeVoteRounds needs to be long enough to collect an
+ // accurate sample of participants, and UpgradeThreshold needs
+ // to be high enough to ensure that there are sufficient participants
+ // after the upgrade.
+ //
+ // A consensus protocol upgrade may specify the delay between its
+ // acceptance and its execution. This gives clients time to notify
+ // users. This delay is specified by the upgrade proposer and must
+ // be between MinUpgradeWaitRounds and MaxUpgradeWaitRounds (inclusive)
+ // in the old protocol's parameters. Note that these parameters refer
+ // to the representation of the delay in a block rather than the actual
+ // delay: if the specified delay is zero, it is equivalent to
+ // DefaultUpgradeWaitRounds.
+ //
+ // The maximum length of a consensus version string is
+ // MaxVersionStringLen.
+ UpgradeVoteRounds uint64
+ UpgradeThreshold uint64
+ DefaultUpgradeWaitRounds uint64
+ MinUpgradeWaitRounds uint64
+ MaxUpgradeWaitRounds uint64
+ MaxVersionStringLen int
+
+ // MaxTxnBytesPerBlock determines the maximum number of bytes
+ // that transactions can take up in a block. Specifically,
+ // the sum of the lengths of encodings of each transaction
+ // in a block must not exceed MaxTxnBytesPerBlock.
+ MaxTxnBytesPerBlock int
+
+ // MaxTxnNoteBytes is the maximum size of a transaction's Note field.
+ MaxTxnNoteBytes int
+
+ // MaxTxnLife is how long a transaction can be live for:
+ // the maximum difference between LastValid and FirstValid.
+ //
+ // Note that in a protocol upgrade, the ledger must first be upgraded
+ // to hold more past blocks for this value to be raised.
+ MaxTxnLife uint64
+
+ // ApprovedUpgrades describes the upgrade proposals that this protocol
+ // implementation will vote for, along with their delay value
+ // (in rounds). A delay value of zero is the same as a delay of
+ // DefaultUpgradeWaitRounds.
+ ApprovedUpgrades map[protocol.ConsensusVersion]uint64
+
+ // SupportGenesisHash indicates support for the GenesisHash
+ // fields in transactions (and requires them in blocks).
+ SupportGenesisHash bool
+
+ // RequireGenesisHash indicates that GenesisHash must be present
+ // in every transaction.
+ RequireGenesisHash bool
+
+ // DefaultKeyDilution specifies the granularity of top-level ephemeral
+ // keys. KeyDilution is the number of second-level keys in each batch,
+ // signed by a top-level "batch" key. The default value can be
+ // overridden in the account state.
+ DefaultKeyDilution uint64
+
+ // MinBalance specifies the minimum balance that can appear in
+ // an account. To spend money below MinBalance requires issuing
+ // an account-closing transaction, which transfers all of the
+ // money from the account, and deletes the account state.
+ MinBalance uint64
+
+ // MinTxnFee specifies the minimum fee allowed on a transaction.
+ // A minimum fee is necessary to prevent DoS. In some sense this is
+ // a way of making the spender subsidize the cost of storing this transaction.
+ MinTxnFee uint64
+
+ // RewardUnit specifies the number of MicroAlgos corresponding to one reward
+ // unit.
+ //
+ // Rewards are received by whole reward units. Fractions of
+ // RewardUnits do not receive rewards.
+ RewardUnit uint64
+
+ // RewardsRateRefreshInterval is the number of rounds after which the
+ // rewards level is recomputed for the next RewardsRateRefreshInterval rounds.
+ RewardsRateRefreshInterval uint64
+
+ // seed-related parameters
+ SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec
+ SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. delta_r in the spec
+
+ // ledger retention policy
+ MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for
+
+ // sortition threshold factors
+ NumProposers uint64
+ SoftCommitteeSize uint64
+ SoftCommitteeThreshold uint64
+ CertCommitteeSize uint64
+ CertCommitteeThreshold uint64
+ NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant
+ NextCommitteeThreshold uint64
+ LateCommitteeSize uint64
+ LateCommitteeThreshold uint64
+ RedoCommitteeSize uint64
+ RedoCommitteeThreshold uint64
+ DownCommitteeSize uint64
+ DownCommitteeThreshold uint64
+
+ FastRecoveryLambda time.Duration // time between fast recovery attempts
+ FastPartitionRecovery bool // set when fast partition recovery is enabled
+
+ // commit to payset using a hash of entire payset,
+ // instead of txid merkle tree
+ PaysetCommitFlat bool
+
+ MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks
+
+ // support for the efficient encoding in SignedTxnInBlock
+ SupportSignedTxnInBlock bool
+
+ // force the FeeSink address to be non-participating in the genesis balances.
+ ForceNonParticipatingFeeSink bool
+
+ // support for ApplyData in SignedTxnInBlock
+ ApplyData bool
+
+ // track reward distributions in ApplyData
+ RewardsInApplyData bool
+
+ // domain-separated credentials
+ CredentialDomainSeparationEnabled bool
+
+ // support for transactions that mark an account non-participating
+ SupportBecomeNonParticipatingTransactions bool
+
+ // fix the rewards calculation by avoiding subtracting too much from the rewards pool
+ PendingResidueRewards bool
+
+ // asset support
+ Asset bool
+
+ // max number of assets per account
+ MaxAssetsPerAccount int
+
+ // max length of asset name
+ MaxAssetNameBytes int
+
+ // max length of asset unit name
+ MaxAssetUnitNameBytes int
+
+ // max length of asset url
+ MaxAssetURLBytes int
+
+ // support sequential transaction counter TxnCounter
+ TxnCounter bool
+
+ // transaction groups
+ SupportTxGroups bool
+
+ // max group size
+ MaxTxGroupSize int
+
+ // support for transaction leases
+ SupportTransactionLeases bool
+
+ // 0 for no support, otherwise highest version supported
+ LogicSigVersion uint64
+
+ // len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this
+ LogicSigMaxSize uint64
+
+ // sum of estimated op cost must be less than this
+ LogicSigMaxCost uint64
+
+ // max decimal precision for assets
+ MaxAssetDecimals uint32
+
+ // whether to use the old buggy Credential.lowestOutput function
+ // TODO(upgrade): Please remove as soon as the upgrade goes through
+ UseBuggyProposalLowestOutput bool
+}
+
+// ConsensusProtocols defines a set of supported protocol versions and their
+// corresponding parameters.
+type ConsensusProtocols map[protocol.ConsensusVersion]ConsensusParams
+
+// Consensus tracks the protocol-level settings for different versions of the
+// consensus protocol.
+var Consensus ConsensusProtocols
+
+// MaxVoteThreshold is the largest threshold for a bundle over all supported
+// consensus protocols, used for decoding purposes.
+var MaxVoteThreshold int
+
+func maybeMaxVoteThreshold(t uint64) {
+ if int(t) > MaxVoteThreshold {
+ MaxVoteThreshold = int(t)
+ }
+}
+
+// SaveConfigurableConsensus saves the configurable protocols file to the provided data directory.
+func SaveConfigurableConsensus(dataDirectory string, params ConsensusProtocols) error {
+ consensusProtocolPath := filepath.Join(dataDirectory, ConfigurableConsensusProtocolsFilename)
+
+ encodedConsensusParams, err := json.Marshal(params)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(consensusProtocolPath, encodedConsensusParams, 0644)
+ return err
+}
+
+// DeepCopy creates a deep copy of a consensus protocols map.
+func (cp ConsensusProtocols) DeepCopy() ConsensusProtocols {
+ staticConsensus := make(ConsensusProtocols)
+ for consensusVersion, consensusParams := range cp {
+ // recreate the ApprovedUpgrades map since we don't want to modify the original one.
+ if consensusParams.ApprovedUpgrades != nil {
+ newApprovedUpgrades := make(map[protocol.ConsensusVersion]uint64)
+ for ver, when := range consensusParams.ApprovedUpgrades {
+ newApprovedUpgrades[ver] = when
+ }
+ consensusParams.ApprovedUpgrades = newApprovedUpgrades
+ }
+ staticConsensus[consensusVersion] = consensusParams
+ }
+ return staticConsensus
+}
+
+// Merge merges a configurable consensus on top of the existing consensus protocols and returns
+// a new consensus protocol without modifying any of the incoming structures.
+func (cp ConsensusProtocols) Merge(configurableConsensus ConsensusProtocols) ConsensusProtocols {
+ staticConsensus := cp.DeepCopy()
+
+ for consensusVersion, consensusParams := range configurableConsensus {
+ if consensusParams.ApprovedUpgrades == nil {
+ // if we were provided with an empty ConsensusParams, delete the existing reference to this consensus version
+ for cVer, cParam := range staticConsensus {
+ if cVer == consensusVersion {
+ delete(staticConsensus, cVer)
+ } else if _, has := cParam.ApprovedUpgrades[consensusVersion]; has {
+ // delete upgrade to deleted version
+ delete(cParam.ApprovedUpgrades, consensusVersion)
+ }
+ }
+ } else {
+ // need to add/update entry
+ staticConsensus[consensusVersion] = consensusParams
+ }
+ }
+
+ return staticConsensus
+}
+
+// LoadConfigurableConsensusProtocols loads the configurable protocols from the data directory
+func LoadConfigurableConsensusProtocols(dataDirectory string) error {
+ newConsensus, err := PreloadConfigurableConsensusProtocols(dataDirectory)
+ if err != nil {
+ return err
+ }
+ if newConsensus != nil {
+ Consensus = newConsensus
+ }
+ return nil
+}
+
+// PreloadConfigurableConsensusProtocols loads the configurable protocols from the data directory
+// and merge it with a copy of the Consensus map. Then, it returns it to the caller.
+func PreloadConfigurableConsensusProtocols(dataDirectory string) (ConsensusProtocols, error) {
+ consensusProtocolPath := filepath.Join(dataDirectory, ConfigurableConsensusProtocolsFilename)
+ file, err := os.Open(consensusProtocolPath)
+
+ if err != nil {
+ if os.IsNotExist(err) {
+ // this file is not required, only optional. if it's missing, no harm is done.
+ return Consensus, nil
+ }
+ return nil, err
+ }
+ defer file.Close()
+
+ configurableConsensus := make(ConsensusProtocols)
+
+ decoder := json.NewDecoder(file)
+ err = decoder.Decode(&configurableConsensus)
+ if err != nil {
+ return nil, err
+ }
+ return Consensus.Merge(configurableConsensus), nil
+}
+
+func initConsensusProtocols() {
+ // WARNING: copying a ConsensusParams by value into a new variable
+ // does not copy the ApprovedUpgrades map. Make sure that each new
+ // ConsensusParams structure gets a fresh ApprovedUpgrades map.
+
+ // Base consensus protocol version, v7.
+ v7 := ConsensusParams{
+ UpgradeVoteRounds: 10000,
+ UpgradeThreshold: 9000,
+ DefaultUpgradeWaitRounds: 10000,
+ MaxVersionStringLen: 64,
+
+ MinBalance: 10000,
+ MinTxnFee: 1000,
+ MaxTxnLife: 1000,
+ MaxTxnNoteBytes: 1024,
+ MaxTxnBytesPerBlock: 1000000,
+ DefaultKeyDilution: 10000,
+
+ MaxTimestampIncrement: 25,
+
+ RewardUnit: 1e6,
+ RewardsRateRefreshInterval: 5e5,
+
+ ApprovedUpgrades: map[protocol.ConsensusVersion]uint64{},
+
+ NumProposers: 30,
+ SoftCommitteeSize: 2500,
+ SoftCommitteeThreshold: 1870,
+ CertCommitteeSize: 1000,
+ CertCommitteeThreshold: 720,
+ NextCommitteeSize: 10000,
+ NextCommitteeThreshold: 7750,
+ LateCommitteeSize: 10000,
+ LateCommitteeThreshold: 7750,
+ RedoCommitteeSize: 10000,
+ RedoCommitteeThreshold: 7750,
+ DownCommitteeSize: 10000,
+ DownCommitteeThreshold: 7750,
+
+ FastRecoveryLambda: 5 * time.Minute,
+
+ SeedLookback: 2,
+ SeedRefreshInterval: 100,
+
+ MaxBalLookback: 320,
+
+ MaxTxGroupSize: 1,
+ UseBuggyProposalLowestOutput: true, // TODO(upgrade): Please remove as soon as the upgrade goes through
+ }
+
+ v7.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV7] = v7
+
+ // v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis
+ v8 := v7
+
+ v8.SeedRefreshInterval = 80
+ v8.NumProposers = 9
+ v8.SoftCommitteeSize = 2990
+ v8.SoftCommitteeThreshold = 2267
+ v8.CertCommitteeSize = 1500
+ v8.CertCommitteeThreshold = 1112
+ v8.NextCommitteeSize = 5000
+ v8.NextCommitteeThreshold = 3838
+ v8.LateCommitteeSize = 5000
+ v8.LateCommitteeThreshold = 3838
+ v8.RedoCommitteeSize = 5000
+ v8.RedoCommitteeThreshold = 3838
+ v8.DownCommitteeSize = 5000
+ v8.DownCommitteeThreshold = 3838
+
+ v8.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV8] = v8
+
+ // v7 can be upgraded to v8.
+ v7.ApprovedUpgrades[protocol.ConsensusV8] = 0
+
+ // v9 increases the minimum balance to 100,000 microAlgos.
+ v9 := v8
+ v9.MinBalance = 100000
+ v9.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV9] = v9
+
+ // v8 can be upgraded to v9.
+ v8.ApprovedUpgrades[protocol.ConsensusV9] = 0
+
+ // v10 introduces fast partition recovery (and also raises NumProposers).
+ v10 := v9
+ v10.FastPartitionRecovery = true
+ v10.NumProposers = 20
+ v10.LateCommitteeSize = 500
+ v10.LateCommitteeThreshold = 320
+ v10.RedoCommitteeSize = 2400
+ v10.RedoCommitteeThreshold = 1768
+ v10.DownCommitteeSize = 6000
+ v10.DownCommitteeThreshold = 4560
+ v10.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV10] = v10
+
+ // v9 can be upgraded to v10.
+ v9.ApprovedUpgrades[protocol.ConsensusV10] = 0
+
+ // v11 introduces SignedTxnInBlock.
+ v11 := v10
+ v11.SupportSignedTxnInBlock = true
+ v11.PaysetCommitFlat = true
+ v11.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV11] = v11
+
+ // v10 can be upgraded to v11.
+ v10.ApprovedUpgrades[protocol.ConsensusV11] = 0
+
+ // v12 increases the maximum length of a version string.
+ v12 := v11
+ v12.MaxVersionStringLen = 128
+ v12.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV12] = v12
+
+ // v11 can be upgraded to v12.
+ v11.ApprovedUpgrades[protocol.ConsensusV12] = 0
+
+ // v13 makes the consensus version a meaningful string.
+ v13 := v12
+ v13.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV13] = v13
+
+ // v12 can be upgraded to v13.
+ v12.ApprovedUpgrades[protocol.ConsensusV13] = 0
+
+ // v14 introduces tracking of closing amounts in ApplyData, and enables
+ // GenesisHash in transactions.
+ v14 := v13
+ v14.ApplyData = true
+ v14.SupportGenesisHash = true
+ v14.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV14] = v14
+
+ // v13 can be upgraded to v14.
+ v13.ApprovedUpgrades[protocol.ConsensusV14] = 0
+
+ // v15 introduces tracking of reward distributions in ApplyData.
+ v15 := v14
+ v15.RewardsInApplyData = true
+ v15.ForceNonParticipatingFeeSink = true
+ v15.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV15] = v15
+
+ // v14 can be upgraded to v15.
+ v14.ApprovedUpgrades[protocol.ConsensusV15] = 0
+
+ // v16 fixes domain separation in credentials.
+ v16 := v15
+ v16.CredentialDomainSeparationEnabled = true
+ v16.RequireGenesisHash = true
+ v16.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV16] = v16
+
+ // v15 can be upgraded to v16.
+ v15.ApprovedUpgrades[protocol.ConsensusV16] = 0
+
+ // ConsensusV17 points to 'final' spec commit
+ v17 := v16
+ v17.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusV17] = v17
+
+ // v16 can be upgraded to v17.
+ v16.ApprovedUpgrades[protocol.ConsensusV17] = 0
+
+ // ConsensusV18 points to reward calculation spec commit
+ v18 := v17
+ v18.PendingResidueRewards = true
+ v18.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v18.TxnCounter = true
+ v18.Asset = true
+ v18.LogicSigVersion = 1
+ v18.LogicSigMaxSize = 1000
+ v18.LogicSigMaxCost = 20000
+ v18.MaxAssetsPerAccount = 1000
+ v18.SupportTxGroups = true
+ v18.MaxTxGroupSize = 16
+ v18.SupportTransactionLeases = true
+ v18.SupportBecomeNonParticipatingTransactions = true
+ v18.MaxAssetNameBytes = 32
+ v18.MaxAssetUnitNameBytes = 8
+ v18.MaxAssetURLBytes = 32
+ Consensus[protocol.ConsensusV18] = v18
+
+ // ConsensusV19 is the official spec commit ( teal, assets, group tx )
+ v19 := v18
+ v19.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ Consensus[protocol.ConsensusV19] = v19
+
+ // v18 can be upgraded to v19.
+ v18.ApprovedUpgrades[protocol.ConsensusV19] = 0
+ // v17 can be upgraded to v19.
+ v17.ApprovedUpgrades[protocol.ConsensusV19] = 0
+
+ // v20 points to adding the precision to the assets.
+ v20 := v19
+ v20.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v20.MaxAssetDecimals = 19
+ // we want to adjust the upgrade time to be roughly one week.
+ // one week, in term of rounds would be:
+ // 140651 = (7 * 24 * 60 * 60 / 4.3)
+ // for the sake of future manual calculations, we'll round that down
+ // a bit :
+ v20.DefaultUpgradeWaitRounds = 140000
+ Consensus[protocol.ConsensusV20] = v20
+
+ // v19 can be upgraded to v20.
+ v19.ApprovedUpgrades[protocol.ConsensusV20] = 0
+
+ // v21 fixes a bug in Credential.lowestOutput that would cause larger accounts to be selected to propose disproportionately more often than small accounts
+ v21 := v20
+ v21.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ v21.UseBuggyProposalLowestOutput = false // TODO(upgrade): Please remove this line as soon as the protocol upgrade goes through
+ Consensus[protocol.ConsensusV21] = v21
+ // v20 can be upgraded to v21.
+ v20.ApprovedUpgrades[protocol.ConsensusV21] = 0
+
+ // ConsensusFuture is used to test features that are implemented
+ // but not yet released in a production protocol version.
+ vFuture := v21
+ vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vFuture.MinUpgradeWaitRounds = 10000
+ vFuture.MaxUpgradeWaitRounds = 150000
+ Consensus[protocol.ConsensusFuture] = vFuture
+}
+
+// Global defines global Algorand protocol parameters which should not be overridden.
+type Global struct {
+ SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential)
+ BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block)
+}
+
+// Protocol holds the global configuration settings for the agreement protocol,
+// initialized with our current defaults. This is used across all nodes we create.
+var Protocol = Global{
+ SmallLambda: 2000 * time.Millisecond,
+ BigLambda: 15000 * time.Millisecond,
+}
+
+func init() {
+ Consensus = make(ConsensusProtocols)
+
+ initConsensusProtocols()
+
+ // Allow tuning SmallLambda for faster consensus in single-machine e2e
+ // tests. Useful for development. This might make sense to fold into
+ // a protocol-version-specific setting, once we move SmallLambda into
+ // ConsensusParams.
+ algoSmallLambda, err := strconv.ParseInt(os.Getenv("ALGOSMALLLAMBDAMSEC"), 10, 64)
+ if err == nil {
+ Protocol.SmallLambda = time.Duration(algoSmallLambda) * time.Millisecond
+ }
+
+ for _, p := range Consensus {
+ maybeMaxVoteThreshold(p.SoftCommitteeThreshold)
+ maybeMaxVoteThreshold(p.CertCommitteeThreshold)
+ maybeMaxVoteThreshold(p.NextCommitteeThreshold)
+ maybeMaxVoteThreshold(p.LateCommitteeThreshold)
+ maybeMaxVoteThreshold(p.RedoCommitteeThreshold)
+ maybeMaxVoteThreshold(p.DownCommitteeThreshold)
+ }
+}
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 6e8dedb117..7f4c1208c9 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -21,9 +21,9 @@ import (
"time"
)
-var defaultLocal = defaultLocalV5
+var defaultLocal = defaultLocalV6
-const configVersion = uint32(5)
+const configVersion = uint32(6)
// !!! WARNING !!!
//
@@ -39,6 +39,62 @@ const configVersion = uint32(5)
//
// !!! WARNING !!!
+var defaultLocalV6 = Local{
+ // DO NOT MODIFY VALUES - New values may be added carefully - See WARNING at top of file
+ Version: 6,
+ Archival: false,
+ BaseLoggerDebugLevel: 4,
+ BroadcastConnectionsLimit: -1,
+ AnnounceParticipationKey: true,
+ PriorityPeers: map[string]bool{},
+ CadaverSizeTarget: 1073741824,
+ CatchupFailurePeerRefreshRate: 10,
+ CatchupParallelBlocks: 16,
+ ConnectionsRateLimitingCount: 60,
+ ConnectionsRateLimitingWindowSeconds: 1,
+ DeadlockDetection: 0,
+ DNSBootstrapID: ".algorand.network",
+ EnableAgreementReporting: false,
+ EnableAgreementTimeMetrics: false,
+ EnableIncomingMessageFilter: false,
+ EnableMetricReporting: false,
+ EnableOutgoingNetworkMessageFiltering: true,
+ EnableRequestLogger: false,
+ EnableTopAccountsReporting: false,
+ EndpointAddress: "127.0.0.1:0",
+ GossipFanout: 4,
+ IncomingConnectionsLimit: 10000,
+ IncomingMessageFilterBucketCount: 5,
+ IncomingMessageFilterBucketSize: 512,
+ LogArchiveName: "node.archive.log",
+ LogArchiveMaxAge: "",
+ LogSizeLimit: 1073741824,
+ MaxConnectionsPerIP: 30,
+ NetAddress: "",
+ NetworkProtocolVersion: "",
+ NodeExporterListenAddress: ":9100",
+ NodeExporterPath: "./node_exporter",
+ OutgoingMessageFilterBucketCount: 3,
+ OutgoingMessageFilterBucketSize: 128,
+ ReconnectTime: 1 * time.Minute,
+ ReservedFDs: 256,
+ RestReadTimeoutSeconds: 15,
+ RestWriteTimeoutSeconds: 120,
+ RunHosted: false,
+ SuggestedFeeBlockHistory: 3,
+ SuggestedFeeSlidingWindowSize: 50,
+ TelemetryToLog: true,
+ TxPoolExponentialIncreaseFactor: 2,
+ TxPoolSize: 15000,
+ TxSyncIntervalSeconds: 60,
+ TxSyncTimeoutSeconds: 30,
+ TxSyncServeResponseSize: 1000000,
+ PeerConnectionsUpdateInterval: 3600,
+ DNSSecurityFlags: 0x01, // New value with default 0x01
+ EnablePingHandler: true,
+ // DO NOT MODIFY VALUES - New values may be added carefully - See WARNING at top of file
+}
+
var defaultLocalV5 = Local{
// DO NOT MODIFY VALUES - New values may be added carefully - See WARNING at top of file
Version: 5,
@@ -53,6 +109,7 @@ var defaultLocalV5 = Local{
ConnectionsRateLimitingCount: 60,
ConnectionsRateLimitingWindowSeconds: 1,
DeadlockDetection: 0,
+ DisableOutgoingConnectionThrottling: false,
DNSBootstrapID: ".algorand.network",
EnableAgreementReporting: false,
EnableAgreementTimeMetrics: false,
@@ -75,6 +132,7 @@ var defaultLocalV5 = Local{
NodeExporterPath: "./node_exporter",
OutgoingMessageFilterBucketCount: 3,
OutgoingMessageFilterBucketSize: 128,
+ PeerConnectionsUpdateInterval: 3600,
ReconnectTime: 1 * time.Minute, // Was 60ns
ReservedFDs: 256,
RestReadTimeoutSeconds: 15,
@@ -88,7 +146,6 @@ var defaultLocalV5 = Local{
TxSyncIntervalSeconds: 60,
TxSyncTimeoutSeconds: 30,
TxSyncServeResponseSize: 1000000,
- PeerConnectionsUpdateInterval: 3600,
// DO NOT MODIFY VALUES - New values may be added carefully - See WARNING at top of file
}
@@ -357,6 +414,18 @@ func migrate(cfg Local) (newCfg Local, err error) {
newCfg.Version = 5
}
+ // Migrate 5 -> 6
+ if newCfg.Version == 5 {
+ if newCfg.DNSSecurityFlags == 0 {
+ newCfg.DNSSecurityFlags = defaultLocalV6.DNSSecurityFlags
+ }
+ if newCfg.EnablePingHandler == defaultLocalV5.EnablePingHandler {
+ newCfg.EnablePingHandler = defaultLocalV6.EnablePingHandler
+ }
+
+ newCfg.Version = 6
+ }
+
if newCfg.Version != configVersion {
err = fmt.Errorf("failed to migrate config version %d (stuck at %d) to latest %d", cfg.Version, newCfg.Version, configVersion)
}
diff --git a/crypto/curve25519.go b/crypto/curve25519.go
index 23aa64447f..3f721c4328 100644
--- a/crypto/curve25519.go
+++ b/crypto/curve25519.go
@@ -17,8 +17,14 @@
package crypto
// #cgo CFLAGS: -Wall -std=c99
-// #cgo CFLAGS: -I${SRCDIR}/include
-// #cgo LDFLAGS: ${SRCDIR}/lib/libsodium.a
+// #cgo darwin,amd64 CFLAGS: -I${SRCDIR}/libs/darwin/amd64/include
+// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/libs/darwin/amd64/lib/libsodium.a
+// #cgo linux,amd64 CFLAGS: -I${SRCDIR}/libs/linux/amd64/include
+// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/libs/linux/amd64/lib/libsodium.a
+// #cgo linux,arm64 CFLAGS: -I${SRCDIR}/libs/linux/arm64/include
+// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/libs/linux/arm64/lib/libsodium.a
+// #cgo linux,arm CFLAGS: -I${SRCDIR}/libs/linux/arm/include
+// #cgo linux,arm LDFLAGS: ${SRCDIR}/libs/linux/arm/lib/libsodium.a
// #include
// #include "sodium.h"
import "C"
@@ -124,6 +130,8 @@ type SignatureVerifier = PublicKey
// SignatureSecrets are used by an entity to produce unforgeable signatures over
// a message.
type SignatureSecrets struct {
+ _struct struct{} `codec:""`
+
SignatureVerifier
SK ed25519PrivateKey
}
diff --git a/crypto/libsodium-fork/autogen.sh b/crypto/libsodium-fork/autogen.sh
index 394e6f7a14..0de498c855 100755
--- a/crypto/libsodium-fork/autogen.sh
+++ b/crypto/libsodium-fork/autogen.sh
@@ -32,5 +32,5 @@ fi
$LIBTOOLIZE && \
aclocal && \
-automake --add-missing --force-missing --include-deps && \
+automake $* --add-missing --force-missing --include-deps && \
autoconf
diff --git a/crypto/msgp_gen.go b/crypto/msgp_gen.go
index 9b5de8349a..c863fa25e0 100644
--- a/crypto/msgp_gen.go
+++ b/crypto/msgp_gen.go
@@ -1713,6 +1713,232 @@ func (z *Signature) MsgIsZero() bool {
return (*z) == (Signature{})
}
+// MarshalMsg implements msgp.Marshaler
+func (z *SignatureSecrets) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "SK"
+ o = append(o, 0x82, 0xa2, 0x53, 0x4b)
+ o = msgp.AppendBytes(o, ((*z).SK)[:])
+ // string "SignatureVerifier"
+ o = append(o, 0xb1, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x72)
+ o, err = (*z).SignatureVerifier.MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "SignatureVerifier")
+ return
+ }
+ return
+}
+
+func (_ *SignatureSecrets) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*SignatureSecrets)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *SignatureSecrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = (*z).SignatureVerifier.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SignatureVerifier")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).SK)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SK")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = SignatureSecrets{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "SignatureVerifier":
+ bts, err = (*z).SignatureVerifier.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "SignatureVerifier")
+ return
+ }
+ case "SK":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).SK)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "SK")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *SignatureSecrets) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*SignatureSecrets)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *SignatureSecrets) Msgsize() (s int) {
+ s = 1 + 18 + (*z).SignatureVerifier.Msgsize() + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *SignatureSecrets) MsgIsZero() bool {
+ return ((*z).SignatureVerifier.MsgIsZero()) && ((*z).SK == (ed25519PrivateKey{}))
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *VRFSecrets) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "PK"
+ o = append(o, 0x82, 0xa2, 0x50, 0x4b)
+ o = msgp.AppendBytes(o, ((*z).PK)[:])
+ // string "SK"
+ o = append(o, 0xa2, 0x53, 0x4b)
+ o = msgp.AppendBytes(o, ((*z).SK)[:])
+ return
+}
+
+func (_ *VRFSecrets) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*VRFSecrets)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *VRFSecrets) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).PK)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "PK")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ zb0003--
+ bts, err = msgp.ReadExactBytes(bts, ((*z).SK)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "SK")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0004 {
+ (*z) = VRFSecrets{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "PK":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).PK)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "PK")
+ return
+ }
+ case "SK":
+ bts, err = msgp.ReadExactBytes(bts, ((*z).SK)[:])
+ if err != nil {
+ err = msgp.WrapError(err, "SK")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *VRFSecrets) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*VRFSecrets)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *VRFSecrets) Msgsize() (s int) {
+ s = 1 + 3 + msgp.ArrayHeaderSize + (32 * (msgp.ByteSize)) + 3 + msgp.ArrayHeaderSize + (64 * (msgp.ByteSize))
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *VRFSecrets) MsgIsZero() bool {
+ return ((*z).PK == (VrfPubkey{})) && ((*z).SK == (VrfPrivkey{}))
+}
+
// MarshalMsg implements msgp.Marshaler
func (z *VrfOutput) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
diff --git a/crypto/msgp_gen_test.go b/crypto/msgp_gen_test.go
index 6b4266149a..cdf29cb7d4 100644
--- a/crypto/msgp_gen_test.go
+++ b/crypto/msgp_gen_test.go
@@ -877,6 +877,130 @@ func BenchmarkUnmarshalSignature(b *testing.B) {
}
}
+func TestMarshalUnmarshalSignatureSecrets(t *testing.T) {
+ v := SignatureSecrets{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingSignatureSecrets(t *testing.T) {
+ protocol.RunEncodingTest(t, &SignatureSecrets{})
+}
+
+func BenchmarkMarshalMsgSignatureSecrets(b *testing.B) {
+ v := SignatureSecrets{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgSignatureSecrets(b *testing.B) {
+ v := SignatureSecrets{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalSignatureSecrets(b *testing.B) {
+ v := SignatureSecrets{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalVRFSecrets(t *testing.T) {
+ v := VRFSecrets{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingVRFSecrets(t *testing.T) {
+ protocol.RunEncodingTest(t, &VRFSecrets{})
+}
+
+func BenchmarkMarshalMsgVRFSecrets(b *testing.B) {
+ v := VRFSecrets{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgVRFSecrets(b *testing.B) {
+ v := VRFSecrets{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalVRFSecrets(b *testing.B) {
+ v := VRFSecrets{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalVrfOutput(t *testing.T) {
v := VrfOutput{}
bts, err := v.MarshalMsg(nil)
diff --git a/crypto/vrf.go b/crypto/vrf.go
index 53a13a6a64..25de87e7cb 100644
--- a/crypto/vrf.go
+++ b/crypto/vrf.go
@@ -17,8 +17,14 @@
package crypto
// #cgo CFLAGS: -Wall -std=c99
-// #cgo CFLAGS: -I${SRCDIR}/include/
-// #cgo LDFLAGS: ${SRCDIR}/lib/libsodium.a
+// #cgo darwin,amd64 CFLAGS: -I${SRCDIR}/libs/darwin/amd64/include
+// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/libs/darwin/amd64/lib/libsodium.a
+// #cgo linux,amd64 CFLAGS: -I${SRCDIR}/libs/linux/amd64/include
+// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/libs/linux/amd64/lib/libsodium.a
+// #cgo linux,arm64 CFLAGS: -I${SRCDIR}/libs/linux/arm64/include
+// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/libs/linux/arm64/lib/libsodium.a
+// #cgo linux,arm CFLAGS: -I${SRCDIR}/libs/linux/arm/include
+// #cgo linux,arm LDFLAGS: ${SRCDIR}/libs/linux/arm/lib/libsodium.a
// #include
// #include "sodium.h"
import "C"
@@ -39,6 +45,8 @@ type VRFProof = VrfProof
// VRFSecrets is a wrapper for a VRF keypair. Use *VrfPrivkey instead
type VRFSecrets struct {
+ _struct struct{} `codec:""`
+
PK VrfPubkey
SK VrfPrivkey
}
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index 6437f6ce91..63ab8e51fb 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -59,7 +59,7 @@ type Server struct {
}
// Initialize creates a Node instance with applicable network services
-func (s *Server) Initialize(cfg config.Local) error {
+func (s *Server) Initialize(cfg config.Local, phonebookAddresses []string) error {
// set up node
s.log = logging.Base()
@@ -119,13 +119,7 @@ func (s *Server) Initialize(cfg config.Local) error {
NodeExporterPath: cfg.NodeExporterPath,
})
- ex, err := os.Executable()
- if err != nil {
- return fmt.Errorf("cannot locate node executable: %s", err)
- }
- phonebookDir := filepath.Dir(ex)
-
- s.node, err = node.MakeFull(s.log, s.RootPath, cfg, phonebookDir, s.Genesis)
+ s.node, err = node.MakeFull(s.log, s.RootPath, cfg, phonebookAddresses, s.Genesis)
if os.IsNotExist(err) {
return fmt.Errorf("node has not been installed: %s", err)
}
@@ -272,9 +266,3 @@ func (s *Server) Stop() {
os.Remove(s.netFile)
os.Remove(s.netListenFile)
}
-
-// OverridePhonebook is used to replace the phonebook associated with
-// the server's node.
-func (s *Server) OverridePhonebook(dialOverride ...string) {
- s.node.ReplacePeerList(dialOverride...)
-}
diff --git a/daemon/kmd/wallet/driver/ledger.go b/daemon/kmd/wallet/driver/ledger.go
index 4ebddf1885..3383288687 100644
--- a/daemon/kmd/wallet/driver/ledger.go
+++ b/daemon/kmd/wallet/driver/ledger.go
@@ -18,10 +18,12 @@ package driver
import (
"bytes"
+ "crypto/sha512"
"encoding/binary"
"errors"
"fmt"
"sort"
+ "strings"
"github.com/algorand/go-deadlock"
@@ -36,6 +38,7 @@ import (
const (
ledgerWalletDriverName = "ledger"
ledgerWalletDriverVersion = 1
+ ledgerIDLen = 16
ledgerClass = uint8(0x80)
ledgerInsGetPublicKey = uint8(0x03)
@@ -116,14 +119,15 @@ func (lwd *LedgerWalletDriver) scanWalletsLocked() error {
// Try to open each new device, skipping ones that are already open.
var newDevs []LedgerUSB
for _, info := range infos {
- if curPaths[info.Path] {
- delete(curPaths, info.Path)
+ walletID := pathToID(info.Path)
+ if curPaths[walletID] {
+ delete(curPaths, walletID)
continue
}
dev, err := info.Open()
if err != nil {
- lwd.log.Warnf("enumerated but failed to open ledger %x: %v", info.ProductID, err)
+ lwd.log.Warnf("enumerated but failed to open ledger %s %x: %v", info.Path, info.ProductID, err)
continue
}
@@ -141,13 +145,22 @@ func (lwd *LedgerWalletDriver) scanWalletsLocked() error {
delete(lwd.wallets, deadPath)
}
- // Add in new devices
+ // Add in new ledger wallets if they appear valid
for _, dev := range newDevs {
- id := dev.USBInfo().Path
- lwd.wallets[id] = &LedgerWallet{
+ newWallet := &LedgerWallet{
dev: dev,
}
+
+ // Check that device responds to Algorand app requests
+ _, err := newWallet.ListKeys()
+ if err != nil {
+ continue
+ }
+
+ id := pathToID(dev.USBInfo().Path)
+ lwd.wallets[id] = newWallet
}
+
return nil
}
@@ -211,15 +224,27 @@ func (lw *LedgerWallet) ExportMasterDerivationKey(pw []byte) (crypto.MasterDeriv
return crypto.MasterDerivationKey{}, errNotSupported
}
+func pathToID(path string) string {
+ // The Path USB info field is platform-dependent and sometimes
+ // very long. We hash it to make the wallet name/ID less unwieldy
+ pathHashFull := sha512.Sum512_256([]byte(path))
+ return fmt.Sprintf("%x", pathHashFull[:ledgerIDLen])
+}
+
// Metadata implements the Wallet interface.
func (lw *LedgerWallet) Metadata() (wallet.Metadata, error) {
lw.mu.Lock()
defer lw.mu.Unlock()
info := lw.dev.USBInfo()
+
+ walletID := pathToID(info.Path)
+ walletName := fmt.Sprintf("%s-%s-%s-%s", info.Manufacturer, info.Product, info.Serial, walletID)
+ walletName = strings.Replace(walletName, " ", "-", -1)
+
return wallet.Metadata{
- ID: []byte(info.Path),
- Name: []byte(fmt.Sprintf("%s %s (serial %s, path %s)", info.Manufacturer, info.Product, info.Serial, info.Path)),
+ ID: []byte(walletID),
+ Name: []byte(walletName),
DriverName: ledgerWalletDriverName,
DriverVersion: ledgerWalletDriverVersion,
SupportedTransactions: ledgerWalletSupportedTxs,
diff --git a/daemon/kmd/wallet/driver/sqlite.go b/daemon/kmd/wallet/driver/sqlite.go
index 4f80f8f49c..71a530fff1 100644
--- a/daemon/kmd/wallet/driver/sqlite.go
+++ b/daemon/kmd/wallet/driver/sqlite.go
@@ -44,7 +44,7 @@ const (
sqliteWalletDriverVersion = 1
sqliteWalletsDirName = "sqlite_wallets"
sqliteWalletsDirPermissions = 0700
- sqliteWalletDBOptions = "_secure_delete=on&_tx_lock=exclusive"
+ sqliteWalletDBOptions = "_secure_delete=on&_txlock=exclusive"
sqliteMaxWalletNameLen = 64
sqliteMaxWalletIDLen = 64
sqliteIntOverflow = 1 << 63
@@ -831,7 +831,7 @@ func (sw *SQLiteWallet) GenerateKey(displayMnemonic bool) (addr crypto.Digest, e
return
}
- // Begin an exclusive database transaction (we set _tx_lock=exclusive on the
+ // Begin an exclusive database transaction (we set _txlock=exclusive on the
// database connection string)
tx, err := db.Beginx()
if err != nil {
diff --git a/data/account/account.go b/data/account/account.go
index c97162ebd9..a61c64630d 100644
--- a/data/account/account.go
+++ b/data/account/account.go
@@ -111,7 +111,8 @@ func RestoreRoot(store db.Accessor) (acc Root, err error) {
return
}
- err = protocol.Decode(raw, &acc.secrets)
+ acc.secrets = &crypto.SignatureSecrets{}
+ err = protocol.Decode(raw, acc.secrets)
if err != nil {
err = fmt.Errorf("RestoreRoot: error decoding account: %v", err)
return
@@ -165,12 +166,14 @@ func RestoreParticipation(store db.Accessor) (acc Participation, err error) {
return Participation{}, err
}
- err = protocol.Decode(rawVRF, &acc.VRF)
+ acc.VRF = &crypto.VRFSecrets{}
+ err = protocol.Decode(rawVRF, acc.VRF)
if err != nil {
return Participation{}, err
}
- err = protocol.Decode(rawVoting, &acc.Voting)
+ acc.Voting = &crypto.OneTimeSignatureSecrets{}
+ err = protocol.Decode(rawVoting, acc.Voting)
if err != nil {
return Participation{}, err
}
diff --git a/data/account/participation.go b/data/account/participation.go
index af5e403859..c2f0c1d837 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -100,7 +100,8 @@ func (part Participation) DeleteOldKeys(current basics.Round, proto config.Conse
})
close(errorCh)
}
- encodedVotingSecrets := protocol.Encode(part.Voting.Snapshot())
+ voting := part.Voting.Snapshot()
+ encodedVotingSecrets := protocol.Encode(&voting)
go deleteOldKeys(encodedVotingSecrets)
return errorCh
}
@@ -191,7 +192,8 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs
// Persist writes a Participation out to a database on the disk
func (part Participation) Persist() error {
rawVRF := protocol.Encode(part.VRF)
- rawVoting := protocol.Encode(part.Voting.Snapshot())
+ voting := part.Voting.Snapshot()
+ rawVoting := protocol.Encode(&voting)
return part.Store.Atomic(func(tx *sql.Tx) error {
err := partInstallDatabase(tx)
diff --git a/data/basics/msgp_gen.go b/data/basics/msgp_gen.go
index cc9b6fc442..f540d8b5d7 100644
--- a/data/basics/msgp_gen.go
+++ b/data/basics/msgp_gen.go
@@ -852,6 +852,9 @@ func (z AssetIndex) MarshalMsg(b []byte) (o []byte, err error) {
func (_ AssetIndex) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(AssetIndex)
+ if !ok {
+ _, ok = (z).(*AssetIndex)
+ }
return ok
}
@@ -1951,6 +1954,9 @@ func (z Round) MarshalMsg(b []byte) (o []byte, err error) {
func (_ Round) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(Round)
+ if !ok {
+ _, ok = (z).(*Round)
+ }
return ok
}
@@ -1994,6 +2000,9 @@ func (z RoundInterval) MarshalMsg(b []byte) (o []byte, err error) {
func (_ RoundInterval) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(RoundInterval)
+ if !ok {
+ _, ok = (z).(*RoundInterval)
+ }
return ok
}
@@ -2037,6 +2046,9 @@ func (z Status) MarshalMsg(b []byte) (o []byte, err error) {
func (_ Status) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(Status)
+ if !ok {
+ _, ok = (z).(*Status)
+ }
return ok
}
diff --git a/data/basics/units.go b/data/basics/units.go
index 23feb02531..95fe82266a 100644
--- a/data/basics/units.go
+++ b/data/basics/units.go
@@ -73,6 +73,12 @@ func (a *MicroAlgos) CodecDecodeSelf(dec *codec.Decoder) {
dec.MustDecode(&a.Raw)
}
+// CanMarshalMsg implements msgp.Marshaler
+func (MicroAlgos) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(MicroAlgos)
+ return ok
+}
+
// MarshalMsg implements msgp.Marshaler
func (a MicroAlgos) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, msgp.Uint64Size)
@@ -80,6 +86,12 @@ func (a MicroAlgos) MarshalMsg(b []byte) (o []byte, err error) {
return
}
+// CanUnmarshalMsg implements msgp.Unmarshaler
+func (*MicroAlgos) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*MicroAlgos)
+ return ok
+}
+
// UnmarshalMsg implements msgp.Unmarshaler
func (a *MicroAlgos) UnmarshalMsg(bts []byte) (o []byte, err error) {
a.Raw, o, err = msgp.ReadUint64Bytes(bts)
diff --git a/data/committee/common_test.go b/data/committee/common_test.go
index bbd8c6d29d..fe2264765f 100644
--- a/data/committee/common_test.go
+++ b/data/committee/common_test.go
@@ -171,7 +171,7 @@ type AgreementSelector struct {
// ToBeHashed implements the crypto.Hashable interface.
func (sel AgreementSelector) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.AgreementSelector, protocol.Encode(&sel)
+ return protocol.AgreementSelector, protocol.EncodeReflect(&sel)
}
// CommitteeSize returns the size of the committee,
diff --git a/data/committee/credential.go b/data/committee/credential.go
index 2d5dfe2ef8..fc9abb8cc8 100644
--- a/data/committee/credential.go
+++ b/data/committee/credential.go
@@ -134,6 +134,7 @@ func MakeCredential(secrets *crypto.VrfPrivkey, sel Selector) UnauthenticatedCre
// Less returns true if this Credential is less than the other credential; false
// otherwise (i.e., >=).
+// Used for breaking ties when there are multiple proposals.
//
// Precondition: both credentials have nonzero weight
func (cred Credential) Less(otherCred Credential) bool {
@@ -155,9 +156,60 @@ func (cred Credential) Selected() bool {
return cred.Weight > 0
}
+// lowestOutput is used for breaking ties when there are multiple proposals.
+// People will vote for the proposal whose credential has the lowest lowestOutput.
+//
+// We hash the credential and interpret the output as a bigint.
+// For credentials with weight w > 1, we hash the credential w times (with
+// different counter values) and use the lowest output.
+//
+// This is because a weight w credential is simulating being selected to be on the
+// leader committee w times, so each of the w proposals would have a different hash,
+// and the lowest would win.
func (cred Credential) lowestOutput() *big.Int {
var lowest big.Int
+ h1 := cred.VrfOut
+ // It is important that i start at 1 rather than 0 because cred.Hashable
+ // was already hashed with iter = 0 earlier (in UnauthenticatedCredential.Verify)
+ // for determining the weight of the credential. A nonzero iter provides
+ // domain separation between lowestOutput and UnauthenticatedCredential.Verify
+ //
+ // If we reused the iter = 0 hash output here it would be nonuniformly
+ // distributed (because lowestOutput can only get called if weight > 0).
+ // In particular if i starts at 0 then weight-1 credentials are at a
+ // significant disadvantage because UnauthenticatedCredential.Verify
+ // wants the hash to be large but tiebreaking between proposals wants
+ // the hash to be small.
+ for i := uint64(1); i <= cred.Weight; i++ {
+ var h crypto.Digest
+ if cred.DomainSeparationEnabled {
+ cred.Hashable.Iter = i
+ h = crypto.HashObj(cred.Hashable)
+ } else {
+ var h2 crypto.Digest
+ binary.BigEndian.PutUint64(h2[:], i)
+ h = crypto.Hash(append(h1[:], h2[:]...))
+ }
+
+ if i == 1 {
+ lowest.SetBytes(h[:])
+ } else {
+ var temp big.Int
+ temp.SetBytes(h[:])
+ if temp.Cmp(&lowest) < 0 {
+ lowest.Set(&temp)
+ }
+ }
+ }
+
+ return &lowest
+}
+
+// TODO(upgrade): Please remove the entire lowestOutputBuggy function as soon as the corresponding protocol upgrade goes through.
+func (cred Credential) lowestOutputBuggy() *big.Int {
+ var lowest big.Int
+
h1 := cred.VrfOut
for i := uint64(0); i < cred.Weight; i++ {
var h crypto.Digest
@@ -184,6 +236,28 @@ func (cred Credential) lowestOutput() *big.Int {
return &lowest
}
+// LessBuggy is the buggy version of Less
+// TODO(upgrade): Please remove the entire LessBuggy function as soon as the corresponding protocol upgrade goes through
+func (cred Credential) LessBuggy(otherCred Credential) bool {
+ i1 := cred.lowestOutputBuggy()
+ i2 := otherCred.lowestOutputBuggy()
+
+ return i1.Cmp(i2) < 0
+}
+
+// LowestOutputDigest gives the lowestOutput as a crypto.Digest, which allows
+// pretty-printing a proposal's lowest output.
+// This function is only used for debugging.
+func (cred Credential) LowestOutputDigest() crypto.Digest {
+ lbytes := cred.lowestOutput().Bytes()
+ var out crypto.Digest
+ if len(lbytes) > len(out) {
+ panic("Cred lowest output too long")
+ }
+	copy(out[len(out)-len(lbytes):], lbytes)
+ return out
+}
+
func (cred hashableCredential) ToBeHashed() (protocol.HashID, []byte) {
return protocol.Credential, protocol.Encode(&cred)
}
diff --git a/data/datatest/impls.go b/data/datatest/impls.go
index 9c570a654a..1ccbeb9076 100644
--- a/data/datatest/impls.go
+++ b/data/datatest/impls.go
@@ -137,7 +137,7 @@ func (i ledgerImpl) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion,
}
// EnsureDigest implements Ledger.EnsureDigest.
-func (i ledgerImpl) EnsureDigest(cert agreement.Certificate, quit chan struct{}, verifier *agreement.AsyncVoteVerifier) {
+func (i ledgerImpl) EnsureDigest(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
r := cert.Round
consistencyCheck := func() bool {
if r < i.NextRound() {
@@ -158,14 +158,4 @@ func (i ledgerImpl) EnsureDigest(cert agreement.Certificate, quit chan struct{},
if consistencyCheck() {
return
}
-
- select {
- case <-quit:
- return
- case <-i.Wait(r):
- if !consistencyCheck() {
- err := fmt.Errorf("Wait channel fired without matching block in round %v", r)
- panic(err)
- }
- }
}
diff --git a/data/ledger.go b/data/ledger.go
index bbfefdc059..c3f1302503 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -286,6 +286,7 @@ func (l *Ledger) EnsureValidatedBlock(vb *ledger.ValidatedBlock, c agreement.Cer
// EnsureBlock ensures that the block, and associated certificate c, are
// written to the ledger, or that some other block for the same round is
// written to the ledger.
+// This function can be called concurrently.
func (l *Ledger) EnsureBlock(block *bookkeeping.Block, c agreement.Certificate) {
round := block.Round()
protocolErrorLogged := false
diff --git a/data/ledger_test.go b/data/ledger_test.go
index 76a3c902d1..4d79d7dd70 100644
--- a/data/ledger_test.go
+++ b/data/ledger_test.go
@@ -34,7 +34,6 @@ import (
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/execpool"
)
func incaddr(user *basics.Address) {
@@ -70,9 +69,6 @@ func BenchmarkAssemblePayset(b *testing.B) {
secrets := make([]*crypto.SignatureSecrets, numUsers)
addresses := make([]basics.Address, numUsers)
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
-
genesis := make(map[basics.Address]basics.AccountData)
for i := 0; i < numUsers; i++ {
secret := keypair()
@@ -164,7 +160,7 @@ func BenchmarkAssemblePayset(b *testing.B) {
}
b.StartTimer()
newEmptyBlk := bookkeeping.MakeBlock(prev)
- eval, err := l.StartEvaluator(newEmptyBlk.BlockHeader, tp, backlogPool)
+ eval, err := l.StartEvaluator(newEmptyBlk.BlockHeader)
if err != nil {
b.Errorf("could not make proposals at round %d: could not start evaluator: %v", next, err)
return
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index efc4c6bc95..e2c47513d0 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -451,16 +451,6 @@ func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledger.St
}
}
-// alwaysVerifiedPool implements ledger.VerifiedTxnCache and returns every
-// transaction as verified.
-type alwaysVerifiedPool struct {
- pool *TransactionPool
-}
-
-func (*alwaysVerifiedPool) Verified(txn transactions.SignedTxn, params verify.Params) bool {
- return true
-}
-
func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactions.SignedTxn) error {
r := pool.pendingBlockEvaluator.Round() + pool.numPendingWholeBlocks
for _, tx := range txgroup {
@@ -521,7 +511,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact
next := bookkeeping.MakeBlock(prev)
pool.numPendingWholeBlocks = 0
- pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, &alwaysVerifiedPool{pool}, nil)
+ pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader)
if err != nil {
logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err)
return
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 757b14b0fe..9cbd86d10a 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -102,7 +102,7 @@ func newBlockEvaluator(t TestingT, l *ledger.Ledger) *ledger.BlockEvaluator {
require.NoError(t, err)
next := bookkeeping.MakeBlock(prev)
- eval, err := l.StartEvaluator(next.BlockHeader, &alwaysVerifiedPool{}, nil)
+ eval, err := l.StartEvaluator(next.BlockHeader)
require.NoError(t, err)
return eval
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index c0262a080a..dd995b7c4a 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -775,10 +775,14 @@ func typecheck(expected, got StackType) bool {
}
func filterFieldsForLineComment(fields []string) []string {
+ prevField := ""
for i, s := range fields {
if strings.HasPrefix(s, "//") {
- return fields[:i]
+ if prevField != "base64" && prevField != "b64" {
+ return fields[:i]
+ }
}
+ prevField = s
}
return fields
}
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 9daf98e579..f3975e94ff 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -240,6 +240,25 @@ bnz wat`
require.Nil(t, program)
}
+func TestAssembleBase64(t *testing.T) {
+ text := `byte base64 //GWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz+0=
+byte base64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
+//
+//text
+==
+int 1 //sometext
+&& //somemoretext
+==
+byte b64 //GWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz+8=
+byte b64 avGWRM+yy3BCavBDXO/FYTNZ6o2Jai5edsMCBdDEz//=
+==
+||`
+ program, err := AssembleString(text)
+ require.NoError(t, err)
+ s := hex.EncodeToString(program)
+ require.Equal(t, "01200101260320fff19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfed206af19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfff20fff19644cfb2cb70426af0435cefc5613359ea8d896a2e5e76c30205d0c4cfef2829122210122a291211", s)
+}
+
func TestAssembleRejectUnkLabel(t *testing.T) {
text := `int 1
bnz nowhere`
diff --git a/data/transactions/msgp_gen.go b/data/transactions/msgp_gen.go
index 56e2bb1c86..b59a4327f6 100644
--- a/data/transactions/msgp_gen.go
+++ b/data/transactions/msgp_gen.go
@@ -1504,6 +1504,9 @@ func (z MinFeeError) MarshalMsg(b []byte) (o []byte, err error) {
func (_ MinFeeError) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(MinFeeError)
+ if !ok {
+ _, ok = (z).(*MinFeeError)
+ }
return ok
}
@@ -1722,6 +1725,9 @@ func (z Payset) MarshalMsg(b []byte) (o []byte, err error) {
func (_ Payset) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(Payset)
+ if !ok {
+ _, ok = (z).(*Payset)
+ }
return ok
}
diff --git a/data/transactions/payment_test.go b/data/transactions/payment_test.go
index 6bd3fd6249..ec62805d32 100644
--- a/data/transactions/payment_test.go
+++ b/data/transactions/payment_test.go
@@ -45,28 +45,28 @@ func TestAlgosEncoding(t *testing.T) {
var i uint64
a.Raw = 222233333
- err := protocol.Decode(protocol.Encode(a), &b)
+ err := protocol.Decode(protocol.Encode(&a), &b)
if err != nil {
panic(err)
}
require.Equal(t, a, b)
a.Raw = 12345678
- err = protocol.Decode(protocol.Encode(a), &i)
+ err = protocol.DecodeReflect(protocol.Encode(a), &i)
if err != nil {
panic(err)
}
require.Equal(t, a.Raw, i)
i = 87654321
- err = protocol.Decode(protocol.Encode(i), &a)
+ err = protocol.Decode(protocol.EncodeReflect(i), &a)
if err != nil {
panic(err)
}
require.Equal(t, a.Raw, i)
x := true
- err = protocol.Decode(protocol.Encode(x), &a)
+ err = protocol.Decode(protocol.EncodeReflect(x), &a)
if err == nil {
panic("decode of bool into MicroAlgos succeeded")
}
diff --git a/data/txHandler.go b/data/txHandler.go
index b2d7c2287c..9d27621840 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -113,7 +113,7 @@ func (handler *TxHandler) Stop() {
func reencode(stxns []transactions.SignedTxn) []byte {
var result [][]byte
for _, stxn := range stxns {
- result = append(result, protocol.Encode(stxn))
+ result = append(result, protocol.Encode(&stxn))
}
return bytes.Join(result, nil)
}
diff --git a/docker/build/Dockerfile-deploy b/docker/build/Dockerfile-deploy
index 69fdbd3145..356298eab8 100644
--- a/docker/build/Dockerfile-deploy
+++ b/docker/build/Dockerfile-deploy
@@ -8,7 +8,7 @@ ENV GOROOT=/usr/local/go \
GOPATH=$HOME/go
RUN mkdir -p $GOPATH/src/github.com/algorand
WORKDIR $GOPATH/src/github.com/algorand
-COPY ./go-algorand ./go-algorand/
+COPY . ./go-algorand/
ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
BRANCH=${BRANCH} \
CHANNEL=${CHANNEL} \
diff --git a/docker/build/arm.Dockerfile b/docker/build/arm.Dockerfile
new file mode 100644
index 0000000000..bc2a79917e
--- /dev/null
+++ b/docker/build/arm.Dockerfile
@@ -0,0 +1,30 @@
+FROM arm32v6/golang:1.12-alpine
+RUN apk update && \
+ apk add make && \
+ apk add bash && \
+ apk add git && \
+ apk add python3 && \
+ apk add boost-dev && \
+ apk add expect && \
+ apk add jq && \
+ apk add autoconf && \
+ apk add --update alpine-sdk && \
+ apk add libtool && \
+ apk add automake && \
+ apk add fmt && \
+ apk add build-base && \
+ apk add musl-dev && \
+ apk add sqlite
+
+RUN apk add dpkg && \
+ wget http://deb.debian.org/debian/pool/main/s/shellcheck/shellcheck_0.5.0-3_armhf.deb && \
+ dpkg-deb -R shellcheck_0.5.0-3_armhf.deb shellcheck && \
+ cd shellcheck && \
+ mv usr/bin/shellcheck /usr/bin/
+COPY . $GOPATH/src/github.com/algorand/go-algorand
+WORKDIR $GOPATH/src/github.com/algorand/go-algorand
+ENV GCC_CONFIG="--with-arch=armv6"
+RUN make ci-deps && make clean
+RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
+ mkdir -p $GOPATH/src/github.com/algorand/go-algorand
+CMD ["/bin/bash"]
diff --git a/docker/build/cicd.Dockerfile b/docker/build/cicd.Dockerfile
new file mode 100644
index 0000000000..d2c58ee3f2
--- /dev/null
+++ b/docker/build/cicd.Dockerfile
@@ -0,0 +1,29 @@
+ARG ARCH="amd64"
+
+FROM ${ARCH}/centos:7
+ENV GOLANG_VERSION 1.12
+ARG ARCH="amd64"
+RUN yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
+ yum update -y && \
+ yum install -y autoconf wget awscli git gnupg2 nfs-utils python36 sqlite3 boost-devel expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2 which ShellCheck
+WORKDIR /root
+RUN wget https://dl.google.com/go/go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz \
+ && tar -xvf go${GOLANG_VERSION}.linux-${ARCH%v*}.tar.gz && \
+ mv go /usr/local
+ENV GOROOT=/usr/local/go \
+ GOPATH=$HOME/go
+RUN mkdir -p $GOPATH/src/github.com/algorand
+COPY . $GOPATH/src/github.com/algorand/go-algorand
+ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \
+ BRANCH=${BRANCH} \
+ CHANNEL=${CHANNEL} \
+ BUILDCHANNEL=${BUILDCHANNEL} \
+ DEFAULTNETWORK=${DEFAULTNETWORK} \
+ FULLVERSION=${FULLVERSION} \
+ PKG_ROOT=${PKG_ROOT}
+WORKDIR $GOPATH/src/github.com/algorand/go-algorand
+RUN make ci-deps && make clean
+RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \
+ mkdir -p $GOPATH/src/github.com/algorand/go-algorand
+RUN echo "vm.max_map_count = 262144" >> /etc/sysctl.conf
+CMD ["/bin/bash"]
diff --git a/gen/generate.go b/gen/generate.go
index c80186b151..f062a55541 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -21,7 +21,12 @@ import (
"io/ioutil"
"os"
"path/filepath"
+ "runtime"
"sort"
+ "sync"
+ "sync/atomic"
+
+ "github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/account"
@@ -51,7 +56,7 @@ type genesisAllocation struct {
}
// GenerateGenesisFiles generates the genesis.json file and wallet files for a give genesis configuration.
-func GenerateGenesisFiles(genesisData GenesisData, outDir string, verbose bool) error {
+func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusProtocols, outDir string, verbose bool) error {
err := os.Mkdir(outDir, os.ModeDir|os.FileMode(0777))
if err != nil && os.IsNotExist(err) {
return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error())
@@ -93,127 +98,183 @@ func GenerateGenesisFiles(genesisData GenesisData, outDir string, verbose bool)
genesisData.RewardsPool = defaultPoolAddr
}
- return generateGenesisFiles(outDir, proto, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, verbose)
+ consensusParams, ok := consensus[proto]
+ if !ok {
+ return fmt.Errorf("protocol %s not supported", proto)
+ }
+
+ return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, verbose)
}
-func generateGenesisFiles(outDir string, proto protocol.ConsensusVersion, netName string, schemaVersionModifier string,
+func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, netName string, schemaVersionModifier string,
allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, verbose bool) (err error) {
genesisAddrs := make(map[string]basics.Address)
records := make(map[string]basics.AccountData)
- params, ok := config.Consensus[proto]
- if !ok {
- return fmt.Errorf("protocol %s not supported", proto)
- }
if partKeyDilution == 0 {
- partKeyDilution = params.DefaultKeyDilution
+ partKeyDilution = protoParams.DefaultKeyDilution
}
// Sort account names alphabetically
sort.SliceStable(allocation, func(i, j int) bool {
return allocation[i].Name < allocation[j].Name
})
- rootKeyCreated := 0
- partKeyCreated := 0
-
- for _, wallet := range allocation {
- var root account.Root
- var part account.Participation
-
- wfilename := filepath.Join(outDir, config.RootKeyFilename(wallet.Name))
- pfilename := filepath.Join(outDir, config.PartKeyFilename(wallet.Name, firstWalletValid, lastWalletValid))
+ rootKeyCreated := int64(0)
+ partKeyCreated := int64(0)
+
+ pendingWallets := make(chan genesisAllocation, len(allocation))
+
+ concurrentWalletGenerators := runtime.NumCPU() * 2
+ errorsChannel := make(chan error, concurrentWalletGenerators)
+ verbosedOutput := make(chan string)
+ var creatingWalletsWaitGroup sync.WaitGroup
+ var writeMu deadlock.Mutex
+
+ createWallet := func() {
+ var err error
+ defer creatingWalletsWaitGroup.Done()
+ for {
+ var wallet genesisAllocation
+ select {
+ case wallet = <-pendingWallets:
+ default:
+ return
+ }
+ var root account.Root
+ var part account.Participation
- root, rootDB, rootkeyErr := loadRootKey(wfilename)
- if rootkeyErr != nil && !os.IsNotExist(rootkeyErr) {
- return rootkeyErr
- }
+ wfilename := filepath.Join(outDir, config.RootKeyFilename(wallet.Name))
+ pfilename := filepath.Join(outDir, config.PartKeyFilename(wallet.Name, firstWalletValid, lastWalletValid))
- part, partDB, partkeyErr := loadPartKeys(pfilename)
- if partkeyErr != nil && !os.IsNotExist(partkeyErr) && partkeyErr != account.ErrUnsupportedSchema {
- return partkeyErr
- }
+ root, rootDB, rootkeyErr := loadRootKey(wfilename)
+ if rootkeyErr != nil && !os.IsNotExist(rootkeyErr) {
+ errorsChannel <- rootkeyErr
+ return
+ }
- if rootkeyErr == nil && partkeyErr == nil {
- if verbose {
- fmt.Println("Reusing existing wallet:", wfilename, pfilename)
+ part, partDB, partkeyErr := loadPartKeys(pfilename)
+ if partkeyErr != nil && !os.IsNotExist(partkeyErr) && partkeyErr != account.ErrUnsupportedSchema {
+ errorsChannel <- partkeyErr
+ return
}
- } else {
- // At this point either rootKeys is valid or rootkeyErr != nil
- // Likewise, either partkey is valid or partkeyErr != nil
- if rootkeyErr != nil {
- os.Remove(wfilename)
-
- rootDB, err = db.MakeErasableAccessor(wfilename)
- if err != nil {
- err = fmt.Errorf("couldn't open root DB accessor %s: %v", wfilename, err)
- } else {
- root, err = account.GenerateRoot(rootDB)
+
+ if rootkeyErr == nil && partkeyErr == nil {
+ if verbose {
+ verbosedOutput <- fmt.Sprintln("Reusing existing wallet:", wfilename, pfilename)
}
- if err != nil {
+ } else {
+ // At this point either rootKeys is valid or rootkeyErr != nil
+ // Likewise, either partkey is valid or partkeyErr != nil
+ if rootkeyErr != nil {
os.Remove(wfilename)
- return
+
+ rootDB, err = db.MakeErasableAccessor(wfilename)
+ if err != nil {
+ err = fmt.Errorf("couldn't open root DB accessor %s: %v", wfilename, err)
+ } else {
+ root, err = account.GenerateRoot(rootDB)
+ }
+ if err != nil {
+ os.Remove(wfilename)
+ errorsChannel <- err
+ return
+ }
+ if verbose {
+ verbosedOutput <- fmt.Sprintf("Created new rootkey: %s", wfilename)
+ }
+ atomic.AddInt64(&rootKeyCreated, 1)
}
- if verbose {
- fmt.Printf("Created new rootkey: %s\n", wfilename)
+
+ if partkeyErr != nil && wallet.Online == basics.Online {
+ os.Remove(pfilename)
+
+ partDB, err = db.MakeErasableAccessor(pfilename)
+ if err != nil {
+ err = fmt.Errorf("couldn't open participation DB accessor %s: %v", pfilename, err)
+ os.Remove(pfilename)
+ errorsChannel <- err
+ return
+ }
+
+ part, err = account.FillDBWithParticipationKeys(partDB, root.Address(), basics.Round(firstWalletValid), basics.Round(lastWalletValid), partKeyDilution)
+ if err != nil {
+ err = fmt.Errorf("could not generate new participation file %s: %v", pfilename, err)
+ os.Remove(pfilename)
+ errorsChannel <- err
+ return
+ }
+ if verbose {
+ verbosedOutput <- fmt.Sprintf("Created new partkey: %s", pfilename)
+ }
+ atomic.AddInt64(&partKeyCreated, 1)
}
- rootKeyCreated++
}
- if partkeyErr != nil && wallet.Online == basics.Online {
- os.Remove(pfilename)
+ var data basics.AccountData
+ data.Status = wallet.Online
+ data.MicroAlgos.Raw = wallet.Stake
+ if wallet.Online == basics.Online {
+ data.VoteID = part.VotingSecrets().OneTimeSignatureVerifier
+ data.SelectionID = part.VRFSecrets().PK
+ data.VoteFirstValid = part.FirstValid
+ data.VoteLastValid = part.LastValid
+ data.VoteKeyDilution = part.KeyDilution
+ }
- partDB, err = db.MakeErasableAccessor(pfilename)
- if err != nil {
- err = fmt.Errorf("couldn't open participation DB accessor %s: %v", pfilename, err)
- os.Remove(pfilename)
- return
- }
+ writeMu.Lock()
+ records[wallet.Name] = data
- part, err = account.FillDBWithParticipationKeys(partDB, root.Address(), basics.Round(firstWalletValid), basics.Round(lastWalletValid), partKeyDilution)
- if err != nil {
- err = fmt.Errorf("could not generate new participation file %s: %v", pfilename, err)
- os.Remove(pfilename)
- return
- }
- if verbose {
- fmt.Printf("Created new partkey: %s\n", pfilename)
- }
- partKeyCreated++
+ genesisAddrs[wallet.Name] = root.Address()
+ writeMu.Unlock()
+
+ rootDB.Close()
+ if wallet.Online == basics.Online {
+ partDB.Close()
}
}
+ }
- var data basics.AccountData
- data.Status = wallet.Online
- data.MicroAlgos.Raw = wallet.Stake
- if wallet.Online == basics.Online {
- data.VoteID = part.VotingSecrets().OneTimeSignatureVerifier
- data.SelectionID = part.VRFSecrets().PK
- data.VoteFirstValid = part.FirstValid
- data.VoteLastValid = part.LastValid
- data.VoteKeyDilution = part.KeyDilution
- }
+ for _, wallet := range allocation {
+ pendingWallets <- wallet
+ }
+
+ if verbose {
+ // create a listener for the verbosedOutput
+ go func() {
+ for textOut := range verbosedOutput {
+ fmt.Printf("%s\n", textOut)
+ }
+ }()
+ }
- records[wallet.Name] = data
+ creatingWalletsWaitGroup.Add(concurrentWalletGenerators)
+ for routinesCounter := 0; routinesCounter < concurrentWalletGenerators; routinesCounter++ {
+ go createWallet()
+ }
- genesisAddrs[wallet.Name] = root.Address()
+ // wait until all goroutines are done.
+ creatingWalletsWaitGroup.Wait()
- rootDB.Close()
- if wallet.Online == basics.Online {
- partDB.Close()
- }
+ close(verbosedOutput)
+
+ // check to see if we had any errors.
+ select {
+ case err := <-errorsChannel:
+ return err
+ default:
}
genesisAddrs["FeeSink"] = feeSink
genesisAddrs["RewardsPool"] = rewardsPool
if verbose {
- fmt.Println(proto, config.Consensus[proto].MinBalance)
+ fmt.Println(protoVersion, protoParams.MinBalance)
}
records["FeeSink"] = basics.AccountData{
Status: basics.NotParticipating,
- MicroAlgos: basics.MicroAlgos{Raw: config.Consensus[proto].MinBalance},
+ MicroAlgos: basics.MicroAlgos{Raw: protoParams.MinBalance},
}
records["RewardsPool"] = basics.AccountData{
Status: basics.NotParticipating,
@@ -222,7 +283,7 @@ func generateGenesisFiles(outDir string, proto protocol.ConsensusVersion, netNam
sinkAcct := genesisAllocation{
Name: "FeeSink",
- Stake: config.Consensus[proto].MinBalance,
+ Stake: protoParams.MinBalance,
Online: basics.NotParticipating,
}
poolAcct := genesisAllocation{
@@ -238,7 +299,7 @@ func generateGenesisFiles(outDir string, proto protocol.ConsensusVersion, netNam
g := bookkeeping.Genesis{
SchemaID: schemaID + schemaVersionModifier,
- Proto: proto,
+ Proto: protoVersion,
Network: protocol.NetworkID(netName),
Timestamp: 0,
FeeSink: feeSink.String(),
diff --git a/gen/generate_test.go b/gen/generate_test.go
new file mode 100644
index 0000000000..47748a7448
--- /dev/null
+++ b/gen/generate_test.go
@@ -0,0 +1,102 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package gen
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/util/db"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestLoadMultiRootKeyConcurrent(t *testing.T) {
+ t.Skip() // skip in auto-test mode
+ a := require.New(t)
+ tempDir, err := ioutil.TempDir("", "loadkey-test-")
+ a.NoError(err)
+ defer os.RemoveAll(tempDir)
+
+ const numThreads = 100
+ var wg sync.WaitGroup
+ wg.Add(numThreads)
+
+ for i := 0; i < numThreads; i++ {
+ go func(idx int) {
+ defer wg.Done()
+ wallet := filepath.Join(tempDir, fmt.Sprintf("wallet%d", idx+1))
+ rootDB, err := db.MakeErasableAccessor(wallet)
+ defer rootDB.Close()
+ a.NoError(err)
+ _, err = account.GenerateRoot(rootDB)
+ a.NoError(err)
+ }(i)
+ }
+
+ wg.Wait()
+
+ for r := 0; r < 1000; r++ {
+ var wg sync.WaitGroup
+ wg.Add(numThreads)
+ for i := 0; i < numThreads; i++ {
+ go func(idx int) {
+ defer wg.Done()
+ wallet := filepath.Join(tempDir, fmt.Sprintf("wallet%d", idx+1))
+ _, db, err := loadRootKey(wallet)
+ a.NoError(err)
+ db.Close()
+ }(i)
+ }
+ wg.Wait()
+ }
+}
+
+func TestLoadSingleRootKeyConcurrent(t *testing.T) {
+ t.Skip() // skip in auto-test mode
+ a := require.New(t)
+ tempDir, err := ioutil.TempDir("", "loadkey-test-")
+ a.NoError(err)
+ defer os.RemoveAll(tempDir)
+
+ wallet := filepath.Join(tempDir, "wallet1")
+ rootDB, err := db.MakeErasableAccessor(wallet)
+ a.NoError(err)
+ _, err = account.GenerateRoot(rootDB)
+ rootDB.Close()
+ a.NoError(err)
+
+ const numThreads = 10000
+ var wg sync.WaitGroup
+ wg.Add(numThreads)
+
+ for i := 0; i < numThreads; i++ {
+ go func(idx int) {
+ defer wg.Done()
+ wallet := filepath.Join(tempDir, "wallet1")
+ _, db, err := loadRootKey(wallet)
+ a.NoError(err)
+ db.Close()
+ }(i)
+ }
+ wg.Wait()
+}
diff --git a/go.mod b/go.mod
index 84fb806f50..727a471038 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.12
require (
github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d
github.com/algorand/go-deadlock v0.0.0-20181221160745-78d8cb5e2759
- github.com/algorand/msgp v1.1.37
+ github.com/algorand/msgp v1.1.39
github.com/algorand/websocket v1.4.1
github.com/aws/aws-sdk-go v1.16.5
github.com/cpuguy83/go-md2man v1.0.8 // indirect
@@ -31,6 +31,7 @@ require (
github.com/mattn/go-colorable v0.0.9 // indirect
github.com/mattn/go-isatty v0.0.4 // indirect
github.com/mattn/go-sqlite3 v1.10.0
+ github.com/miekg/dns v1.1.27
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/olivere/elastic v6.2.14+incompatible
github.com/onsi/ginkgo v1.8.0 // indirect
diff --git a/go.sum b/go.sum
index 569f26213e..e41c9c54fb 100644
--- a/go.sum
+++ b/go.sum
@@ -4,12 +4,10 @@ github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d h1:W9MgGUo
github.com/algorand/go-codec/codec v0.0.0-20190507210007-269d70b6135d/go.mod h1:qm6LyXvDa1+uZJxaVg8X+OEjBqt/zDinDa2EohtTDxU=
github.com/algorand/go-deadlock v0.0.0-20181221160745-78d8cb5e2759 h1:IiCuOE1YCReVyEr1IQHKTBTvFLKdeBCfQuxrqhniq+I=
github.com/algorand/go-deadlock v0.0.0-20181221160745-78d8cb5e2759/go.mod h1:Kve3O9VpxZIHsPzpfxNdyFltFU9jBTeVYMYxSC99tdg=
-github.com/algorand/msgp v1.1.33 h1:v4WXJa2r9Z+hBcSTx7sLuzybU0zeGdWxgC67JI3xz3I=
-github.com/algorand/msgp v1.1.33/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
-github.com/algorand/msgp v1.1.34 h1:Now8/CFnsea11GYboThekL2SGFIOSoVeioU8mSSDKIc=
-github.com/algorand/msgp v1.1.34/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
-github.com/algorand/msgp v1.1.37 h1:RzEtCgliE4rRqzrHKI8Jy1RdoiDLYWBqstzB2RKVyH0=
-github.com/algorand/msgp v1.1.37/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
+github.com/algorand/msgp v1.1.38 h1:nR125Hsit9jn+acfkcq97w1fsxkt3z1JGOFtrgFx0UI=
+github.com/algorand/msgp v1.1.38/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
+github.com/algorand/msgp v1.1.39 h1:sVDmS0CH7hDtJHWwNqwy3wgu5DQ9EM3VBIXS3KaNlNI=
+github.com/algorand/msgp v1.1.39/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE=
github.com/algorand/websocket v1.4.1 h1:FPoNHI8i2VZWZzhCscY8JTzsAE7Vv73753cMbzb3udk=
github.com/algorand/websocket v1.4.1/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8=
github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc=
@@ -76,6 +74,8 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
+github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
@@ -123,6 +123,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0O
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -135,6 +136,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c h1:+EXw7AwNOKzPFXMZ1yNjO40aW
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13 h1:tdsQdquKbTNMsSZLqnLELJGzCANp9oXhu6zFBW6ODx4=
golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -144,6 +146,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207224406-61798d64f025 h1:i84/3szN87uN9jFX/jRqUbszQto2oAsFlqPf6lbR8H4=
golang.org/x/tools v0.0.0-20200207224406-61798d64f025/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/installer/config.json.example b/installer/config.json.example
index c0b5eef526..a710ab850c 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 5,
+ "Version": 6,
"AnnounceParticipationKey": true,
"Archival": false,
"BaseLoggerDebugLevel": 4,
@@ -9,6 +9,7 @@
"CatchupParallelBlocks": 16,
"ConnectionsRateLimitingCount": 60,
"ConnectionsRateLimitingWindowSeconds": 1,
+ "DisableOutgoingConnectionThrottling": false,
"DeadlockDetection": 0,
"DNSBootstrapID": ".algorand.network",
"EnableIncomingMessageFilter": false,
@@ -33,6 +34,7 @@
"NodeExporterPath": "./node_exporter",
"OutgoingMessageFilterBucketCount": 3,
"OutgoingMessageFilterBucketSize": 128,
+ "PeerConnectionsUpdateInterval": 3600,
"PriorityPeers": {},
"ReconnectTime": 60000000000,
"ReservedFDs": 256,
@@ -47,5 +49,7 @@
"TxSyncIntervalSeconds": 60,
"TxSyncServeResponseSize": 1000000,
"TxSyncTimeoutSeconds": 30,
- "PeerConnectionsUpdateInterval": 3600
+ "PeerConnectionsUpdateInterval": 3600,
+ "DNSSecurityFlags": 1,
+ "EnablePingHandler": true
}
diff --git a/installer/system.json b/installer/system.json
index 8985824659..0e0ea1cb09 100644
--- a/installer/system.json
+++ b/installer/system.json
@@ -1 +1,4 @@
-{"shared_server":true}
+{
+ "shared_server":true,
+ "systemd_managed": true
+}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index db6babcda0..ed8c3add92 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -160,6 +160,7 @@ func TestAccountDBInit(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbs := dbOpenTest(t)
+ setDbLogging(t, dbs)
defer dbs.close()
tx, err := dbs.wdb.Handle.Begin()
@@ -180,6 +181,7 @@ func TestAccountDBRound(t *testing.T) {
proto := config.Consensus[protocol.ConsensusCurrentVersion]
dbs := dbOpenTest(t)
+ setDbLogging(t, dbs)
defer dbs.close()
tx, err := dbs.wdb.Handle.Begin()
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 05440629ed..23a624558f 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -161,12 +161,12 @@ func (au *accountUpdates) loadFromDisk(l ledgerForTracker) error {
for loaded < latest {
next := loaded + 1
- blk, aux, err := l.blockAux(next)
+ blk, err := l.Block(next)
if err != nil {
return err
}
- delta, err := l.trackerEvalVerified(blk, aux)
+ delta, err := l.trackerEvalVerified(blk)
if err != nil {
return err
}
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index d218526bbc..ad2bc1741e 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -34,11 +34,15 @@ import (
type mockLedgerForTracker struct {
dbs dbPair
blocks []blockEntry
+ log logging.Logger
}
func makeMockLedgerForTracker(t *testing.T) *mockLedgerForTracker {
dbs := dbOpenTest(t)
- return &mockLedgerForTracker{dbs: dbs}
+ dblogger := logging.TestingLog(t)
+ dbs.rdb.SetLogger(dblogger)
+ dbs.wdb.SetLogger(dblogger)
+ return &mockLedgerForTracker{dbs: dbs, log: dblogger}
}
func (ml *mockLedgerForTracker) close() {
@@ -49,7 +53,7 @@ func (ml *mockLedgerForTracker) Latest() basics.Round {
return basics.Round(len(ml.blocks)) - 1
}
-func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block, aux evalAux) (StateDelta, error) {
+func (ml *mockLedgerForTracker) trackerEvalVerified(blk bookkeeping.Block) (StateDelta, error) {
delta := StateDelta{
hdr: &bookkeeping.BlockHeader{},
}
@@ -72,20 +76,12 @@ func (ml *mockLedgerForTracker) BlockHdr(rnd basics.Round) (bookkeeping.BlockHea
return ml.blocks[int(rnd)].block.BlockHeader, nil
}
-func (ml *mockLedgerForTracker) blockAux(rnd basics.Round) (bookkeeping.Block, evalAux, error) {
- if rnd > ml.Latest() {
- return bookkeeping.Block{}, evalAux{}, fmt.Errorf("rnd %d out of bounds", rnd)
- }
-
- return ml.blocks[int(rnd)].block, ml.blocks[int(rnd)].aux, nil
-}
-
func (ml *mockLedgerForTracker) trackerDB() dbPair {
return ml.dbs
}
func (ml *mockLedgerForTracker) trackerLog() logging.Logger {
- return logging.Base()
+ return ml.log
}
func checkAcctUpdates(t *testing.T, au *accountUpdates, base basics.Round, latestRnd basics.Round, accts []map[basics.Address]basics.AccountData, rewards []uint64, proto config.ConsensusParams) {
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index 853450e996..b7ad33b635 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -59,13 +59,8 @@ func (wl *wrappedLedger) BlockHdr(rnd basics.Round) (bookkeeping.BlockHeader, er
return wl.l.BlockHdr(rnd)
}
-func (wl *wrappedLedger) blockAux(rnd basics.Round) (bookkeeping.Block, evalAux, error) {
- wl.recordBlockQuery(rnd)
- return wl.l.blockAux(rnd)
-}
-
-func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block, aux evalAux) (StateDelta, error) {
- return wl.l.trackerEvalVerified(blk, aux)
+func (wl *wrappedLedger) trackerEvalVerified(blk bookkeeping.Block) (StateDelta, error) {
+ return wl.l.trackerEvalVerified(blk)
}
func (wl *wrappedLedger) Latest() basics.Round {
@@ -109,7 +104,8 @@ func TestArchival(t *testing.T) {
genesisInitState := getInitState()
const inMem = true
const archival = true
- l, err := OpenLedger(logging.Base(), dbName, inMem, genesisInitState, archival)
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbName, inMem, genesisInitState, archival)
require.NoError(t, err)
defer l.Close()
wl := &wrappedLedger{
@@ -500,7 +496,8 @@ func TestArchivalFromNonArchival(t *testing.T) {
const inMem = false // use persistent storage
archival := false
- l, err := OpenLedger(logging.Base(), dbPrefix, inMem, genesisInitState, archival)
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, dbPrefix, inMem, genesisInitState, archival)
require.NoError(t, err)
blk := genesisInitState.Block
@@ -529,7 +526,7 @@ func TestArchivalFromNonArchival(t *testing.T) {
l.Close()
archival = true
- l, err = OpenLedger(logging.Base(), dbPrefix, inMem, genesisInitState, archival)
+ l, err = OpenLedger(log, dbPrefix, inMem, genesisInitState, archival)
require.NoError(t, err)
defer l.Close()
diff --git a/ledger/blockdb.go b/ledger/blockdb.go
index aabaf2b3e6..73e4abcada 100644
--- a/ledger/blockdb.go
+++ b/ledger/blockdb.go
@@ -28,14 +28,14 @@ import (
"github.com/algorand/go-algorand/protocol"
)
+// 2019-12-15: removed column 'auxdata blob' from 'CREATE TABLE' statement. It was not explicitly removed from databases and may continue to exist with empty entries in some old databases.
var blockSchema = []string{
`CREATE TABLE IF NOT EXISTS blocks (
rnd integer primary key,
proto text,
hdrdata blob,
blkdata blob,
- certdata blob,
- auxdata blob)`,
+ certdata blob)`,
}
var blockResetExprs = []string{
@@ -46,7 +46,7 @@ func blockInit(tx *sql.Tx, initBlocks []bookkeeping.Block) error {
for _, tableCreate := range blockSchema {
_, err := tx.Exec(tableCreate)
if err != nil {
- return err
+ return fmt.Errorf("blockdb blockInit could not create table %v", err)
}
}
@@ -57,7 +57,7 @@ func blockInit(tx *sql.Tx, initBlocks []bookkeeping.Block) error {
if next == 0 {
for _, blk := range initBlocks {
- err = blockPut(tx, blk, agreement.Certificate{}, evalAux{})
+ err = blockPut(tx, blk, agreement.Certificate{})
if err != nil {
serr, ok := err.(sqlite3.Error)
if ok && serr.Code == sqlite3.ErrConstraint {
@@ -141,27 +141,7 @@ func blockGetCert(tx *sql.Tx, rnd basics.Round) (blk bookkeeping.Block, cert agr
return
}
-func blockGetAux(tx *sql.Tx, rnd basics.Round) (blk bookkeeping.Block, aux evalAux, err error) {
- var blkbuf []byte
- var auxbuf []byte
- err = tx.QueryRow("SELECT blkdata, auxdata FROM blocks WHERE rnd=?", rnd).Scan(&blkbuf, &auxbuf)
- if err != nil {
- if err == sql.ErrNoRows {
- err = ErrNoEntry{Round: rnd}
- }
-
- return
- }
-
- err = protocol.Decode(blkbuf, &blk)
- if err != nil {
- return
- }
-
- return
-}
-
-func blockPut(tx *sql.Tx, blk bookkeeping.Block, cert agreement.Certificate, aux evalAux) error {
+func blockPut(tx *sql.Tx, blk bookkeeping.Block, cert agreement.Certificate) error {
var max sql.NullInt64
err := tx.QueryRow("SELECT MAX(rnd) FROM blocks").Scan(&max)
if err != nil {
@@ -180,12 +160,13 @@ func blockPut(tx *sql.Tx, blk bookkeeping.Block, cert agreement.Certificate, aux
}
}
- _, err = tx.Exec("INSERT INTO blocks (rnd, proto, hdrdata, blkdata, certdata, auxdata) VALUES (?, ?, ?, ?, ?, ?)",
- blk.Round(), blk.CurrentProtocol,
- protocol.Encode(blk.BlockHeader),
- protocol.Encode(blk),
- protocol.Encode(cert),
- protocol.Encode(aux))
+ _, err = tx.Exec("INSERT INTO blocks (rnd, proto, hdrdata, blkdata, certdata) VALUES (?, ?, ?, ?, ?)",
+ blk.Round(),
+ blk.CurrentProtocol,
+ protocol.Encode(&blk.BlockHeader),
+ protocol.Encode(&blk),
+ protocol.Encode(&cert),
+ )
return err
}
diff --git a/ledger/blockdb_test.go b/ledger/blockdb_test.go
index d57a3ad558..3a3550cead 100644
--- a/ledger/blockdb_test.go
+++ b/ledger/blockdb_test.go
@@ -26,13 +26,13 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
func randomBlock(r basics.Round) blockEntry {
b := bookkeeping.Block{}
c := agreement.Certificate{}
- a := evalAux{}
b.BlockHeader.Round = r
b.BlockHeader.TimeStamp = int64(crypto.RandUint64())
@@ -43,7 +43,6 @@ func randomBlock(r basics.Round) blockEntry {
return blockEntry{
block: b,
cert: c,
- aux: a,
}
}
@@ -52,7 +51,6 @@ func randomInitChain(proto protocol.ConsensusVersion, nblock int) []blockEntry {
for i := 0; i < nblock; i++ {
blkent := randomBlock(basics.Round(i))
blkent.cert = agreement.Certificate{}
- blkent.aux = evalAux{}
blkent.block.CurrentProtocol = proto
res = append(res, blkent)
}
@@ -97,19 +95,21 @@ func checkBlockDB(t *testing.T, tx *sql.Tx, blocks []blockEntry) {
require.NoError(t, err)
require.Equal(t, blk, blocks[rnd].block)
require.Equal(t, cert, blocks[rnd].cert)
-
- blk, aux, err := blockGetAux(tx, rnd)
- require.NoError(t, err)
- require.Equal(t, blk, blocks[rnd].block)
- require.Equal(t, aux, blocks[rnd].aux)
}
_, err = blockGet(tx, basics.Round(len(blocks)))
require.Error(t, err)
}
+func setDbLogging(t *testing.T, dbs dbPair) {
+ dblogger := logging.TestingLog(t)
+ dbs.rdb.SetLogger(dblogger)
+ dbs.wdb.SetLogger(dblogger)
+}
+
func TestBlockDBEmpty(t *testing.T) {
dbs := dbOpenTest(t)
+ setDbLogging(t, dbs)
defer dbs.close()
tx, err := dbs.wdb.Handle.Begin()
@@ -123,6 +123,7 @@ func TestBlockDBEmpty(t *testing.T) {
func TestBlockDBInit(t *testing.T) {
dbs := dbOpenTest(t)
+ setDbLogging(t, dbs)
defer dbs.close()
tx, err := dbs.wdb.Handle.Begin()
@@ -142,6 +143,7 @@ func TestBlockDBInit(t *testing.T) {
func TestBlockDBAppend(t *testing.T) {
dbs := dbOpenTest(t)
+ setDbLogging(t, dbs)
defer dbs.close()
tx, err := dbs.wdb.Handle.Begin()
@@ -156,7 +158,7 @@ func TestBlockDBAppend(t *testing.T) {
for i := 0; i < 10; i++ {
blkent := randomBlock(basics.Round(len(blocks)))
- err = blockPut(tx, blkent.block, blkent.cert, blkent.aux)
+ err = blockPut(tx, blkent.block, blkent.cert)
require.NoError(t, err)
blocks = append(blocks, blkent)
diff --git a/ledger/blockqueue.go b/ledger/blockqueue.go
index a76b669661..e1b34cede2 100644
--- a/ledger/blockqueue.go
+++ b/ledger/blockqueue.go
@@ -33,7 +33,6 @@ import (
type blockEntry struct {
block bookkeeping.Block
cert agreement.Certificate
- aux evalAux
}
type blockQueue struct {
@@ -92,7 +91,7 @@ func (bq *blockQueue) syncer() {
err := bq.l.blockDBs.wdb.Atomic(func(tx *sql.Tx) error {
for _, e := range workQ {
- err0 := blockPut(tx, e.block, e.cert, e.aux)
+ err0 := blockPut(tx, e.block, e.cert)
if err0 != nil {
return err0
}
@@ -156,7 +155,7 @@ func (bq *blockQueue) latestCommitted() basics.Round {
return bq.lastCommitted
}
-func (bq *blockQueue) putBlock(blk bookkeeping.Block, cert agreement.Certificate, aux evalAux) error {
+func (bq *blockQueue) putBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
bq.mu.Lock()
defer bq.mu.Unlock()
@@ -183,7 +182,6 @@ func (bq *blockQueue) putBlock(blk bookkeeping.Block, cert agreement.Certificate
bq.q = append(bq.q, blockEntry{
block: blk,
cert: cert,
- aux: aux,
})
bq.cond.Broadcast()
return nil
@@ -304,22 +302,3 @@ func (bq *blockQueue) getBlockCert(r basics.Round) (blk bookkeeping.Block, cert
err = updateErrNoEntry(err, lastCommitted, latest)
return
}
-
-func (bq *blockQueue) getBlockAux(r basics.Round) (blk bookkeeping.Block, aux evalAux, err error) {
- e, lastCommitted, latest, err := bq.checkEntry(r)
- if e != nil {
- return e.block, e.aux, nil
- }
-
- if err != nil {
- return
- }
-
- err = bq.l.blockDBs.rdb.Atomic(func(tx *sql.Tx) error {
- var err0 error
- blk, aux, err0 = blockGetAux(tx, r)
- return err0
- })
- err = updateErrNoEntry(err, lastCommitted, latest)
- return
-}
diff --git a/ledger/eval.go b/ledger/eval.go
index 05de0d5bc6..b98465e651 100644
--- a/ledger/eval.go
+++ b/ledger/eval.go
@@ -36,11 +36,6 @@ import (
// ErrNoSpace indicates insufficient space for transaction in block
var ErrNoSpace = errors.New("block does not have space for transaction")
-// evalAux is left after removing explicit reward claims,
-// in case we need this infrastructure in the future.
-type evalAux struct {
-}
-
// VerifiedTxnCache captures the interface for a cache of previously
// verified transactions. This is expected to match the transaction
// pool object.
@@ -159,10 +154,8 @@ func (cs *roundCowState) ConsensusParams() config.ConsensusParams {
// against the ledger.
type BlockEvaluator struct {
state *roundCowState
- aux *evalAux
validate bool
generate bool
- txcache VerifiedTxnCache
prevHeader bookkeeping.BlockHeader // cached
proto config.ConsensusParams
@@ -171,8 +164,6 @@ type BlockEvaluator struct {
block bookkeeping.Block
blockTxBytes int
- verificationPool execpool.BacklogPool
-
l ledgerForEvaluator
}
@@ -189,20 +180,16 @@ type ledgerForEvaluator interface {
// StartEvaluator creates a BlockEvaluator, given a ledger and a block header
// of the block that the caller is planning to evaluate.
-func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (*BlockEvaluator, error) {
- return startEvaluator(l, hdr, nil, true, true, txcache, executionPool)
+func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader) (*BlockEvaluator, error) {
+ return startEvaluator(l, hdr, true, true)
}
-func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, aux *evalAux, validate bool, generate bool, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (*BlockEvaluator, error) {
+func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, validate bool, generate bool) (*BlockEvaluator, error) {
proto, ok := config.Consensus[hdr.CurrentProtocol]
if !ok {
return nil, protocol.Error(hdr.CurrentProtocol)
}
- if aux == nil {
- aux = &evalAux{}
- }
-
base := &roundCowBase{
l: l,
// round that lookups come from is previous block. We validate
@@ -214,15 +201,12 @@ func startEvaluator(l ledgerForEvaluator, hdr bookkeeping.BlockHeader, aux *eval
}
eval := &BlockEvaluator{
- aux: aux,
- validate: validate,
- generate: generate,
- txcache: txcache,
- block: bookkeeping.Block{BlockHeader: hdr},
- proto: proto,
- genesisHash: l.GenesisHash(),
- verificationPool: executionPool,
- l: l,
+ validate: validate,
+ generate: generate,
+ block: bookkeeping.Block{BlockHeader: hdr},
+ proto: proto,
+ genesisHash: l.GenesisHash(),
+ l: l,
}
if hdr.Round > 0 {
@@ -404,11 +388,6 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx
// on a single transaction, but does not actually add the transaction to the block
// evaluator, or modify the block evaluator state in any other visible way.
func (eval *BlockEvaluator) testTransaction(txn transactions.SignedTxn, cow *roundCowState) error {
- // Verify that groups are supported.
- if !txn.Txn.Group.IsZero() && !eval.proto.SupportTxGroups {
- return fmt.Errorf("transaction groups not supported")
- }
-
// Transaction valid (not expired)?
err := txn.Txn.Alive(eval.block)
if err != nil {
@@ -478,17 +457,10 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit
cow := eval.state.child()
- groupNoAD := make([]transactions.SignedTxn, len(txgroup))
- for i := range txgroup {
- groupNoAD[i] = txgroup[i].SignedTxn
- }
-
- ctxs := verify.PrepareContexts(groupNoAD, eval.block.BlockHeader)
-
for gi, txad := range txgroup {
var txib transactions.SignedTxnInBlock
- err := eval.transaction(txad.SignedTxn, txad.ApplyData, groupNoAD, gi, ctxs[gi], cow, &txib)
+ err := eval.transaction(txad.SignedTxn, txad.ApplyData, cow, &txib)
if err != nil {
return err
}
@@ -538,16 +510,10 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit
// transaction tentatively executes a new transaction as part of this block evaluation.
// If the transaction cannot be added to the block without violating some constraints,
// an error is returned and the block evaluator state is unchanged.
-func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, ad transactions.ApplyData, txgroup []transactions.SignedTxn, groupIndex int, ctx verify.Context, cow *roundCowState, txib *transactions.SignedTxnInBlock) error {
+func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, ad transactions.ApplyData, cow *roundCowState, txib *transactions.SignedTxnInBlock) error {
var err error
- spec := transactions.SpecialAddresses{
- FeeSink: eval.block.BlockHeader.FeeSink,
- RewardsPool: eval.block.BlockHeader.RewardsPool,
- }
-
if eval.validate {
- // Transaction valid (not expired)?
err = txn.Txn.Alive(eval.block)
if err != nil {
return err
@@ -562,24 +528,11 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, ad transacti
if dup {
return TransactionInLedgerError{txn.ID()}
}
+ }
- // Well-formed on its own?
- err = txn.Txn.WellFormed(spec, eval.proto)
- if err != nil {
- return fmt.Errorf("transaction %v: malformed: %v", txn.ID(), err)
- }
-
- if eval.txcache == nil || !eval.txcache.Verified(txn, ctx.Params) {
- err = verify.TxnPool(&txn, ctx, eval.verificationPool)
- if err != nil {
- return fmt.Errorf("transaction %v: failed to verify: %v", txn.ID(), err)
- }
- }
-
- // Verify that groups are supported.
- if !txn.Txn.Group.IsZero() && !eval.proto.SupportTxGroups {
- return fmt.Errorf("transaction groups not supported")
- }
+ spec := transactions.SpecialAddresses{
+ FeeSink: eval.block.BlockHeader.FeeSink,
+ RewardsPool: eval.block.BlockHeader.RewardsPool,
}
// Apply the transaction, updating the cow balances
@@ -702,53 +655,145 @@ func (eval *BlockEvaluator) GenerateBlock() (*ValidatedBlock, error) {
vb := ValidatedBlock{
blk: eval.block,
delta: eval.state.mods,
- aux: *eval.aux,
}
return &vb, nil
}
-func (l *Ledger) eval(ctx context.Context, blk bookkeeping.Block, aux *evalAux, validate bool, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (StateDelta, evalAux, error) {
- eval, err := startEvaluator(l, blk.BlockHeader, aux, validate, false, txcache, executionPool)
+type evalTxValidator struct {
+ txcache VerifiedTxnCache
+ block bookkeeping.Block
+ proto config.ConsensusParams
+ verificationPool execpool.BacklogPool
+
+ ctx context.Context
+ cf context.CancelFunc
+ txgroups chan []transactions.SignedTxnWithAD
+ done chan error
+}
+
+func (validator *evalTxValidator) run() {
+ for txgroup := range validator.txgroups {
+ select {
+ case <-validator.ctx.Done():
+ validator.done <- validator.ctx.Err()
+ validator.cf()
+ close(validator.done)
+ return
+ default:
+ }
+ groupNoAD := make([]transactions.SignedTxn, len(txgroup))
+ for i := range txgroup {
+ groupNoAD[i] = txgroup[i].SignedTxn
+ }
+ ctxs := verify.PrepareContexts(groupNoAD, validator.block.BlockHeader)
+
+ for gi, tx := range txgroup {
+ err := validateTransaction(tx.SignedTxn, validator.block, validator.proto, validator.txcache, ctxs[gi], validator.verificationPool)
+ if err != nil {
+ validator.done <- err
+ validator.cf()
+ close(validator.done)
+ return
+ }
+ }
+ }
+ close(validator.done)
+}
+
+func validateTransaction(txn transactions.SignedTxn, block bookkeeping.Block, proto config.ConsensusParams, txcache VerifiedTxnCache, ctx verify.Context, verificationPool execpool.BacklogPool) error {
+ // Transaction valid (not expired)?
+ err := txn.Txn.Alive(block)
if err != nil {
- return StateDelta{}, evalAux{}, err
+ return err
}
- // TODO: batch tx sig verification: ingest blk.Payset and output a list of ValidatedTx
+ if txcache == nil || !txcache.Verified(txn, ctx.Params) {
+ err = verify.TxnPool(&txn, ctx, verificationPool)
+ if err != nil {
+ return fmt.Errorf("transaction %v: failed to verify: %v", txn.ID(), err)
+ }
+ }
+ return nil
+}
+
+// eval is used by Ledger.Validate(), Ledger.AddBlock(), and Ledger.trackerEvalVerified() (called from accountUpdates.loadFromDisk()).
+//
+// Validate: eval(ctx, blk, true, txcache, executionPool)
+// AddBlock: eval(context.Background(), blk, false, nil, nil)
+// tracker: eval(context.Background(), blk, false, nil, nil)
+func (l *Ledger) eval(ctx context.Context, blk bookkeeping.Block, validate bool, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (StateDelta, error) {
+ eval, err := startEvaluator(l, blk.BlockHeader, validate, false)
+ if err != nil {
+ return StateDelta{}, err
+ }
// Next, transactions
paysetgroups, err := blk.DecodePaysetGroups()
if err != nil {
- return StateDelta{}, evalAux{}, err
+ return StateDelta{}, err
+ }
+
+ var txvalidator evalTxValidator
+ ctx, cf := context.WithCancel(ctx)
+ defer cf()
+ if validate {
+ proto, ok := config.Consensus[blk.CurrentProtocol]
+ if !ok {
+ return StateDelta{}, protocol.Error(blk.CurrentProtocol)
+ }
+ txvalidator.txcache = txcache
+ txvalidator.block = blk
+ txvalidator.proto = proto
+ txvalidator.verificationPool = executionPool
+
+ txvalidator.ctx = ctx
+ txvalidator.cf = cf
+ txvalidator.txgroups = make(chan []transactions.SignedTxnWithAD, len(paysetgroups))
+ txvalidator.done = make(chan error, 1)
+ go txvalidator.run()
}
for _, txgroup := range paysetgroups {
select {
case <-ctx.Done():
- return StateDelta{}, evalAux{}, ctx.Err()
+ select {
+ case err := <-txvalidator.done:
+ return StateDelta{}, err
+ default:
+ }
+ return StateDelta{}, ctx.Err()
default:
}
+ if validate {
+ txvalidator.txgroups <- txgroup
+ }
err = eval.TransactionGroup(txgroup)
if err != nil {
- return StateDelta{}, evalAux{}, err
+ return StateDelta{}, err
}
}
// Finally, procees any pending end-of-block state changes
err = eval.endOfBlock()
if err != nil {
- return StateDelta{}, evalAux{}, err
+ return StateDelta{}, err
}
// If validating, do final block checks that depend on our new state
if validate {
+ close(txvalidator.txgroups)
+ err, gotErr := <-txvalidator.done
+ if gotErr && err != nil {
+ return StateDelta{}, err
+ }
err = eval.finalValidation()
if err != nil {
- return StateDelta{}, evalAux{}, err
+ return StateDelta{}, err
}
}
- return eval.state.mods, *eval.aux, nil
+ return eval.state.mods, nil
}
// Validate uses the ledger to validate block blk as a candidate next block.
@@ -756,7 +801,7 @@ func (l *Ledger) eval(ctx context.Context, blk bookkeeping.Block, aux *evalAux,
// not a valid block (e.g., it has duplicate transactions, overspends some
// account, etc).
func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, txcache VerifiedTxnCache, executionPool execpool.BacklogPool) (*ValidatedBlock, error) {
- delta, aux, err := l.eval(ctx, blk, nil, true, txcache, executionPool)
+ delta, err := l.eval(ctx, blk, true, txcache, executionPool)
if err != nil {
return nil, err
}
@@ -764,7 +809,6 @@ func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, txcache Ve
vb := ValidatedBlock{
blk: blk,
delta: delta,
- aux: aux,
}
return &vb, nil
}
@@ -775,7 +819,6 @@ func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, txcache Ve
type ValidatedBlock struct {
blk bookkeeping.Block
delta StateDelta
- aux evalAux
}
// Block returns the underlying Block for a ValidatedBlock.
@@ -791,6 +834,5 @@ func (vb ValidatedBlock) WithSeed(s committee.Seed) ValidatedBlock {
return ValidatedBlock{
blk: newblock,
delta: vb.delta,
- aux: vb.aux,
}
}
diff --git a/ledger/eval_test.go b/ledger/eval_test.go
index 310f77e361..c5a934b419 100644
--- a/ledger/eval_test.go
+++ b/ledger/eval_test.go
@@ -30,7 +30,6 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/execpool"
)
var testPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
@@ -45,9 +44,6 @@ func init() {
func TestBlockEvaluator(t *testing.T) {
genesisInitState, addrs, keys := genesis(10)
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- defer backlogPool.Shutdown()
-
dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64())
const inMem = true
const archival = true
@@ -56,7 +52,7 @@ func TestBlockEvaluator(t *testing.T) {
defer l.Close()
newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader)
- eval, err := l.StartEvaluator(newBlock.BlockHeader, nil, backlogPool)
+ eval, err := l.StartEvaluator(newBlock.BlockHeader)
require.NoError(t, err)
genHash := genesisInitState.Block.BlockHeader.GenesisHash
@@ -75,20 +71,8 @@ func TestBlockEvaluator(t *testing.T) {
},
}
- // Zero signature should fail
- st := transactions.SignedTxn{
- Txn: txn,
- }
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
- // Random signature should fail
- crypto.RandBytes(st.Sig[:])
- err = eval.Transaction(st, transactions.ApplyData{})
- require.Error(t, err)
-
// Correct signature should work
- st = txn.Sign(keys[0])
+ st := txn.Sign(keys[0])
err = eval.Transaction(st, transactions.ApplyData{})
require.NoError(t, err)
diff --git a/ledger/ledger.go b/ledger/ledger.go
index befee4f838..cb8a90c8e3 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -99,18 +99,25 @@ func OpenLedger(
l.trackerDBs, l.blockDBs, err = openLedgerDB(dbPathPrefix, dbMem)
if err != nil {
+ err = fmt.Errorf("OpenLedger.openLedgerDB %v", err)
return nil, err
}
+ l.trackerDBs.rdb.SetLogger(log)
+ l.trackerDBs.wdb.SetLogger(log)
+ l.blockDBs.rdb.SetLogger(log)
+ l.blockDBs.wdb.SetLogger(log)
err = l.blockDBs.wdb.Atomic(func(tx *sql.Tx) error {
return initBlocksDB(tx, l, []bookkeeping.Block{genesisInitState.Block}, isArchival)
})
if err != nil {
+ err = fmt.Errorf("OpenLedger.initBlocksDB %v", err)
return nil, err
}
l.blockQ, err = bqInit(l)
if err != nil {
+ err = fmt.Errorf("OpenLedger.bqInit %v", err)
return nil, err
}
@@ -132,6 +139,7 @@ func OpenLedger(
err = l.trackers.loadFromDisk(l)
if err != nil {
+ err = fmt.Errorf("OpenLedger.loadFromDisk %v", err)
return nil, err
}
@@ -207,6 +215,7 @@ func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs dbPair, blockDBs
func initBlocksDB(tx *sql.Tx, l *Ledger, initBlocks []bookkeeping.Block, isArchival bool) (err error) {
err = blockInit(tx, initBlocks)
if err != nil {
+ err = fmt.Errorf("initBlocksDB.blockInit %v", err)
return err
}
@@ -214,6 +223,7 @@ func initBlocksDB(tx *sql.Tx, l *Ledger, initBlocks []bookkeeping.Block, isArchi
if isArchival {
earliest, err := blockEarliest(tx)
if err != nil {
+ err = fmt.Errorf("initBlocksDB.blockEarliest %v", err)
return err
}
@@ -223,10 +233,12 @@ func initBlocksDB(tx *sql.Tx, l *Ledger, initBlocks []bookkeeping.Block, isArchi
l.log.Warnf("resetting blocks DB (earliest block is %v)", earliest)
err := blockResetDB(tx)
if err != nil {
+ err = fmt.Errorf("initBlocksDB.blockResetDB %v", err)
return err
}
err = blockInit(tx, initBlocks)
if err != nil {
+ err = fmt.Errorf("initBlocksDB.blockInit 2 %v", err)
return err
}
}
@@ -356,10 +368,6 @@ func (l *Ledger) LatestCommitted() basics.Round {
return l.blockQ.latestCommitted()
}
-func (l *Ledger) blockAux(rnd basics.Round) (bookkeeping.Block, evalAux, error) {
- return l.blockQ.getBlockAux(rnd)
-}
-
// Block returns the block for round rnd.
func (l *Ledger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
return l.blockQ.getBlock(rnd)
@@ -395,7 +403,7 @@ func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreem
// is returned if this is not the expected next block number.
func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
// passing nil as the verificationPool is ok since we've asking the evaluator to skip verification.
- updates, aux, err := l.eval(context.Background(), blk, nil, false, nil, nil)
+ updates, err := l.eval(context.Background(), blk, false, nil, nil)
if err != nil {
return err
}
@@ -403,7 +411,6 @@ func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) err
vb := ValidatedBlock{
blk: blk,
delta: updates,
- aux: aux,
}
return l.AddValidatedBlock(vb, cert)
@@ -419,7 +426,7 @@ func (l *Ledger) AddValidatedBlock(vb ValidatedBlock, cert agreement.Certificate
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
- err := l.blockQ.putBlock(vb.blk, cert, vb.aux)
+ err := l.blockQ.putBlock(vb.blk, cert)
if err != nil {
return err
}
@@ -474,9 +481,9 @@ func (l *Ledger) trackerLog() logging.Logger {
return l.log
}
-func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, aux evalAux) (StateDelta, error) {
+func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block) (StateDelta, error) {
// passing nil as the verificationPool is ok since we've asking the evaluator to skip verification.
- delta, _, err := l.eval(context.Background(), blk, &aux, false, nil, nil)
+ delta, err := l.eval(context.Background(), blk, false, nil, nil)
return delta, err
}
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 4925b397b4..e06bb74008 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -202,7 +202,8 @@ func TestLedgerBasic(t *testing.T) {
genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion)
const inMem = true
const archival = true
- l, err := OpenLedger(logging.Base(), t.Name(), inMem, genesisInitState, archival)
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, t.Name(), inMem, genesisInitState, archival)
require.NoError(t, err, "could not open ledger")
defer l.Close()
}
@@ -353,7 +354,8 @@ func TestLedgerSingleTx(t *testing.T) {
genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV7)
const inMem = true
const archival = true
- l, err := OpenLedger(logging.Base(), t.Name(), inMem, genesisInitState, archival)
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, t.Name(), inMem, genesisInitState, archival)
a.NoError(err, "could not open ledger")
defer l.Close()
@@ -488,6 +490,11 @@ func TestLedgerSingleTx(t *testing.T) {
sbadTx.Sig = crypto.Signature{}
a.Error(l.appendUnvalidatedSignedTx(t, initAccounts, sbadTx, ad), "added tx with no signature")
+ badTx = correctPay
+ sbadTx = sign(initSecrets, badTx)
+ sbadTx.Sig[5]++
+ a.Error(l.appendUnvalidatedSignedTx(t, initAccounts, sbadTx, ad), "added tx with corrupt signature")
+
// TODO set multisig and test
badTx = correctPay
@@ -538,7 +545,8 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion
genesisInitState, initSecrets := testGenerateInitState(t, version)
const inMem = true
const archival = true
- l, err := OpenLedger(logging.Base(), t.Name(), inMem, genesisInitState, archival)
+ log := logging.TestingLog(t)
+ l, err := OpenLedger(log, t.Name(), inMem, genesisInitState, archival)
a.NoError(err, "could not open ledger")
defer l.Close()
diff --git a/ledger/perf_test.go b/ledger/perf_test.go
index becb1c8e5e..31aa586196 100644
--- a/ledger/perf_test.go
+++ b/ledger/perf_test.go
@@ -111,7 +111,7 @@ func BenchmarkManyAccounts(b *testing.B) {
txib, err := blk.EncodeSignedTxn(st, transactions.ApplyData{})
require.NoError(b, err)
- txlen := len(protocol.Encode(txib))
+ txlen := len(protocol.Encode(&txib))
if txbytes+txlen > proto.MaxTxnBytesPerBlock {
break
}
@@ -170,7 +170,7 @@ func BenchmarkValidate(b *testing.B) {
txib, err := newblk.EncodeSignedTxn(st, transactions.ApplyData{})
require.NoError(b, err)
- txlen := len(protocol.Encode(txib))
+ txlen := len(protocol.Encode(&txib))
if txbytes+txlen > proto.MaxTxnBytesPerBlock {
break
}
diff --git a/ledger/tracker.go b/ledger/tracker.go
index 55f8202424..babb09fd86 100644
--- a/ledger/tracker.go
+++ b/ledger/tracker.go
@@ -81,12 +81,11 @@ type ledgerTracker interface {
type ledgerForTracker interface {
trackerDB() dbPair
trackerLog() logging.Logger
- trackerEvalVerified(bookkeeping.Block, evalAux) (StateDelta, error)
+ trackerEvalVerified(bookkeeping.Block) (StateDelta, error)
Latest() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
- blockAux(basics.Round) (bookkeeping.Block, evalAux, error)
}
type trackerRegistry struct {
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index cbf64dcced..2a092c1b17 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -50,6 +50,7 @@ type Client struct {
kmdStartArgs nodecontrol.KMDStartArgs
dataDir string
cacheDir string
+ consensus config.ConsensusProtocols
}
// ClientConfig is data to configure a Client
@@ -164,6 +165,11 @@ func (c *Client) init(config ClientConfig, clientType ClientType) error {
return err
}
}
+
+ c.consensus, err = nc.GetConsensus()
+ if err != nil {
+ return err
+ }
return nil
}
@@ -495,7 +501,7 @@ func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64
if err != nil {
return 0, 0, err
}
- cparams, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return 0, 0, fmt.Errorf("cannot construct transaction: unknown consensus protocol %s", params.ConsensusVersion)
}
@@ -558,7 +564,7 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by
return transactions.Transaction{}, err
}
- cp, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cp, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return transactions.Transaction{}, fmt.Errorf("ConstructPayment: unknown consensus protocol %s", params.ConsensusVersion)
}
@@ -795,7 +801,7 @@ func (c *Client) ConsensusParams(round uint64) (consensus config.ConsensusParams
return
}
- params, ok := config.Consensus[protocol.ConsensusVersion(block.CurrentProtocol)]
+ params, ok := c.consensus[protocol.ConsensusVersion(block.CurrentProtocol)]
if !ok {
err = fmt.Errorf("ConsensusParams: unknown consensus protocol %s", block.CurrentProtocol)
return
diff --git a/libgoal/lockedFile.go b/libgoal/lockedFile.go
index 36a96b2d7b..fc46a3f811 100644
--- a/libgoal/lockedFile.go
+++ b/libgoal/lockedFile.go
@@ -28,11 +28,15 @@ type locker interface {
unlock(fd *os.File) error
}
-func newLockedFile(path string) *lockedFile {
+func newLockedFile(path string) (*lockedFile, error) {
+ locker, err := makeLocker()
+ if err != nil {
+ return nil, err
+ }
return &lockedFile{
path: path,
- locker: makeLocker(),
- }
+ locker: locker,
+ }, nil
}
// lockedFile implementation
diff --git a/libgoal/lockedFileLinux.go b/libgoal/lockedFileLinux.go
index 5330723351..982d9eb8a8 100644
--- a/libgoal/lockedFileLinux.go
+++ b/libgoal/lockedFileLinux.go
@@ -19,53 +19,70 @@
package libgoal
import (
+ "fmt"
"io"
"os"
- "syscall"
"golang.org/x/sys/unix"
)
type linuxLocker struct {
+ setLockWait int
}
// makeLocker create a unix file locker.
-// note that the desired way is to use the OFD locker, which locks on the file descriptor level.
-// falling back to the non-OFD lock would allow obtaining two locks by the same process. If this becomes
-// and issue, we might want to use flock, which wouldn't work across NFS.
-func makeLocker() *linuxLocker {
+// Note that the desired way is to use the OFD locker, which locks on the file descriptor level.
+// Since older kernels (Linux kernel < 3.15) do not support OFD, we fall back to non-OFD in that case.
+// Falling back to the non-OFD lock would allow obtaining two locks by the same process. If this becomes
+// an issue, we might want to use flock, which wouldn't work across NFS on older Linux kernels.
+func makeLocker() (*linuxLocker, error) {
locker := &linuxLocker{}
- return locker
+
+ // Check whether F_OFD_SETLKW is supported
+ getlk := unix.Flock_t{Type: unix.F_RDLCK}
+ err := unix.FcntlFlock(0, unix.F_OFD_GETLK, &getlk)
+ if err == nil {
+ locker.setLockWait = unix.F_OFD_SETLKW
+ } else if err == unix.EINVAL {
+ // The command F_OFD_SETLKW is not available
+ // Fall back to non-OFD locks
+ locker.setLockWait = unix.F_SETLKW
+ } else {
+ // Another unknown error occurred
+ return nil, fmt.Errorf("unknown error of FcntlFlock: %v", err)
+ }
+
+ return locker, nil
}
// the FcntlFlock has the most consistent behaviour across platforms,
// and supports both local and network file systems.
func (f *linuxLocker) tryRLock(fd *os.File) error {
- flock := &syscall.Flock_t{
- Type: syscall.F_RDLCK,
+ flock := &unix.Flock_t{
+ Type: unix.F_RDLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
- return syscall.FcntlFlock(fd.Fd(), unix.F_OFD_SETLKW, flock)
+ return unix.FcntlFlock(fd.Fd(), f.setLockWait, flock)
}
func (f *linuxLocker) tryLock(fd *os.File) error {
- flock := &syscall.Flock_t{
- Type: syscall.F_WRLCK,
+ flock := &unix.Flock_t{
+ Type: unix.F_WRLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
- return syscall.FcntlFlock(fd.Fd(), unix.F_OFD_SETLKW, flock)
+ return unix.FcntlFlock(fd.Fd(), f.setLockWait, flock)
}
func (f *linuxLocker) unlock(fd *os.File) error {
- flock := &syscall.Flock_t{
- Type: syscall.F_UNLCK,
+ flock := &unix.Flock_t{
+ Type: unix.F_UNLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
- return syscall.FcntlFlock(fd.Fd(), unix.F_OFD_SETLKW, flock)
+ return unix.FcntlFlock(fd.Fd(), f.setLockWait, flock)
}
diff --git a/libgoal/lockedFileUnix.go b/libgoal/lockedFileUnix.go
index 917d63209a..d7905e61de 100644
--- a/libgoal/lockedFileUnix.go
+++ b/libgoal/lockedFileUnix.go
@@ -14,64 +14,61 @@
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see .
-// +build !linux,!windows
+// Support all unix systems except linux
+// in https://github.com/golang/sys/blob/master/unix/syscall_unix.go
+
+// +build aix darwin dragonfly freebsd netbsd openbsd solaris
package libgoal
import (
"io"
"os"
- "syscall"
+
+ "golang.org/x/sys/unix"
)
type unixLocker struct {
- setLockWait int
}
// makeLocker create a unix file locker.
-// note that the desired way is to use the OFD locker, which locks on the file descriptor level.
-// falling back to the non-OFD lock would allow obtaining two locks by the same process. If this becomes
+// Note that the desired way is to use the OFD locker, which locks on the file descriptor level.
+// As OFD is not available on non-Linux OS, we fall back to the non-OFD lock
+// Falling back to the non-OFD lock would allow obtaining two locks by the same process. If this becomes
// and issue, we might want to use flock, which wouldn't work across NFS.
-func makeLocker() *unixLocker {
+func makeLocker() (*unixLocker, error) {
locker := &unixLocker{}
- getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
- if err := syscall.FcntlFlock(0, 36 /*F_OFD_GETLK*/, &getlk); err == nil {
- // constants from /usr/include/bits/fcntl-linux.h
- locker.setLockWait = 38 // F_OFD_SETLKW
- } else {
- locker.setLockWait = syscall.F_SETLKW
- }
- return locker
+ return locker, nil
}
// the FcntlFlock has the most unixLocker behaviour across platforms,
// and supports both local and network file systems.
func (f *unixLocker) tryRLock(fd *os.File) error {
- flock := &syscall.Flock_t{
- Type: syscall.F_RDLCK,
+ flock := &unix.Flock_t{
+ Type: unix.F_RDLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
- return syscall.FcntlFlock(fd.Fd(), f.setLockWait, flock)
+ return unix.FcntlFlock(fd.Fd(), unix.F_SETLKW, flock)
}
func (f *unixLocker) tryLock(fd *os.File) error {
- flock := &syscall.Flock_t{
- Type: syscall.F_WRLCK,
+ flock := &unix.Flock_t{
+ Type: unix.F_WRLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
- return syscall.FcntlFlock(fd.Fd(), f.setLockWait, flock)
+ return unix.FcntlFlock(fd.Fd(), unix.F_SETLKW, flock)
}
func (f *unixLocker) unlock(fd *os.File) error {
- flock := &syscall.Flock_t{
- Type: syscall.F_UNLCK,
+ flock := &unix.Flock_t{
+ Type: unix.F_UNLCK,
Whence: int16(io.SeekStart),
Start: 0,
Len: 0,
}
- return syscall.FcntlFlock(fd.Fd(), f.setLockWait, flock)
+ return unix.FcntlFlock(fd.Fd(), unix.F_SETLKW, flock)
}
diff --git a/libgoal/participation.go b/libgoal/participation.go
index 53b9d20c23..6acd607ea6 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -132,7 +132,7 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
return
}
- proto, ok := config.Consensus[protocol.ConsensusVersion(stat.LastVersion)]
+ proto, ok := c.consensus[protocol.ConsensusVersion(stat.LastVersion)]
if !ok {
err = fmt.Errorf("consensus protocol %s not supported", stat.LastVersion)
return
@@ -214,7 +214,7 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic
return
}
- proto, ok := config.Consensus[protocol.ConsensusCurrentVersion]
+ proto, ok := c.consensus[protocol.ConsensusCurrentVersion]
if !ok {
err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion)
return
diff --git a/libgoal/system.go b/libgoal/system.go
index 58238bfe8b..e5f78a3763 100644
--- a/libgoal/system.go
+++ b/libgoal/system.go
@@ -26,7 +26,8 @@ import (
type SystemConfig struct {
// SharedServer is true if this is a daemon on a multiuser system.
// If not shared, kmd and other files are often stored under $ALGORAND_DATA when otherwise they might go under $HOME/.algorand/
- SharedServer bool `json:"shared_server,omitempty"`
+ SharedServer bool `json:"shared_server,omitempty"`
+ SystemdManaged bool `json:"systemd_managed,omitempty"`
}
// map data dir to loaded config
@@ -72,3 +73,16 @@ func AlgorandDataIsPrivate(dataDir string) bool {
}
return !sc.SharedServer
}
+
+// AlgorandDaemonSystemdManaged returns true if the algod process for a given data dir is managed by systemd
+// if not, algod will be managed as an individual process for the dir
+func AlgorandDaemonSystemdManaged(dataDir string) bool {
+ if dataDir == "" {
+ return false
+ }
+ sc, err := ReadSystemConfig(dataDir)
+ if err != nil {
+ return false
+ }
+ return sc.SystemdManaged
+}
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index 63a0ed1fd9..e278561125 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/account"
@@ -67,7 +66,7 @@ func (c *Client) SignProgramWithWallet(walletHandle, pw []byte, addr string, pro
// MultisigSignTransactionWithWallet creates a multisig (or adds to an existing partial multisig, if one is provided), signing with the key corresponding to the given address and using the specified wallet
// TODO instead of returning MultisigSigs, accept and return blobs
func (c *Client) MultisigSignTransactionWithWallet(walletHandle, pw []byte, utx transactions.Transaction, signerAddr string, partial crypto.MultisigSig) (msig crypto.MultisigSig, err error) {
- txBytes := protocol.Encode(utx)
+ txBytes := protocol.Encode(&utx)
addr, err := basics.UnmarshalChecksumAddress(signerAddr)
if err != nil {
return
@@ -154,7 +153,7 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participat
return transactions.Transaction{}, err
}
- cparams, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return transactions.Transaction{}, errors.New("unknown consensus version")
}
@@ -211,7 +210,7 @@ func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid,
return transactions.Transaction{}, err
}
- cparams, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return transactions.Transaction{}, errors.New("unknown consensus version")
}
@@ -266,7 +265,7 @@ func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid
return transactions.Transaction{}, err
}
- cparams, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return transactions.Transaction{}, errors.New("unknown consensus version")
}
@@ -321,7 +320,7 @@ func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fe
return transactions.Transaction{}, err
}
- cparams, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return transactions.Transaction{}, errors.New("unknown consensus version")
}
@@ -407,11 +406,20 @@ func (c *Client) MakeUnsignedAssetCreateTx(total uint64, defaultFrozen bool, man
return transactions.Transaction{}, err
}
- cparams, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
+ cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
if !ok {
return transactions.Transaction{}, errors.New("unknown consensus version")
}
+	// If assets are not yet enabled, look up the base parameters to allow creating assets during catchup
+ if !cparams.Asset {
+ cparams, ok = c.consensus[protocol.ConsensusCurrentVersion]
+
+ if !ok {
+ return transactions.Transaction{}, errors.New("unknown consensus version")
+ }
+ }
+
if len(url) > cparams.MaxAssetURLBytes {
return tx, fmt.Errorf("asset url %s is too long (max %d bytes)", url, cparams.MaxAssetURLBytes)
}
diff --git a/libgoal/walletHandles.go b/libgoal/walletHandles.go
index 5a5bd5e4b1..ece9a7a0c2 100644
--- a/libgoal/walletHandles.go
+++ b/libgoal/walletHandles.go
@@ -31,12 +31,18 @@ type walletHandles struct {
}
func readLocked(path string) ([]byte, error) {
- lf := newLockedFile(path)
+ lf, err := newLockedFile(path)
+ if err != nil {
+ return nil, err
+ }
return lf.read()
}
func writeLocked(path string, data []byte, perm os.FileMode) error {
- lf := newLockedFile(path)
+ lf, err := newLockedFile(path)
+ if err != nil {
+ return err
+ }
return lf.write(data, perm)
}
diff --git a/logging/log.go b/logging/log.go
index 182e18799f..a500eba5b9 100644
--- a/logging/log.go
+++ b/logging/log.go
@@ -373,9 +373,6 @@ func (l logger) EnableTelemetry(cfg TelemetryConfig) (err error) {
}
func (l logger) UpdateTelemetryURI(uri string) (err error) {
- if l.loggerState.telemetry.hook == nil {
- return nil
- }
err = l.loggerState.telemetry.hook.UpdateHookURI(uri)
if err == nil {
telemetryConfig.URI = uri
diff --git a/logging/telemetry.go b/logging/telemetry.go
index d195f9d39b..313ebdc804 100644
--- a/logging/telemetry.go
+++ b/logging/telemetry.go
@@ -84,6 +84,8 @@ func makeTelemetryState(cfg TelemetryConfig, hookFactory hookFactory) (*telemetr
return nil, err
}
telemetry.hook = createAsyncHookLevels(hook, 32, 100, makeLevels(cfg.MinLogLevel))
+ } else {
+ telemetry.hook = new(dummyHook)
}
telemetry.sendToLog = cfg.SendToLog
return telemetry, nil
@@ -227,13 +229,13 @@ func (t *telemetryState) logTelemetry(l logger, message string, details interfac
if t.sendToLog {
entry.Info(message)
}
- if t.hook != nil {
- t.hook.Fire(entry)
- }
+ t.hook.Fire(entry)
}
func (t *telemetryState) Close() {
- t.hook.Close()
+ if t.hook != nil {
+ t.hook.Close()
+ }
}
func (t *telemetryState) Flush() {
diff --git a/logging/telemetryCommon.go b/logging/telemetryCommon.go
index 1e8762c172..a66484e5c9 100644
--- a/logging/telemetryCommon.go
+++ b/logging/telemetryCommon.go
@@ -35,9 +35,20 @@ type TelemetryOperation struct {
pending int32
}
+type telemetryHook interface {
+ Fire(entry *logrus.Entry) error
+ Levels() []logrus.Level
+ Close()
+ Flush()
+ UpdateHookURI(uri string) (err error)
+
+ appendEntry(entry *logrus.Entry) bool
+ waitForEventAndReady() bool
+}
+
type telemetryState struct {
history *logBuffer
- hook *asyncTelemetryHook
+ hook telemetryHook
sendToLog bool
}
@@ -70,4 +81,7 @@ type asyncTelemetryHook struct {
urlUpdate chan bool
}
+// A dummy noop type to get rid of checks like telemetry.hook != nil
+type dummyHook struct{}
+
type hookFactory func(cfg TelemetryConfig) (logrus.Hook, error)
diff --git a/logging/telemetryConfig.go b/logging/telemetryConfig.go
index 439d05dcf4..062159f458 100644
--- a/logging/telemetryConfig.go
+++ b/logging/telemetryConfig.go
@@ -64,6 +64,7 @@ func createTelemetryConfig() TelemetryConfig {
URI: "",
MinLogLevel: logrus.WarnLevel,
ReportHistoryLevel: logrus.WarnLevel,
+ // These credentials are here intentionally. Not a bug.
UserName: "telemetry-v9",
Password: "oq%$FA1TOJ!yYeMEcJ7D688eEOE#MGCu",
}
diff --git a/logging/telemetryConfig_test.go b/logging/telemetryConfig_test.go
index d19a3cd719..a30abdff50 100644
--- a/logging/telemetryConfig_test.go
+++ b/logging/telemetryConfig_test.go
@@ -33,6 +33,7 @@ func Test_loadTelemetryConfig(t *testing.T) {
URI: "elastic.algorand.com",
MinLogLevel: 4,
ReportHistoryLevel: 4,
+ // These credentials are here intentionally. Not a bug.
UserName: "telemetry-v9",
Password: "oq%$FA1TOJ!yYeMEcJ7D688eEOE#MGCu",
}
diff --git a/logging/telemetryhook.go b/logging/telemetryhook.go
index 801ac75d54..48aeacc1a6 100644
--- a/logging/telemetryhook.go
+++ b/logging/telemetryhook.go
@@ -48,7 +48,25 @@ func createAsyncHookLevels(wrappedHook logrus.Hook, channelDepth uint, maxQueueD
}
go func() {
- defer hook.wg.Done()
+ defer func() {
+ // flush the channel
+ moreEntries := true
+ for moreEntries {
+ select {
+ case entry := <-hook.entries:
+ hook.appendEntry(entry)
+ default:
+ moreEntries = false
+ }
+ }
+ for range hook.pending {
+ // The telemetry service is
+ // exiting. Un-wait for the left out
+ // messages.
+ hook.wg.Done()
+ }
+ hook.wg.Done()
+ }()
exit := false
for !exit {
@@ -126,6 +144,9 @@ func (hook *asyncTelemetryHook) waitForEventAndReady() bool {
func (hook *asyncTelemetryHook) Fire(entry *logrus.Entry) error {
hook.wg.Add(1)
select {
+ case <-hook.quit:
+ // telemetry quit
+ hook.wg.Done()
case hook.entries <- entry:
default:
hook.wg.Done()
@@ -156,6 +177,27 @@ func (hook *asyncTelemetryHook) Flush() {
hook.wg.Wait()
}
+func (hook *dummyHook) UpdateHookURI(uri string) (err error) {
+ return
+}
+func (hook *dummyHook) Levels() []logrus.Level {
+ return []logrus.Level{}
+}
+func (hook *dummyHook) Fire(entry *logrus.Entry) error {
+ return nil
+}
+func (hook *dummyHook) Close() {
+}
+func (hook *dummyHook) Flush() {
+}
+
+func (hook *dummyHook) appendEntry(entry *logrus.Entry) bool {
+ return true
+}
+func (hook *dummyHook) waitForEventAndReady() bool {
+ return true
+}
+
func createElasticHook(cfg TelemetryConfig) (hook logrus.Hook, err error) {
// Returning an error here causes issues... need the hooks to be created even if the elastic hook fails so that
// things can recover later.
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index c809b5704c..c528fcd9f5 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -184,6 +184,10 @@ type PeerEventDetails struct {
HostName string
Incoming bool
InstanceName string
+ // Endpoint is the dialed-to address, for an outgoing connection. Not being used for incoming connection.
+ Endpoint string `json:",omitempty"`
+	// MessageDelay is the average relative message delay. Not being used for incoming connection.
+ MessageDelay int64 `json:",omitempty"`
}
// ConnectPeerFailEvent event
@@ -275,4 +279,6 @@ type PeerConnectionDetails struct {
ConnectionDuration uint
// Endpoint is the dialed-to address, for an outgoing connection. Not being used for incoming connection.
Endpoint string `json:",omitempty"`
+	// MessageDelay is the average relative message delay. Not being used for incoming connection.
+ MessageDelay int64 `json:",omitempty"`
}
diff --git a/mule.yaml b/mule.yaml
new file mode 100644
index 0000000000..57acd25c1d
--- /dev/null
+++ b/mule.yaml
@@ -0,0 +1,54 @@
+stages:
+ build-linux-amd64:
+ - task: docker.Version
+ name: linux-amd64
+ arch: amd64
+ configFilePath: scripts/configure_dev-deps.sh
+ - task: shell.docker.Ensure
+ image: algorand/go-algorand-linux
+ arch: amd64
+ version: '{{ docker.Version.linux-amd64.version }}'
+ dockerFilePath: docker/build/cicd.Dockerfile
+ - task: docker.Make
+ image: algorand/go-algorand-linux
+ version: '{{ docker.Version.linux-amd64.version }}'
+ workDir: /go/src/github.com/algorand/go-algorand
+ target: fulltest ci-build
+ build-linux-arm64:
+ - task: docker.Version
+ name: linux-arm64
+ arch: arm64v8
+ configFilePath: scripts/configure_dev-deps.sh
+ - task: shell.docker.Ensure
+ image: algorand/go-algorand-linux
+ arch: arm64v8
+ version: '{{ docker.Version.linux-arm64.version }}'
+ dockerFilePath: docker/build/cicd.Dockerfile
+ - task: docker.Make
+ image: algorand/go-algorand-linux
+ version: '{{ docker.Version.linux-arm64.version }}'
+ workDir: /go/src/github.com/algorand/go-algorand
+ target: fulltest ci-build
+ build-linux-arm:
+ - task: docker.Version
+ name: linux-arm
+ arch: arm32v6
+ configFilePath: scripts/configure_dev-deps.sh
+ - task: shell.docker.Ensure
+ image: algorand/go-algorand-linux
+ arch: arm32v6
+ version: '{{ docker.Version.linux-arm.version }}'
+ dockerFilePath: docker/build/arm.Dockerfile
+ - task: docker.Make
+ image: algorand/go-algorand-linux
+ version: '{{ docker.Version.linux-arm.version }}'
+ workDir: /go/src/github.com/algorand/go-algorand
+ target: ci-build
+ build-local:
+ - task: shell.Make
+ target: ci-deps fulltest ci-build
+ release:
+ - task: release.notes.GenerateReleaseNotes
+ releaseVersion: ${GO_ALGORAND_RELEASE_VERSION}
+ githubPatToken: ${GITHUB_PAT_TOKEN}
+ githubRepoFullName: algorand/go-algorand
diff --git a/netdeploy/network.go b/netdeploy/network.go
index 06eead476f..89e9e82fec 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -57,7 +57,7 @@ type Network struct {
// CreateNetworkFromTemplate uses the specified template to deploy a new private network
// under the specified root directory.
-func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, importKeys bool, nodeExitCallback nodecontrol.AlgodExitErrorCallback) (Network, error) {
+func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, importKeys bool, nodeExitCallback nodecontrol.AlgodExitErrorCallback, consensus config.ConsensusProtocols) (Network, error) {
n := Network{
rootDir: rootDir,
nodeExitCallback: nodeExitCallback,
@@ -78,7 +78,7 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor
if err != nil {
return n, err
}
-
+ template.Consensus = consensus
err = template.generateGenesisAndWallets(rootDir, name, binDir)
if err != nil {
return n, err
@@ -90,6 +90,7 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor
}
err = n.Save(rootDir)
+ n.SetConsensus(binDir, consensus)
return n, err
}
@@ -437,3 +438,25 @@ func (n Network) Delete(binDir string) error {
n.Stop(binDir)
return os.RemoveAll(n.rootDir)
}
+
+// SetConsensus applies new consensus settings which would get deployed before
+// any of the nodes starts
+func (n Network) SetConsensus(binDir string, consensus config.ConsensusProtocols) error {
+ for _, relayDir := range n.cfg.RelayDirs {
+ relayFulllPath := n.getNodeFullPath(relayDir)
+ nc := nodecontrol.MakeNodeController(binDir, relayFulllPath)
+ err := nc.SetConsensus(consensus)
+ if err != nil {
+ return err
+ }
+ }
+ for _, nodeDir := range n.nodeDirs {
+ nodeFulllPath := n.getNodeFullPath(nodeDir)
+ nc := nodecontrol.MakeNodeController(binDir, nodeFulllPath)
+ err := nc.SetConsensus(consensus)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index 8462bdd70f..1f6e365848 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math/big"
"os"
"path/filepath"
"runtime"
@@ -30,25 +31,15 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/libgoal"
+ "github.com/algorand/go-algorand/netdeploy/remote"
"github.com/algorand/go-algorand/util"
)
-type walletTemplateData struct {
- Name string
- ParticipationOnly bool
-}
-
-type nodeConfig struct {
- Name string
- IsRelay bool
- Wallets []walletTemplateData
- DeadlockDetection int
-}
-
// NetworkTemplate represents the template used for creating private named networks
type NetworkTemplate struct {
- Genesis gen.GenesisData
- Nodes []nodeConfig
+ Genesis gen.GenesisData
+ Nodes []remote.NodeConfigGoal
+ Consensus config.ConsensusProtocols
}
var defaultNetworkTemplate = NetworkTemplate{
@@ -58,7 +49,8 @@ var defaultNetworkTemplate = NetworkTemplate{
func (t NetworkTemplate) generateGenesisAndWallets(targetFolder, networkName, binDir string) error {
genesisData := t.Genesis
genesisData.NetworkName = networkName
- return gen.GenerateGenesisFiles(genesisData, targetFolder, true)
+ mergedConsensus := config.Consensus.Merge(t.Consensus)
+ return gen.GenerateGenesisFiles(genesisData, mergedConsensus, targetFolder, true)
}
// Create data folders for all NodeConfigs, configuring relays appropriately and
@@ -146,7 +138,7 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
// Create any necessary config.json file for this node
nodeCfg := filepath.Join(nodeDir, config.ConfigFilename)
- err = cfg.createConfigFile(nodeCfg, len(t.Nodes)-1) // minus 1 to avoid counting self
+ err = createConfigFile(cfg, nodeCfg, len(t.Nodes)-1) // minus 1 to avoid counting self
if err != nil {
return
}
@@ -180,17 +172,21 @@ func loadTemplateFromReader(reader io.Reader, template *NetworkTemplate) error {
func (t NetworkTemplate) Validate() error {
// Genesis wallet percentages must add up to 100
// Genesis account names must be unique
- totalPct := uint(0)
+ totalPct := big.NewFloat(float64(0))
accounts := make(map[string]bool)
for _, wallet := range t.Genesis.Wallets {
- totalPct += uint(wallet.Stake)
+ if wallet.Stake < 0 {
+ return fmt.Errorf("invalid template: negative stake on Genesis account %s", wallet.Name)
+ }
+ totalPct = totalPct.Add(totalPct, big.NewFloat(wallet.Stake))
upperAcct := strings.ToUpper(wallet.Name)
if _, found := accounts[upperAcct]; found {
return fmt.Errorf("invalid template: duplicate Genesis account %s", wallet.Name)
}
accounts[upperAcct] = true
}
- if totalPct != 100 {
+ totalPctInt, _ := totalPct.Int64()
+ if totalPctInt != 100 {
return fmt.Errorf("invalid template: Genesis account allocations must total 100 (actual %v)", totalPct)
}
@@ -218,7 +214,7 @@ func (t NetworkTemplate) Validate() error {
}
// TODO: Build the JSON object using a real encoder
-func (node nodeConfig) createConfigFile(configFile string, numNodes int) error {
+func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes int) error {
// Override default :8080 REST endpoint, and disable SRV lookup
configString := `{ "GossipFanout": ` + fmt.Sprintf("%d", numNodes) +
`, "EndpointAddress": "127.0.0.1:0", "DNSBootstrapID": "", "EnableProfiler": true`
diff --git a/netdeploy/networkTemplates_test.go b/netdeploy/networkTemplates_test.go
index 042b244359..18d02a76d4 100644
--- a/netdeploy/networkTemplates_test.go
+++ b/netdeploy/networkTemplates_test.go
@@ -64,3 +64,22 @@ func TestGenerateGenesis(t *testing.T) {
fileExists := err == nil
a.True(fileExists)
}
+
+func TestValidate(t *testing.T) {
+ a := require.New(t)
+
+ templateDir, _ := filepath.Abs("../test/testdata/nettemplates")
+ template, _ := loadTemplate(filepath.Join(templateDir, "David20.json"))
+ err := template.Validate()
+ a.NoError(err)
+
+ templateDir, _ = filepath.Abs("../test/testdata/nettemplates")
+ template, _ = loadTemplate(filepath.Join(templateDir, "TenThousandAccountsEqual.json"))
+ err = template.Validate()
+ a.NoError(err)
+
+ templateDir, _ = filepath.Abs("../test/testdata/nettemplates")
+ template, _ = loadTemplate(filepath.Join(templateDir, "NegativeStake.json"))
+ err = template.Validate()
+ a.Error(err)
+}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index f29391de5c..5e10ebeb99 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -253,7 +253,7 @@ func (cfg DeployedNetwork) BuildNetworkFromTemplate(buildCfg BuildConfig, rootDi
if cfg.useExistingGenesis {
fmt.Println(" *** using existing genesis files ***")
} else {
- if err = gen.GenerateGenesisFiles(cfg.GenesisData, genesisFolder, true); err != nil {
+ if err = gen.GenerateGenesisFiles(cfg.GenesisData, config.Consensus, genesisFolder, true); err != nil {
return
}
}
diff --git a/netdeploy/remote/nodeConfig.go b/netdeploy/remote/nodeConfig.go
index 5d70f367bb..5eba9282ff 100644
--- a/netdeploy/remote/nodeConfig.go
+++ b/netdeploy/remote/nodeConfig.go
@@ -50,3 +50,11 @@ func (nc NodeConfig) IsRelay() bool {
// If we advertise to the world an address where we listen for gossip network connections, we are taking on the role of relay.
return nc.NetAddress != ""
}
+
+// NodeConfigGoal represents is a simplified version of NodeConfig used with 'goal network' commands
+type NodeConfigGoal struct {
+ Name string
+ IsRelay bool `json:",omitempty"`
+ Wallets []NodeWalletData
+ DeadlockDetection int `json:"-"`
+}
diff --git a/network/connPerfMon.go b/network/connPerfMon.go
new file mode 100644
index 0000000000..896bd774c8
--- /dev/null
+++ b/network/connPerfMon.go
@@ -0,0 +1,351 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package network
+
+import (
+ "sort"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/crypto"
+)
+
+type pmStage int
+
+const (
+ pmStagePresync pmStage = iota // pmStagePresync used as a warmup for the monitoring. it ensures that we've received at least a single message from each peer, and that we've waited enough time before attempting to sync up.
+ pmStageSync pmStage = iota // pmStageSync is syncing up the peer message streams. It exists once all the connections have demonstrated a given idle time.
+ pmStageAccumulate pmStage = iota // pmStageAccumulate monitors streams and accumulate the messages between the connections.
+ pmStageStopping pmStage = iota // pmStageStopping keep monitoring the streams, but do not accept new messages. It tries to expire pending messages until all pending messages expires.
+ pmStageStopped pmStage = iota // pmStageStopped is the final stage; it means that the performance monitor reached a conclusion regarding the performance statistics
+)
+
+const (
+ pmPresyncTime = 10 * time.Second
+ pmSyncIdleTime = 2 * time.Second
+ pmSyncMaxTime = 25 * time.Second
+ pmAccumulationTime = 60 * time.Second
+ pmAccumulationTimeRange = 30 * time.Second
+ pmAccumulationIdlingTime = 2 * time.Second
+ pmMaxMessageWaitTime = 15 * time.Second
+ pmUndeliveredMessagePenaltyTime = 5 * time.Second
+ pmDesiredMessegeDelayThreshold = 50 * time.Millisecond
+)
+
+// pmMessage is the internal storage for a single message. We save the time the message arrived from each of the peers.
+type pmMessage struct {
+ peerMsgTime map[Peer]int64 // for each peer, when did we see a message the first time
+ firstPeerTime int64 // the timestamp of the first peer that has seen this message.
+}
+
+// pmPeerStatistics is the per-peer resulting datastructure of the performance analysis.
+type pmPeerStatistics struct {
+ peer Peer // the peer interface
+ peerDelay int64 // the peer avarage relative message delay
+ peerFirstMessage float32 // what precentage of the messages were delivered by this peer before any other peer
+}
+
+// pmStatistics is the resulting datastructure of the performance analysis.
+type pmStatistics struct {
+ peerStatistics []pmPeerStatistics // an ordered list of the peers performance statistics
+ messageCount int64 // the number of messages used to calculate the above statistics
+}
+
+// connectionPerformanceMonitor is the connection monitor datatype. We typically would like to have a single monitor for all
+// the outgoing connections.
+type connectionPerformanceMonitor struct {
+ deadlock.Mutex
+ monitoredConnections map[Peer]bool // the map of the connection we're going to monitor. Messages coming from other connections would be ignored.
+ monitoredMessageTags map[Tag]bool // the map of the message tags we're interested in monitoring. Messages that aren't broadcast-type typically would be a good choice here.
+ stage pmStage // the performance monitoring stage.
+ peerLastMsgTime map[Peer]int64 // the map describing the last time we received a message from each of the peers.
+ lastIncomingMsgTime int64 // the time at which the last message was received from any of the peers.
+ stageStartTime int64 // the timestamp at which we switched to the current stage.
+ pendingMessages map[crypto.Digest]*pmMessage // the pendingMessages map contains messages that haven't been received from all the peers within the pmMaxMessageWaitTime
+ connectionDelay map[Peer]int64 // contains the total delay we've sustained by each peer when we're in stages pmStagePresync-pmStageStopping and the average delay after that. ( in nano seconds )
+ firstMessageCount map[Peer]int64 // maps the peers to their accumulated first messages ( the number of times a message seen coming from this peer first )
+ msgCount int64 // total number of messages that we've accumulated.
+ accumulationTime int64 // the duration of which we're going to accumulate messages. This will get randomized to prevent cross-node syncronization.
+}
+
+// makeConnectionPerformanceMonitor creates a new performance monitor instance, that is configured for monitoring the given message tags.
+func makeConnectionPerformanceMonitor(messageTags []Tag) *connectionPerformanceMonitor {
+ msgTagMap := make(map[Tag]bool, len(messageTags))
+ for _, tag := range messageTags {
+ msgTagMap[tag] = true
+ }
+ return &connectionPerformanceMonitor{
+ monitoredConnections: make(map[Peer]bool, 0),
+ monitoredMessageTags: msgTagMap,
+ }
+}
+
+// GetPeersStatistics returns the statistics result of the performance monitoring, once these becomes available.
+// otherwise, it returns nil.
+func (pm *connectionPerformanceMonitor) GetPeersStatistics() (stat *pmStatistics) {
+ pm.Lock()
+ defer pm.Unlock()
+ if pm.stage != pmStageStopped || len(pm.connectionDelay) == 0 {
+ return nil
+ }
+ stat = &pmStatistics{
+ peerStatistics: make([]pmPeerStatistics, 0, len(pm.connectionDelay)),
+ messageCount: pm.msgCount,
+ }
+ for peer, delay := range pm.connectionDelay {
+ peerStat := pmPeerStatistics{
+ peer: peer,
+ peerDelay: delay,
+ }
+ if pm.msgCount > 0 {
+ peerStat.peerFirstMessage = float32(pm.firstMessageCount[peer]) / float32(pm.msgCount)
+ }
+ stat.peerStatistics = append(stat.peerStatistics, peerStat)
+ }
+ sort.Slice(stat.peerStatistics, func(i, j int) bool {
+ return stat.peerStatistics[i].peerDelay > stat.peerStatistics[j].peerDelay
+ })
+ return
+}
+
+// ComparePeers compares the given peers list with the existing peers being monitored. If the
+// peers list has changed since Reset was called, it would return false.
+// The method is insensitive to peer ordering and uses the peer interface pointer to determine equality.
+func (pm *connectionPerformanceMonitor) ComparePeers(peers []Peer) bool {
+ pm.Lock()
+ defer pm.Unlock()
+ for _, peer := range peers {
+ if pm.monitoredConnections[peer] == false {
+ return false
+ }
+ }
+ return len(peers) == len(pm.monitoredConnections)
+}
+
+// Reset updates the existing peers list to the one provided. The Reset method is expected to be used
+// in three scenarios :
+// 1. clearing out the existing monitoring - which brings it to initial state and disables monitoring.
+// 2. change monitored peers - in case we've had some of our peers disconnected/reconnected during the monitoring process.
+// 3. start monitoring
+func (pm *connectionPerformanceMonitor) Reset(peers []Peer) {
+ pm.Lock()
+ defer pm.Unlock()
+ pm.pendingMessages = make(map[crypto.Digest]*pmMessage, 0)
+ pm.monitoredConnections = make(map[Peer]bool, len(peers))
+ pm.peerLastMsgTime = make(map[Peer]int64, len(peers))
+ pm.connectionDelay = make(map[Peer]int64, len(peers))
+ pm.firstMessageCount = make(map[Peer]int64, len(peers))
+ pm.msgCount = 0
+ pm.advanceStage(pmStagePresync, time.Now().UnixNano())
+ pm.accumulationTime = int64(pmAccumulationTime) + int64(crypto.RandUint63())%int64(pmAccumulationTime)
+
+ for _, peer := range peers {
+ pm.monitoredConnections[peer] = true
+ pm.peerLastMsgTime[peer] = pm.stageStartTime
+ pm.connectionDelay[peer] = 0
+ pm.firstMessageCount[peer] = 0
+ }
+
+}
+
+// Notify is the single entrypoint for an incoming message processing. When an outgoing connection
+// is being monitored, it would make a call to Notify, sending the incoming message details.
+// The Notify function will forward this notification to the current stage processing function.
+func (pm *connectionPerformanceMonitor) Notify(msg *IncomingMessage) {
+ pm.Lock()
+ defer pm.Unlock()
+ if pm.monitoredConnections[msg.Sender] == false {
+ return
+ }
+ if pm.monitoredMessageTags[msg.Tag] == false {
+ return
+ }
+ switch pm.stage {
+ case pmStagePresync:
+ pm.notifyPresync(msg)
+ case pmStageSync:
+ pm.notifySync(msg)
+ case pmStageAccumulate:
+ pm.notifyAccumulate(msg)
+ case pmStageStopping:
+ pm.notifyStopping(msg)
+ default: // pmStageStopped
+ }
+}
+
+// notifyPresync waits until pmPresyncTime has passed and monitors the last arrival time
+// of messages from each of the peers.
+func (pm *connectionPerformanceMonitor) notifyPresync(msg *IncomingMessage) {
+ pm.peerLastMsgTime[msg.Sender] = msg.Received
+ if (msg.Received - pm.stageStartTime) < int64(pmPresyncTime) {
+ return
+ }
+ // presync complete. move to the next stage.
+ noMsgPeers := make(map[Peer]bool, 0)
+ for peer, lastMsgTime := range pm.peerLastMsgTime {
+ if lastMsgTime == pm.stageStartTime {
+ // we haven't received a single message from this peer during the entire presync time.
+ noMsgPeers[peer] = true
+ }
+ }
+ if len(noMsgPeers) >= (len(pm.peerLastMsgTime) / 2) {
+ // if more than half of the peers have not sent us a single message,
+ // extend the presync time. We might be in agreement recovery, where we have very low
+ // traffic. If this becomes a repeated issue, it will get solved by the
+ // clique detection algorithm and some of the nodes would get disconnected.
+ pm.stageStartTime = msg.Received
+ return
+ }
+ if len(noMsgPeers) > 0 {
+ // we have one or more peers that did not send a single message throughout the presync time.
+ // ( but less than half ). since we cannot rely on these to send us messages in the future,
+ // we'll disconnect from these peers.
+ pm.advanceStage(pmStageStopped, msg.Received)
+ for peer := range pm.monitoredConnections {
+ if noMsgPeers[peer] {
+ pm.connectionDelay[peer] = int64(pmUndeliveredMessagePenaltyTime)
+ } else {
+ pm.connectionDelay[peer] = 0
+ }
+ }
+ return
+ }
+ pm.lastIncomingMsgTime = msg.Received
+ // otherwise, once we received a message from each of the peers, move to the sync stage.
+ pm.advanceStage(pmStageSync, msg.Received)
+}
+
+// notifySync waits for all the peers' connections to go into an idle phase.
+// when we go into this stage, the peerLastMsgTime will be already updated
+// with the recent message time per peer.
+func (pm *connectionPerformanceMonitor) notifySync(msg *IncomingMessage) {
+ minMsgInterval := pm.updateMessageIdlingInterval(msg.Received)
+ if minMsgInterval > int64(pmSyncIdleTime) || (msg.Received-pm.stageStartTime > int64(pmSyncMaxTime)) {
+ // if we hit the first expression, then it means that we've managed to sync up the connections.
+ // otherwise, we've failed to sync up the connections. That's not great, as we're likely to
+ // have some "penalties" applied, but we can't do much about it.
+ pm.accumulateMessage(msg, true)
+ pm.advanceStage(pmStageAccumulate, msg.Received)
+ }
+}
+
+// notifyAccumulate accumulates the incoming message as needed, and waits between pm.accumulationTime and
+// (pm.accumulationTime + pmAccumulationTimeRange) before moving to the next stage.
+func (pm *connectionPerformanceMonitor) notifyAccumulate(msg *IncomingMessage) {
+ minMsgInterval := pm.updateMessageIdlingInterval(msg.Received)
+ if msg.Received-pm.stageStartTime >= pm.accumulationTime {
+ if minMsgInterval > int64(pmAccumulationIdlingTime) ||
+ (msg.Received-pm.stageStartTime >= pm.accumulationTime+int64(pmAccumulationTimeRange)) {
+ // move to the next stage.
+ pm.advanceStage(pmStageStopping, msg.Received)
+ return
+ }
+ }
+ pm.accumulateMessage(msg, true)
+ pm.pruneOldMessages(msg.Received)
+}
+
+// notifyStopping attempts to stop the message accumulation. Once we reach this stage, no new messages are being
+// added, and old pending messages are being pruned. Once all messages are pruned, it moves to the next stage.
+func (pm *connectionPerformanceMonitor) notifyStopping(msg *IncomingMessage) {
+ pm.accumulateMessage(msg, false)
+ pm.pruneOldMessages(msg.Received)
+ if len(pm.pendingMessages) > 0 {
+ return
+ }
+ // time to wrap up.
+ if pm.msgCount > 0 {
+ for peer := range pm.monitoredConnections {
+ pm.connectionDelay[peer] /= int64(pm.msgCount)
+ }
+ }
+ pm.advanceStage(pmStageStopped, msg.Received)
+}
+
+// advanceStage set the stage variable and update the stage start time.
+func (pm *connectionPerformanceMonitor) advanceStage(newStage pmStage, now int64) {
+ pm.stage = newStage
+ pm.stageStartTime = now
+}
+
+// updateMessageIdlingInterval updates the last message received timestamps and determines how long it has been since
+// the last message was received on any of the incoming peers
+func (pm *connectionPerformanceMonitor) updateMessageIdlingInterval(now int64) (minMsgInterval int64) {
+ currentIncomingMsgTime := pm.lastIncomingMsgTime
+ if pm.lastIncomingMsgTime < now {
+ pm.lastIncomingMsgTime = now
+ }
+ if currentIncomingMsgTime <= now {
+ return now - currentIncomingMsgTime
+ }
+ return 0
+}
+
+func (pm *connectionPerformanceMonitor) pruneOldMessages(now int64) {
+ oldestMessage := now - int64(pmMaxMessageWaitTime)
+ for digest, pendingMsg := range pm.pendingMessages {
+ if oldestMessage < pendingMsg.firstPeerTime {
+ continue
+ }
+ for peer := range pm.monitoredConnections {
+ if msgTime, hasPeer := pendingMsg.peerMsgTime[peer]; hasPeer {
+ msgDelayInterval := msgTime - pendingMsg.firstPeerTime
+ pm.connectionDelay[peer] += msgDelayInterval
+ } else {
+ // we never received this message from this peer.
+ pm.connectionDelay[peer] += int64(pmUndeliveredMessagePenaltyTime)
+ }
+ }
+ delete(pm.pendingMessages, digest)
+ }
+}
+
+func (pm *connectionPerformanceMonitor) accumulateMessage(msg *IncomingMessage, newMessages bool) {
+ msgDigest := generateMessageDigest(msg.Tag, msg.Data)
+
+ pendingMsg := pm.pendingMessages[msgDigest]
+ if pendingMsg == nil {
+ if newMessages {
+ // we don't have this one yet, add it.
+ pm.pendingMessages[msgDigest] = &pmMessage{
+ peerMsgTime: map[Peer]int64{
+ msg.Sender: msg.Received,
+ },
+ firstPeerTime: msg.Received,
+ }
+ pm.firstMessageCount[msg.Sender]++
+ pm.msgCount++
+ }
+ return
+ }
+ // we have already seen this digest
+ // make sure we're only moving forward in time. This could be caused when
+ // we have lock contention.
+ pendingMsg.peerMsgTime[msg.Sender] = msg.Received
+ if msg.Received < pendingMsg.firstPeerTime {
+ pendingMsg.firstPeerTime = msg.Received
+ }
+
+ if len(pendingMsg.peerMsgTime) == len(pm.monitoredConnections) {
+ // we've received the same message from all our peers.
+ for peer, msgTime := range pendingMsg.peerMsgTime {
+ pm.connectionDelay[peer] += msgTime - pendingMsg.firstPeerTime
+ }
+ delete(pm.pendingMessages, msgDigest)
+ }
+}
diff --git a/network/dialer.go b/network/dialer.go
new file mode 100644
index 0000000000..26cd6ac088
--- /dev/null
+++ b/network/dialer.go
@@ -0,0 +1,94 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "context"
+ "net"
+ "time"
+
+ "github.com/algorand/go-algorand/tools/network/dnssec"
+)
+
+type netDialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// Dialer establish tcp-level connection with the destination
+type Dialer struct {
+ phonebook Phonebook
+ innerDialer netDialer
+ resolver *net.Resolver
+}
+
+// makeRateLimitingDialer creates a rate limiting dialer that would limit the connections
+// according to the entries in the phonebook.
+func makeRateLimitingDialer(phonebook Phonebook, resolver *dnssec.Resolver) Dialer {
+ var innerDialer netDialer = &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+
+ // if a DNSSEC-aware resolver provided, use a wrapping dnssec.Dialer to parse addr, resolve it securely
+ // and call a regular net.Dialer
+ if resolver != nil {
+ innerDialer = &dnssec.Dialer{
+ InnerDialer: innerDialer.(*net.Dialer),
+ Resolver: resolver,
+ }
+ }
+
+ return Dialer{
+ phonebook: phonebook,
+ innerDialer: innerDialer,
+ }
+}
+
+// Dial connects to the address on the named network.
+// It waits if needed not to exceed connectionsRateLimitingCount.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+ return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext connects to the address on the named network using the provided context.
+// It waits if needed not to exceed connectionsRateLimitingCount.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ var waitTime time.Duration
+ var provisionalTime time.Time
+
+ for {
+ _, waitTime, provisionalTime = d.phonebook.GetConnectionWaitTime(address)
+ if waitTime == 0 {
+ break // break out of the loop and proceed to the connection
+ }
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-time.After(waitTime):
+ }
+ }
+ conn, err := d.innerDialContext(ctx, network, address)
+ d.phonebook.UpdateConnectionTime(address, provisionalTime)
+
+ return conn, err
+}
+
+func (d *Dialer) innerDialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ // this would be a good place to have the dnssec evaluated.
+ return d.innerDialer.DialContext(ctx, network, address)
+}
diff --git a/network/msgOfInterest.go b/network/msgOfInterest.go
new file mode 100644
index 0000000000..2c4af3362e
--- /dev/null
+++ b/network/msgOfInterest.go
@@ -0,0 +1,65 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var errUnableUnmarshallMessage = errors.New("unmarshalMessageOfInterest: could not unmarshall message")
+var errInvalidMessageOfInterest = errors.New("unmarshalMessageOfInterest: message missing the tags key")
+var errInvalidMessageOfInterestLength = errors.New("unmarshalMessageOfInterest: message length is too long")
+
+const maxMessageOfInterestTags = 1024
+
+func unmarshallMessageOfInterest(data []byte) (map[protocol.Tag]bool, error) {
+ // decode the message, and ensure it's a valid message.
+ topics, err := UnmarshallTopics(data)
+ if err != nil {
+ return nil, errUnableUnmarshallMessage
+ }
+ tags, found := topics.GetValue("tags")
+ if !found {
+ return nil, errInvalidMessageOfInterest
+ }
+ if len(tags) > maxMessageOfInterestTags {
+ return nil, errInvalidMessageOfInterestLength
+ }
+ // convert the tags into a tags map.
+ msgTagsMap := make(map[protocol.Tag]bool, len(tags))
+ for _, tag := range strings.Split(string(tags), ",") {
+ msgTagsMap[protocol.Tag(tag)] = true
+ }
+ return msgTagsMap, nil
+}
+
+// MarshallMessageOfInterest generates a message of interest message body for a given set of message tags.
+func MarshallMessageOfInterest(messageTags []protocol.Tag) []byte {
+ // create a long string with all these messages.
+ tags := ""
+ for _, tag := range messageTags {
+ tags += "," + string(tag)
+ }
+ if len(tags) > 0 {
+ tags = tags[1:]
+ }
+ topics := Topics{Topic{key: "tags", data: []byte(tags)}}
+ return topics.MarshallTopics()
+}
diff --git a/network/msgOfInterest_test.go b/network/msgOfInterest_test.go
new file mode 100644
index 0000000000..b1b22b8a50
--- /dev/null
+++ b/network/msgOfInterest_test.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/protocol"
+)
+
+func TestUnmarshallMessageOfInterestErrors(t *testing.T) {
+ tags, err := unmarshallMessageOfInterest([]byte{0x88})
+ require.Equal(t, errUnableUnmarshallMessage, err)
+ require.Equal(t, 0, len(tags))
+
+ invalidTopics := Topics{Topic{key: "something-else", data: []byte{}}}
+ tags, err = unmarshallMessageOfInterest(invalidTopics.MarshallTopics())
+ require.Equal(t, errInvalidMessageOfInterest, err)
+ require.Equal(t, 0, len(tags))
+
+ longTagsList := ""
+ for i := 0; i < 1024; i++ {
+ longTagsList += ",XQ"
+ }
+ longTagsList = longTagsList[1:]
+ longtagsTopics := Topics{Topic{key: "tags", data: []byte(longTagsList)}}
+ tags, err = unmarshallMessageOfInterest(longtagsTopics.MarshallTopics())
+ require.Equal(t, errInvalidMessageOfInterestLength, err)
+ require.Equal(t, 0, len(tags))
+}
+
+func TestMarshallMessageOfInterest(t *testing.T) {
+ bytes := MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag})
+ tags, err := unmarshallMessageOfInterest(bytes)
+ require.NoError(t, err)
+ require.Equal(t, tags[protocol.AgreementVoteTag], true)
+ require.Equal(t, 1, len(tags))
+
+ bytes = MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag, protocol.NetPrioResponseTag})
+ tags, err = unmarshallMessageOfInterest(bytes)
+ require.NoError(t, err)
+ require.Equal(t, tags[protocol.AgreementVoteTag], true)
+ require.Equal(t, tags[protocol.NetPrioResponseTag], true)
+ require.Equal(t, 2, len(tags))
+
+ bytes = MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag, protocol.AgreementVoteTag})
+ tags, err = unmarshallMessageOfInterest(bytes)
+ require.NoError(t, err)
+ require.Equal(t, tags[protocol.AgreementVoteTag], true)
+ require.Equal(t, 1, len(tags))
+}
diff --git a/network/netprio_test.go b/network/netprio_test.go
index 7f6cc457f7..4e9bd299e5 100644
--- a/network/netprio_test.go
+++ b/network/netprio_test.go
@@ -52,12 +52,12 @@ func (nps *netPrioStub) MakePrioResponse(challenge string) []byte {
Addr: nps.addr,
Prio: nps.prio,
}
- return protocol.Encode(r)
+ return protocol.EncodeReflect(r)
}
func (nps *netPrioStub) VerifyPrioResponse(challenge string, response []byte) (addr basics.Address, err error) {
var r netPrioStubResponse
- err = protocol.Decode(response, &r)
+ err = protocol.DecodeReflect(response, &r)
if err != nil {
return
}
diff --git a/network/phonebook.go b/network/phonebook.go
index aabe08eb57..ad7db2dcec 100644
--- a/network/phonebook.go
+++ b/network/phonebook.go
@@ -35,17 +35,97 @@ type Phonebook interface {
// UpdateRetryAfter updates the retry-after field for the entries matching the given address
UpdateRetryAfter(addr string, retryAfter time.Time)
+
+ // GetConnectionWaitTime will calculate and return the wait
+ // time to prevent exceeding connectionsRateLimitingCount.
+ // The connection should be established when the waitTime is 0.
+ // It will register a provisional next connection time when the waitTime is 0.
+ // The provisional time should be updated after the connection with UpdateConnectionTime
+ GetConnectionWaitTime(addr string) (addrInPhonebook bool,
+ waitTime time.Duration, provisionalTime time.Time)
+
+ // UpdateConnectionTime will update the provisional connection time.
+ // Returns true if the addr was in the phonebook
+ UpdateConnectionTime(addr string, provisionalTime time.Time) bool
+
+ // ReplacePeerList merges a set of addresses with that passed in for networkName
+ // new entries in dnsAddresses are being added
+ // existing items that aren't included in dnsAddresses are being removed
+ // matching entries don't change
+ ReplacePeerList(dnsAddresses []string, networkName string)
+
+ // ExtendPeerList adds unique addresses to this set of addresses
+ ExtendPeerList(more []string, networkName string)
+}
+
+// addressData: holds the information associated with each phonebook address.
+// retryAfter: is the time to wait before retrying to connect to the address.
+// recentConnectionTimes: is the log of connection times used to observe the maximum
+// connections to the address in a given time window.
+// networkNames: lists the networks to which the given address belongs.
+type addressData struct {
+ retryAfter time.Time
+ recentConnectionTimes []time.Time
+ networkNames map[string]bool
+}
+
+func makePhonebookEntryData(networkName string) addressData {
+ pbData := addressData{
+ networkNames: make(map[string]bool),
+ recentConnectionTimes: make([]time.Time, 0),
+ }
+ pbData.networkNames[networkName] = true
+ return pbData
+}
+
+// phonebookImpl holds the server connection configuration values
+// and the list of request times within the time window for each
+// address.
+type phonebookImpl struct {
+ connectionsRateLimitingCount uint
+ connectionsRateLimitingWindow time.Duration
+ data map[string]addressData
+ lock deadlock.RWMutex
+}
+
+// MakePhonebook creates phonebookImpl with the passed configuration values
+func MakePhonebook(connectionsRateLimitingCount uint,
+ connectionsRateLimitingWindow time.Duration) Phonebook {
+ return &phonebookImpl{
+ connectionsRateLimitingCount: connectionsRateLimitingCount,
+ connectionsRateLimitingWindow: connectionsRateLimitingWindow,
+ data: make(map[string]addressData, 0),
+ }
}
-type phonebookData struct {
- retryAfter time.Time
+func (e *phonebookImpl) deletePhonebookEntry(entryName, networkName string) {
+ pbEntry := e.data[entryName]
+ delete(pbEntry.networkNames, networkName)
+ if 0 == len(pbEntry.networkNames) {
+ delete(e.data, entryName)
+ }
}
-type phonebookEntries map[string]phonebookData
+// popNElements removes the earliest n times from recentConnectionTimes in
+// addressData for addr.
+// These times are expected to be older than ConnectionsRateLimitingWindow.
+func (e *phonebookImpl) popNElements(n int, addr string) {
+ entry := e.data[addr]
+ entry.recentConnectionTimes = entry.recentConnectionTimes[n:]
+ e.data[addr] = entry
+}
-func (e *phonebookEntries) filterRetryTime(t time.Time) []string {
- o := make([]string, 0, len(*e))
- for addr, entry := range *e {
+// appendTime adds the given time t to recentConnectionTimes in
+// addressData of addr
+func (e *phonebookImpl) appendTime(addr string, t time.Time) {
+ entry := e.data[addr]
+ entry.recentConnectionTimes = append(entry.recentConnectionTimes, t)
+ e.data[addr] = entry
+}
+
+func (e *phonebookImpl) filterRetryTime(t time.Time) []string {
+ o := make([]string, 0, len(e.data))
+ for addr, entry := range e.data {
if t.After(entry.retryAfter) {
o = append(o, addr)
}
@@ -54,47 +134,130 @@ func (e *phonebookEntries) filterRetryTime(t time.Time) []string {
}
// ReplacePeerList merges a set of addresses with that passed in.
-// new entries in they are being added
-// existing items that aren't included in they are being removed
+// new entries in addressesThey are being added
+// existing items that aren't included in addressesThey are being removed
// matching entries don't change
-func (e *phonebookEntries) ReplacePeerList(they []string) {
+func (e *phonebookImpl) ReplacePeerList(addressesThey []string, networkName string) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
// prepare a map of items we'd like to remove.
removeItems := make(map[string]bool, 0)
- for k := range *e {
- removeItems[k] = true
+ for k, pbd := range e.data {
+ if pbd.networkNames[networkName] {
+ removeItems[k] = true
+ }
}
- for _, addr := range they {
- if _, has := (*e)[addr]; has {
- // we already have this. do nothing.
+ for _, addr := range addressesThey {
+ if pbData, has := e.data[addr]; has {
+ // we already have this.
+ // Update the networkName
+ pbData.networkNames[networkName] = true
+
+ // do not remove this entry
delete(removeItems, addr)
} else {
// we don't have this item. add it.
- (*e)[addr] = phonebookData{}
+ e.data[addr] = makePhonebookEntryData(networkName)
}
}
- // remove items that were missing in they
+ // remove items that were missing in addressesThey
for k := range removeItems {
- delete((*e), k)
+ e.deletePhonebookEntry(k, networkName)
}
}
-func (e *phonebookEntries) updateRetryAfter(addr string, retryAfter time.Time) {
- (*e)[addr] = phonebookData{retryAfter: retryAfter}
+func (e *phonebookImpl) UpdateRetryAfter(addr string, retryAfter time.Time) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+
+ var entry addressData
+
+ entry, found := e.data[addr]
+ if !found {
+ return
+ }
+ entry.retryAfter = retryAfter
+ e.data[addr] = entry
}
-// ArrayPhonebook is a simple wrapper on a phonebookEntries map
-type ArrayPhonebook struct {
- Entries phonebookEntries
+// GetConnectionWaitTime will calculate and return the wait
+// time to prevent exceeding connectionsRateLimitingCount.
+// The connection should be established when the waitTime is 0.
+// It will register a provisional next connection time when the waitTime is 0.
+// The provisional time should be updated after the connection with UpdateConnectionTime
+func (e *phonebookImpl) GetConnectionWaitTime(addr string) (addrInPhonebook bool,
+ waitTime time.Duration, provisionalTime time.Time) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+
+ _, addrInPhonebook = e.data[addr]
+ curTime := time.Now()
+ if !addrInPhonebook {
+ // The addr is not in this phonebook.
+ // Will find the addr in a different phonebook.
+ return addrInPhonebook, 0 /* not used */, curTime /* not used */
+ }
+
+ var timeSince time.Duration
+ var numElmtsToRemove int
+ // Remove from recentConnectionTimes the times older than ConnectionsRateLimitingWindowSeconds
+ for numElmtsToRemove < len(e.data[addr].recentConnectionTimes) {
+ timeSince = curTime.Sub((e.data[addr].recentConnectionTimes)[numElmtsToRemove])
+ if timeSince >= e.connectionsRateLimitingWindow {
+ numElmtsToRemove++
+ } else {
+ break // break the loop. The rest are within the rate limiting window
+ }
+ }
+ // Remove the expired elements from e.data[addr].recentConnectionTimes
+ e.popNElements(numElmtsToRemove, addr)
+
+ // If there are max number of connections within the time window, wait
+ numElts := len(e.data[addr].recentConnectionTimes)
+ if uint(numElts) >= e.connectionsRateLimitingCount {
+ return addrInPhonebook, /* true */
+ (e.connectionsRateLimitingWindow - timeSince), curTime /* not used */
+ }
+
+ // Else, there is space in connectionsRateLimitingCount. The
+ // connection request of the caller will proceed
+ // Update curTime, since it may have significantly changed if waited
+ provisionalTime = time.Now()
+ // Append the provisional time for the next connection request
+ e.appendTime(addr, provisionalTime)
+ return addrInPhonebook /* true */, 0 /* no wait. proceed */, provisionalTime
}
-// MakeArrayPhonebook creates a ArrayPhonebook
-func MakeArrayPhonebook() *ArrayPhonebook {
- return &ArrayPhonebook{
- Entries: make(phonebookEntries, 0),
+// UpdateConnectionTime will update the provisional connection time.
+// Returns true if the addr was in the phonebook
+func (e *phonebookImpl) UpdateConnectionTime(addr string, provisionalTime time.Time) bool {
+ e.lock.Lock()
+ defer e.lock.Unlock()
+
+ entry, found := e.data[addr]
+ if !found {
+ return false
+ }
+
+ defer func() {
+ e.data[addr] = entry
+ }()
+
+ // Find the provisionalTime and update it
+ for indx, val := range entry.recentConnectionTimes {
+ if provisionalTime == val {
+ entry.recentConnectionTimes[indx] = time.Now()
+ return true
+ }
}
+ // Case where the time is not found: it was removed from the list.
+ // This may happen when the time expires before the connection was established with the server.
+ // The time should be added again.
+ entry.recentConnectionTimes = append(entry.recentConnectionTimes, time.Now())
+ return true
}
func shuffleStrings(set []string) {
@@ -126,133 +289,29 @@ func shuffleSelect(set []string, n int) []string {
return out
}
-// UpdateRetryAfter updates the retry-after field for the entries matching the given address
-func (p *ArrayPhonebook) UpdateRetryAfter(addr string, retryAfter time.Time) {
- p.Entries.updateRetryAfter(addr, retryAfter)
-}
-
-// GetAddresses returns up to N shuffled address
-func (p *ArrayPhonebook) GetAddresses(n int) []string {
- return shuffleSelect(p.Entries.filterRetryTime(time.Now()), n)
-}
-
-// ThreadsafePhonebook implements Phonebook interface
-type ThreadsafePhonebook struct {
- lock deadlock.RWMutex
- entries phonebookEntries
-}
-
-// MakeThreadsafePhonebook creates a ThreadsafePhonebook
-func MakeThreadsafePhonebook() *ThreadsafePhonebook {
- return &ThreadsafePhonebook{
- entries: make(phonebookEntries, 0),
- }
-}
-
// GetAddresses returns up to N shuffled address
-func (p *ThreadsafePhonebook) GetAddresses(n int) []string {
- p.lock.RLock()
- defer p.lock.RUnlock()
- return shuffleSelect(p.entries.filterRetryTime(time.Now()), n)
-}
-
-// UpdateRetryAfter updates the retry-after field for the entries matching the given address
-func (p *ThreadsafePhonebook) UpdateRetryAfter(addr string, retryAfter time.Time) {
- p.lock.RLock()
- defer p.lock.RUnlock()
- p.entries.updateRetryAfter(addr, retryAfter)
+func (e *phonebookImpl) GetAddresses(n int) []string {
+ e.lock.RLock()
+ defer e.lock.RUnlock()
+ return shuffleSelect(e.filterRetryTime(time.Now()), n)
}
// ExtendPeerList adds unique addresses to this set of addresses
-func (p *ThreadsafePhonebook) ExtendPeerList(more []string) {
- p.lock.Lock()
- defer p.lock.Unlock()
- // TODO: if this gets bad because p.addrs gets long, replace storage with a map[string]bool
+func (e *phonebookImpl) ExtendPeerList(more []string, networkName string) {
+ e.lock.Lock()
+ defer e.lock.Unlock()
for _, addr := range more {
- if _, has := p.entries[addr]; has {
+ if pbEntry, has := e.data[addr]; has {
+ pbEntry.networkNames[networkName] = true
continue
}
- p.entries[addr] = phonebookData{}
+ e.data[addr] = makePhonebookEntryData(networkName)
}
}
// Length returns the number of addrs contained
-func (p *ThreadsafePhonebook) Length() int {
- p.lock.RLock()
- defer p.lock.RUnlock()
- return len(p.entries)
-}
-
-// ReplacePeerList merges a set of addresses with that passed in.
-// new entries in they are being added
-// existing items that aren't included in they are being removed
-// matching entries don't change
-func (p *ThreadsafePhonebook) ReplacePeerList(they []string) {
- p.lock.Lock()
- defer p.lock.Unlock()
- p.entries.ReplacePeerList(they)
-}
-
-// MultiPhonebook contains a map of phonebooks
-type MultiPhonebook struct {
- phonebookMap map[string]Phonebook
- lock deadlock.RWMutex
-}
-
-// MakeMultiPhonebook constructs and returns a new Multi Phonebook
-func MakeMultiPhonebook() *MultiPhonebook {
- return &MultiPhonebook{phonebookMap: make(map[string]Phonebook)}
-}
-
-// GetAddresses returns up to N address
-func (mp *MultiPhonebook) GetAddresses(n int) []string {
- mp.lock.RLock()
- defer mp.lock.RUnlock()
-
- if len(mp.phonebookMap) == 1 {
- for _, phonebook := range mp.phonebookMap {
- return phonebook.GetAddresses(n)
- }
- }
- uniqueEntries := make(map[string]bool, 0)
- for _, p := range mp.phonebookMap {
- for _, addr := range p.GetAddresses(getAllAddresses) {
- uniqueEntries[addr] = true
- }
- }
- out := make([]string, len(uniqueEntries))
- i := 0
- for k := range uniqueEntries {
- out[i] = k
- i++
- }
-
- rand.Shuffle(len(out), func(i, j int) { t := out[i]; out[i] = out[j]; out[j] = t })
- if n < len(out) {
- return out[:n]
- }
- return out
-}
-
-// GetPhonebook retrieves a phonebook by it's name
-func (mp *MultiPhonebook) GetPhonebook(bootstrapNetworkName string) (p Phonebook) {
- mp.lock.Lock()
- defer mp.lock.Unlock()
- return mp.phonebookMap[bootstrapNetworkName]
-}
-
-// AddOrUpdatePhonebook adds or updates Phonebook in Phonebook map
-func (mp *MultiPhonebook) AddOrUpdatePhonebook(bootstrapNetworkName string, p Phonebook) {
- mp.lock.Lock()
- defer mp.lock.Unlock()
- mp.phonebookMap[bootstrapNetworkName] = p
-}
-
-// UpdateRetryAfter updates the retry-after field for the entries matching the given address
-func (mp *MultiPhonebook) UpdateRetryAfter(addr string, retryAfter time.Time) {
- mp.lock.Lock()
- defer mp.lock.Unlock()
- for _, op := range mp.phonebookMap {
- op.UpdateRetryAfter(addr, retryAfter)
- }
+func (e *phonebookImpl) Length() int {
+ e.lock.RLock()
+ defer e.lock.RUnlock()
+ return len(e.data)
}
diff --git a/network/phonebook_test.go b/network/phonebook_test.go
index 196d41caca..6a8ac78c04 100644
--- a/network/phonebook_test.go
+++ b/network/phonebook_test.go
@@ -21,8 +21,10 @@ import (
"math/rand"
"sync"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func testPhonebookAll(t *testing.T, set []string, ph Phonebook) {
@@ -85,46 +87,62 @@ func testPhonebookUniform(t *testing.T, set []string, ph Phonebook, getsize int)
func TestArrayPhonebookAll(t *testing.T) {
set := []string{"a", "b", "c", "d", "e"}
- ph := MakeArrayPhonebook()
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
for _, e := range set {
- ph.Entries[e] = phonebookData{}
+ ph.data[e] = addressData{}
}
testPhonebookAll(t, set, ph)
}
func TestArrayPhonebookUniform1(t *testing.T) {
set := []string{"a", "b", "c", "d", "e"}
- ph := MakeArrayPhonebook()
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
for _, e := range set {
- ph.Entries[e] = phonebookData{}
+ ph.data[e] = addressData{}
}
testPhonebookUniform(t, set, ph, 1)
}
func TestArrayPhonebookUniform3(t *testing.T) {
set := []string{"a", "b", "c", "d", "e"}
- ph := MakeArrayPhonebook()
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
for _, e := range set {
- ph.Entries[e] = phonebookData{}
+ ph.data[e] = addressData{}
}
testPhonebookUniform(t, set, ph, 3)
}
-func extenderThread(th *ThreadsafePhonebook, more []string, wg *sync.WaitGroup, repetitions int) {
+// TestPhonebookExtension tests for extending different phonebooks with
+// addresses.
+func TestPhonebookExtension(t *testing.T) {
+ setA := []string{"a"}
+ moreB := []string{"b"}
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
+ ph.ReplacePeerList(setA, "default")
+ ph.ExtendPeerList(moreB, "default")
+ ph.ExtendPeerList(setA, "other")
+ assert.Equal(t, 2, ph.Length())
+ assert.Equal(t, true, ph.data["a"].networkNames["default"])
+ assert.Equal(t, true, ph.data["a"].networkNames["other"])
+ assert.Equal(t, true, ph.data["b"].networkNames["default"])
+ assert.Equal(t, false, ph.data["b"].networkNames["other"])
+}
+
+func extenderThread(th *phonebookImpl, more []string, wg *sync.WaitGroup, repetitions int) {
defer wg.Done()
for i := 0; i <= repetitions; i++ {
start := rand.Intn(len(more))
end := rand.Intn(len(more)-start) + start
- th.ExtendPeerList(more[start:end])
+ th.ExtendPeerList(more[start:end], "default")
}
- th.ExtendPeerList(more)
+ th.ExtendPeerList(more, "default")
}
func TestThreadsafePhonebookExtension(t *testing.T) {
set := []string{"a", "b", "c", "d", "e"}
more := []string{"f", "g", "h", "i", "j"}
- ph := MakeThreadsafePhonebook()
- ph.ReplacePeerList(set)
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
+ ph.ReplacePeerList(set, "default")
wg := sync.WaitGroup{}
wg.Add(5)
for ti := 0; ti < 5; ti++ {
@@ -135,7 +153,7 @@ func TestThreadsafePhonebookExtension(t *testing.T) {
assert.Equal(t, 10, ph.Length())
}
-func threadTestThreadsafePhonebookExtensionLong(wg *sync.WaitGroup, ph *ThreadsafePhonebook, setSize, repetitions int) {
+func threadTestThreadsafePhonebookExtensionLong(wg *sync.WaitGroup, ph *phonebookImpl, setSize, repetitions int) {
set := make([]string, setSize)
for i := range set {
set[i] = fmt.Sprintf("%06d", i)
@@ -149,7 +167,7 @@ func TestThreadsafePhonebookExtensionLong(t *testing.T) {
t.SkipNow()
return
}
- ph := MakeThreadsafePhonebook()
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
wg := sync.WaitGroup{}
const threads = 5
const setSize = 1000
@@ -166,17 +184,17 @@ func TestThreadsafePhonebookExtensionLong(t *testing.T) {
func TestMultiPhonebook(t *testing.T) {
set := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
- pha := MakeArrayPhonebook()
+ pha := make([]string, 0)
for _, e := range set[:5] {
- pha.Entries[e] = phonebookData{}
+ pha = append(pha, e)
}
- phb := MakeArrayPhonebook()
+ phb := make([]string, 0)
for _, e := range set[5:] {
- phb.Entries[e] = phonebookData{}
+ phb = append(phb, e)
}
- mp := MakeMultiPhonebook()
- mp.AddOrUpdatePhonebook("pha", pha)
- mp.AddOrUpdatePhonebook("phb", phb)
+ mp := MakePhonebook(1, 1*time.Millisecond)
+ mp.ReplacePeerList(pha, "pha")
+ mp.ReplacePeerList(phb, "phb")
testPhonebookAll(t, set, mp)
testPhonebookUniform(t, set, mp, 1)
@@ -185,25 +203,154 @@ func TestMultiPhonebook(t *testing.T) {
func TestMultiPhonebookDuplicateFiltering(t *testing.T) {
set := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
- pha := MakeArrayPhonebook()
+ pha := make([]string, 0)
for _, e := range set[:7] {
- pha.Entries[e] = phonebookData{}
+ pha = append(pha, e)
}
- phb := MakeArrayPhonebook()
+ phb := make([]string, 0)
for _, e := range set[3:] {
- phb.Entries[e] = phonebookData{}
+ phb = append(phb, e)
}
- mp := MakeMultiPhonebook()
- mp.AddOrUpdatePhonebook("pha", pha)
- mp.AddOrUpdatePhonebook("phb", phb)
+ mp := MakePhonebook(1, 1*time.Millisecond)
+ mp.ReplacePeerList(pha, "pha")
+ mp.ReplacePeerList(phb, "phb")
testPhonebookAll(t, set, mp)
testPhonebookUniform(t, set, mp, 1)
testPhonebookUniform(t, set, mp, 3)
}
+func TestWaitAndAddConnectionTimeLongtWindow(t *testing.T) {
+ entries := MakePhonebook(3, 200*time.Millisecond).(*phonebookImpl)
+ addr1 := "addrABC"
+ addr2 := "addrXYZ"
+
+ // Address not in. Should return false
+ addrInPhonebook, _, provisionalTime := entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, false, addrInPhonebook)
+ require.Equal(t, false, entries.UpdateConnectionTime(addr1, provisionalTime))
+
+ // Test the addresses are populated in the phonebook and a
+ // time can be added to one of them
+ entries.ReplacePeerList([]string{addr1, addr2}, "default")
+ addrInPhonebook, waitTime, provisionalTime := entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, true, addrInPhonebook)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+ phBookData := entries.data[addr1].recentConnectionTimes
+ require.Equal(t, 1, len(phBookData))
+
+ // introduce a gap between the two requests
+ time.Sleep(100 * time.Millisecond)
+
+ // add another value to addr
+ addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+ phBookData = entries.data[addr1].recentConnectionTimes
+ require.Equal(t, 2, len(phBookData))
+
+ // wait for the time the first element should be removed
+ time.Sleep(100 * time.Millisecond)
+
+ // the first time should be removed and a new one added
+ // there should not be any wait
+ addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+ phBookData2 := entries.data[addr1].recentConnectionTimes
+ require.Equal(t, 2, len(phBookData2))
+
+ // make sure the right time was removed
+ require.Equal(t, phBookData[1], phBookData2[0])
+ require.Equal(t, true, phBookData2[0].Before(phBookData2[1]))
+
+ // try requesting from another address, make sure
+ // a separate array is used for these new requests
+
+ // add 3 values to another address. should not wait
+ // value 1
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime))
+
+ // introduce a gap between the two requests so that only the first will be removed later when waited
+ time.Sleep(100 * time.Millisecond)
+
+ // value 2
+ addrInPhonebook, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime))
+ // value 3
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime))
+
+ phBookData = entries.data[addr2].recentConnectionTimes
+ // all three times should be queued
+ require.Equal(t, 3, len(phBookData))
+
+ // add another element to trigger wait
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
+ require.Greater(t, int64(waitTime), int64(0))
+ // no element should be removed
+ phBookData2 = entries.data[addr2].recentConnectionTimes
+ require.Equal(t, phBookData[0], phBookData2[0])
+ require.Equal(t, phBookData[1], phBookData2[1])
+ require.Equal(t, phBookData[2], phBookData2[2])
+
+ time.Sleep(waitTime)
+
+ // The wait should be sufficient
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr2)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr2, provisionalTime))
+ // only one element should be removed, and one added
+ phBookData2 = entries.data[addr2].recentConnectionTimes
+ require.Equal(t, 3, len(phBookData))
+
+ // make sure the right time was removed
+ require.Equal(t, phBookData[1], phBookData2[0])
+ require.Equal(t, phBookData[2], phBookData2[1])
+}
+
+func TestWaitAndAddConnectionTimeShortWindow(t *testing.T) {
+ entries := MakePhonebook(3, 2*time.Millisecond).(*phonebookImpl)
+ addr1 := "addrABC"
+
+ // Init the data structures
+ entries.ReplacePeerList([]string{addr1}, "default")
+
+ // add 3 values. should not wait
+ // value 1
+ addrInPhonebook, waitTime, provisionalTime := entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, true, addrInPhonebook)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+ // value 2
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+ // value 3
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+
+ // give enough time to expire all the elements
+ time.Sleep(10 * time.Millisecond)
+
+ // there should not be any wait
+ _, waitTime, provisionalTime = entries.GetConnectionWaitTime(addr1)
+ require.Equal(t, time.Duration(0), waitTime)
+ require.Equal(t, true, entries.UpdateConnectionTime(addr1, provisionalTime))
+
+ // only one time should be left (the newly added)
+ phBookData := entries.data[addr1].recentConnectionTimes
+ require.Equal(t, 1, len(phBookData))
+}
+
func BenchmarkThreadsafePhonebook(b *testing.B) {
- ph := MakeThreadsafePhonebook()
+ ph := MakePhonebook(1, 1).(*phonebookImpl)
threads := 5
if b.N < threads {
threads = b.N
diff --git a/network/ping_test.go b/network/ping_test.go
index 870c056c5b..f719798dfe 100644
--- a/network/ping_test.go
+++ b/network/ping_test.go
@@ -27,15 +27,17 @@ import (
func TestPing(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.config.GossipFanout = 1
+ netA.config.PeerPingPeriodSeconds = 5
netA.Start()
defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
netB := makeTestWebsocketNode(t)
netB.config.GossipFanout = 1
+ netB.config.PeerPingPeriodSeconds = 5
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook = MakeMultiPhonebook()
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook = MakePhonebook(1, 1*time.Millisecond)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
diff --git a/network/rateLimitingTransport.go b/network/rateLimitingTransport.go
new file mode 100644
index 0000000000..f7c167e785
--- /dev/null
+++ b/network/rateLimitingTransport.go
@@ -0,0 +1,74 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "errors"
+ "net/http"
+ "time"
+)
+
+// rateLimitingTransport is the transport for executing a single HTTP transaction, obtaining the Response for a given Request.
+type rateLimitingTransport struct {
+ phonebook Phonebook
+ innerTransport *http.Transport
+ queueingTimeout time.Duration
+}
+
+// ErrConnectionQueueingTimeout indicates that we've exceeded the time allocated for
+// queueing the current request before the request attempt could be made.
+var ErrConnectionQueueingTimeout = errors.New("rateLimitingTransport: queueing timeout")
+
+// makeRateLimitingTransport creates a rate limiting http transport that would limit the requests rate
+// according to the entries in the phonebook.
+func makeRateLimitingTransport(phonebook Phonebook, queueingTimeout time.Duration, dialer *Dialer) rateLimitingTransport {
+ defaultTransport := http.DefaultTransport.(*http.Transport)
+ return rateLimitingTransport{
+ phonebook: phonebook,
+ innerTransport: &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: dialer.innerDialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ },
+ queueingTimeout: queueingTimeout,
+ }
+}
+
+// RoundTrip connects to the address on the named network using the provided context.
+// It waits if needed not to exceed connectionsRateLimitingCount.
+func (r *rateLimitingTransport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ var waitTime time.Duration
+ var provisionalTime time.Time
+ queueingTimedOut := time.After(r.queueingTimeout)
+ for {
+ _, waitTime, provisionalTime = r.phonebook.GetConnectionWaitTime(req.Host)
+ if waitTime == 0 {
+ break // break out of the loop and proceed to the connection
+ }
+ select {
+ case <-time.After(waitTime):
+ case <-queueingTimedOut:
+ return nil, ErrConnectionQueueingTimeout
+ }
+ }
+ res, err = r.innerTransport.RoundTrip(req)
+ r.phonebook.UpdateConnectionTime(req.Host, provisionalTime)
+ return
+}
diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go
index 0e0b19088c..5bceb3b087 100644
--- a/network/requestLogger_test.go
+++ b/network/requestLogger_test.go
@@ -46,7 +46,7 @@ func TestRequestLogger(t *testing.T) {
netA := &WebsocketNetwork{
log: dl,
config: defaultConfig,
- phonebook: MakeMultiPhonebook(),
+ phonebook: MakePhonebook(1, 1*time.Millisecond),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
}
@@ -62,8 +62,8 @@ func TestRequestLogger(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook = MakeMultiPhonebook()
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook = MakePhonebook(1, 1*time.Millisecond)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go
index e1f9d51489..2f8f1957a6 100644
--- a/network/requestTracker_test.go
+++ b/network/requestTracker_test.go
@@ -78,7 +78,7 @@ func TestRateLimiting(t *testing.T) {
wn := &WebsocketNetwork{
log: log,
config: defaultConfig,
- phonebook: MakeMultiPhonebook(),
+ phonebook: MakePhonebook(1,1),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
}
@@ -95,8 +95,6 @@ func TestRateLimiting(t *testing.T) {
defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
- counter := newMessageCounter(t, 5)
- netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
netA.Start()
addrA, postListen := netA.Address()
require.Truef(t, postListen, "Listening network failed to start")
@@ -107,14 +105,15 @@ func TestRateLimiting(t *testing.T) {
clientsCount := int(defaultConfig.ConnectionsRateLimitingCount + 5)
networks := make([]*WebsocketNetwork, clientsCount)
- phonebooks := make([]*ThreadsafePhonebook, clientsCount)
+ phonebooks := make([]Phonebook, clientsCount)
for i := 0; i < clientsCount; i++ {
networks[i] = makeTestWebsocketNodeWithConfig(t, noAddressConfig)
networks[i].config.GossipFanout = 1
- phonebooks[i] = MakeThreadsafePhonebook()
- phonebooks[i].ReplacePeerList([]string{addrA})
- networks[i].phonebook = MakeMultiPhonebook()
- networks[i].phonebook.AddOrUpdatePhonebook("default", phonebooks[i])
+ phonebooks[i] = MakePhonebook(networks[i].config.ConnectionsRateLimitingCount,
+ time.Duration(networks[i].config.ConnectionsRateLimitingWindowSeconds)*time.Second)
+ phonebooks[i].ReplacePeerList([]string{addrA}, "default")
+ networks[i].phonebook = MakePhonebook(1, 1*time.Millisecond)
+ networks[i].phonebook.ReplacePeerList([]string{addrA}, "default")
defer func(net *WebsocketNetwork, i int) {
t.Logf("stopping network %d", i)
net.Stop()
diff --git a/network/topics.go b/network/topics.go
new file mode 100644
index 0000000000..5aaeaaa9cf
--- /dev/null
+++ b/network/topics.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/algorand/go-algorand/crypto"
+)
+
+// Constant strings used as keys for topics
+const (
+ requestHashKey = "RequestHash"
+ ErrorKey = "Error" // used for passing an error message
+)
+
+// Topic is a key-value pair
+type Topic struct {
+ key string
+ data []byte
+}
+
+// MakeTopic Creates a Topic
+func MakeTopic(key string, data []byte) Topic {
+ return Topic{key: key, data: data}
+}
+
+// Topics is an array of type Topic
+// The maximum number of topics allowed is 32
+// Each topic key can be 64 characters long and cannot be size 0
+type Topics []Topic
+
+// MarshallTopics serializes the topics into a byte array
+func (ts Topics) MarshallTopics() (b []byte) {
+
+ // Calculate the total buffer size required to store the topics
+ bufferSize := binary.MaxVarintLen32 // store topic array size
+
+ for _, val := range ts {
+ bufferSize += 2 * binary.MaxVarintLen32 // store key size and the data size
+ bufferSize += len(val.key)
+ bufferSize += len(val.data)
+ }
+
+ buffer := make([]byte, bufferSize)
+ bidx := binary.PutUvarint(buffer, uint64(len(ts)))
+ for _, val := range ts {
+ // write the key size
+ n := binary.PutUvarint(buffer[bidx:], uint64(len(val.key)))
+ bidx += n
+ // write the key
+ n = copy(buffer[bidx:], []byte(val.key))
+ bidx += n
+
+ // write the data size
+ n = binary.PutUvarint(buffer[bidx:], uint64(len(val.data)))
+ bidx += n
+ // write the data
+ n = copy(buffer[bidx:], val.data)
+ bidx += n
+ }
+ return buffer[:bidx]
+}
+
+// UnmarshallTopics unmarshalls the topics from the byte array
+func UnmarshallTopics(buffer []byte) (ts Topics, err error) {
+ // Get the number of topics
+ var idx int
+ numTopics, nr := binary.Uvarint(buffer[idx:])
+ if nr <= 0 {
+ return nil, fmt.Errorf("UnmarshallTopics: could not read the number of topics")
+ }
+ if numTopics > 32 { // numTopics is uint64
+ return nil, fmt.Errorf("UnmarshallTopics: number of topics %d is greater than 32", numTopics)
+ }
+ idx += nr
+ topics := make([]Topic, numTopics)
+
+ for x := 0; x < int(numTopics); x++ {
+ // read the key length
+ strlen, nr := binary.Uvarint(buffer[idx:])
+ if nr <= 0 {
+ return nil, fmt.Errorf("UnmarshallTopics: could not read the key length")
+ }
+ idx += nr
+
+ // read the key
+ if len(buffer) < idx+int(strlen) || strlen > 64 || strlen == 0 {
+ return nil, fmt.Errorf("UnmarshallTopics: could not read the key")
+ }
+ topics[x].key = string(buffer[idx : idx+int(strlen)])
+ idx += int(strlen)
+
+ // read the data length
+ dataLen, nr := binary.Uvarint(buffer[idx:])
+ if nr <= 0 {
+ return nil, fmt.Errorf("UnmarshallTopics: could not read the data length")
+ }
+ idx += nr
+
+ // read the data
+ if len(buffer) < idx+int(dataLen) {
+ return nil, fmt.Errorf("UnmarshallTopics: data larger than buffer size")
+ }
+ topics[x].data = make([]byte, dataLen)
+ copy(topics[x].data, buffer[idx:idx+int(dataLen)])
+ idx += int(dataLen)
+ }
+ return topics, nil
+}
+
+// hashTopics returns the hash of serialized topics.
+// Expects the nonce to be already added as a topic
+func hashTopics(topics []byte) (partialHash uint64) {
+ digest := crypto.Hash(topics)
+ partialHash = digest.TrimUint64()
+ return partialHash
+}
+
+// GetValue returns the value of the key if the key is found in the topics
+func (ts *Topics) GetValue(key string) (val []byte, found bool) {
+ for _, t := range *ts {
+ if t.key == key {
+ return t.data, true
+ }
+ }
+ return
+}
diff --git a/network/topics_test.go b/network/topics_test.go
new file mode 100644
index 0000000000..83ab1f6b01
--- /dev/null
+++ b/network/topics_test.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2019-2020 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package network
+
+import (
+ "encoding/binary"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Test the marshall/unmarshall of Topics
+func TestTopics(t *testing.T) {
+
+ topics := Topics{
+ Topic{
+ key: "key1",
+ data: []byte("value 1"),
+ },
+ Topic{
+ key: "Key2",
+ data: []byte("value of key2"),
+ },
+ }
+
+ // Check if the topics were initialized correctly
+ require.Equal(t, 2, len(topics))
+
+ require.Equal(t, "key1", topics[0].key)
+ require.Equal(t, "value 1", string(topics[0].data))
+
+ require.Equal(t, "Key2", topics[1].key)
+ val, found := topics.GetValue("Key2")
+ require.Equal(t, true, found)
+ require.Equal(t, "value of key2", string(val))
+
+ // Check if can be marshalled without errors
+ buffer := topics.MarshallTopics()
+
+ // Check if can be unmarshalled without errors
+ unMarshalled, e := UnmarshallTopics(buffer)
+ require.Empty(t, e)
+
+ // Check if the unmarshalled is equal to the original
+ require.Equal(t, len(topics), len(unMarshalled))
+
+ require.Equal(t, topics[0].key, unMarshalled[0].key)
+ require.Equal(t, topics[0].data, unMarshalled[0].data)
+
+ require.Equal(t, topics[1].key, unMarshalled[1].key)
+ require.Equal(t, topics[1].data, unMarshalled[1].data)
+}
+
+// TestCurruptedTopics checks the errors
+// Makes sure UnmarshallTopics will not attempt to read beyond the buffer limits
+func TestCurruptedTopics(t *testing.T) {
+
+ var buffer []byte
+
+ // empty buffer
+ buffer = make([]byte, 0)
+ _, err := UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: could not read the number of topics"))
+
+ // more than 32 topics
+ buffer = make([]byte, binary.MaxVarintLen32)
+ binary.PutUvarint(buffer, 33)
+ _, err = UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: number of topics %d is greater than 32", 33))
+
+ // no room for the key length
+ buffer = make([]byte, 1)
+ binary.PutUvarint(buffer, 1)
+ _, err = UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: could not read the key length"))
+
+ // key length > buffer size
+ buffer = make([]byte, 2)
+ binary.PutUvarint(buffer, 1)
+ binary.PutUvarint(buffer[1:], 5)
+ _, err = UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: could not read the key"))
+
+ // key length > buffer size 64
+ buffer = make([]byte, 100)
+ binary.PutUvarint(buffer, 1)
+ binary.PutUvarint(buffer[1:], 65)
+ _, err = UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: could not read the key"))
+
+ // no room for the data length
+ buffer = make([]byte, 3)
+ binary.PutUvarint(buffer, 1) // 1 topic
+ binary.PutUvarint(buffer[1:], 1) // 1 char key
+ _, err = UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: could not read the data length"))
+
+ // datalen > buffer size
+ buffer = make([]byte, 5)
+ binary.PutUvarint(buffer, 1) // 1 topic
+ binary.PutUvarint(buffer[1:], 1) // 1 char key
+ // buffer size is 5. Room for 1 byte data.
+ // [/*topics:*/1, /*key len:*/ 1, /*key:*/ 0, /*data len:*/ 2, /*1 byte space for data*/ 0]
+ // 2 byte data size should error
+ binary.PutUvarint(buffer[3:], 2)
+ _, err = UnmarshallTopics(buffer)
+ require.Equal(t, err, fmt.Errorf("UnmarshallTopics: data larger than buffer size"))
+}
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index a369b9332f..4675fdf098 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -27,6 +27,7 @@ import (
"math/rand"
"net"
"net/http"
+ "net/textproto"
"net/url"
"path"
"regexp"
@@ -37,6 +38,7 @@ import (
"sync"
"sync/atomic"
"time"
+ //"os"
"github.com/algorand/go-deadlock"
"github.com/algorand/websocket"
@@ -50,6 +52,7 @@ import (
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/protocol"
tools_network "github.com/algorand/go-algorand/tools/network"
+ "github.com/algorand/go-algorand/tools/network/dnssec"
"github.com/algorand/go-algorand/util/metrics"
)
@@ -173,6 +176,15 @@ type GossipNode interface {
// ClearHandlers deregisters all the existing message handlers.
ClearHandlers()
+
+ // GetRoundTripper returns a Transport that would limit the number of outgoing connections.
+ GetRoundTripper() http.RoundTripper
+
+ // OnNetworkAdvance notifies the network library that the agreement protocol was able to make a notable progress.
+ // this is the only indication that we have that we haven't formed a clique, where all incoming messages
+ // arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar
+ // characteristics as with a watchdog timer.
+ OnNetworkAdvance()
}
// IncomingMessage represents a message arriving from some peer in our p2p network
@@ -205,6 +217,7 @@ type OutgoingMessage struct {
Action ForwardingPolicy
Tag Tag
Payload []byte
+ Topics Topics
}
// ForwardingPolicy is an enum indicating to whom we should send a message
@@ -219,6 +232,9 @@ const (
// Broadcast - forward to everyone (except the sender)
Broadcast
+
+ // Respond - reply to the sender
+ Respond
)
// MessageHandler takes a IncomingMessage (e.g., vote, transaction), processes it, and returns what (if anything)
@@ -246,7 +262,7 @@ type TaggedMessageHandler struct {
// Propagate is a convenience function to save typing in the common case of a message handler telling us to propagate an incoming message
// "return network.Propagate(msg)" instead of "return network.OutgoingMsg{network.Broadcast, msg.Tag, msg.Data}"
func Propagate(msg IncomingMessage) OutgoingMessage {
- return OutgoingMessage{Broadcast, msg.Tag, msg.Data}
+ return OutgoingMessage{Broadcast, msg.Tag, msg.Data, nil}
}
// GossipNetworkPath is the URL path to connect to the websocket gossip node at.
@@ -281,7 +297,7 @@ type WebsocketNetwork struct {
broadcastQueueHighPrio chan broadcastRequest
broadcastQueueBulk chan broadcastRequest
- phonebook *MultiPhonebook
+ phonebook Phonebook
GenesisID string
NetworkID protocol.NetworkID
@@ -320,6 +336,24 @@ type WebsocketNetwork struct {
// lastPeerConnectionsSent is the last time the peer connections were sent ( or attempted to be sent ) to the telemetry server.
lastPeerConnectionsSent time.Time
+
+ // connPerfMonitor is used on outgoing connections to measure their relative message timing
+ connPerfMonitor *connectionPerformanceMonitor
+
+ // lastNetworkAdvanceMu synchronizes the access to lastNetworkAdvance
+ lastNetworkAdvanceMu deadlock.Mutex
+
+ // lastNetworkAdvance contains the last timestamp where the agreement protocol was able to make a notable progress.
+ // it used as a watchdog to help us detect connectivity issues ( such as cliques )
+ lastNetworkAdvance time.Time
+
+ // number of throttled outgoing connections "slots" needed to be populated.
+ throttledOutgoingConnections int32
+
+ // transport and dialer are customized to limit the number of
+ // connection in compliance with connectionsRateLimitingCount.
+ transport rateLimitingTransport
+ dialer Dialer
}
type broadcastRequest struct {
@@ -499,7 +533,8 @@ func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer {
var addrs []string
addrs = wn.phonebook.GetAddresses(1000)
for _, addr := range addrs {
- outPeers = append(outPeers, &wsPeerCore{net: wn, rootURL: addr})
+ peerCore := makePeerCore(wn, addr, wn.GetRoundTripper(), "" /*origin address*/)
+ outPeers = append(outPeers, &peerCore)
}
case PeersConnectedIn:
wn.peersLock.RLock()
@@ -515,6 +550,12 @@ func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer {
}
func (wn *WebsocketNetwork) setup() {
+ var preferredResolver *dnssec.Resolver
+ if wn.config.DNSSecurityRelayAddrEnforced() {
+ preferredResolver = &dnssec.DefaultResolver
+ }
+ wn.dialer = makeRateLimitingDialer(wn.phonebook, preferredResolver)
+ wn.transport = makeRateLimitingTransport(wn.phonebook, 10*time.Second, &wn.dialer)
wn.upgrader.ReadBufferSize = 4096
wn.upgrader.WriteBufferSize = 4096
@@ -571,6 +612,13 @@ func (wn *WebsocketNetwork) setup() {
if wn.config.EnableIncomingMessageFilter {
wn.incomingMsgFilter = makeMessageFilter(wn.config.IncomingMessageFilterBucketCount, wn.config.IncomingMessageFilterBucketSize)
}
+ wn.connPerfMonitor = makeConnectionPerformanceMonitor([]Tag{protocol.AgreementVoteTag, protocol.TxnTag})
+ wn.lastNetworkAdvance = time.Now().UTC()
+ wn.handlers.log = wn.log
+
+ if wn.config.NetworkProtocolVersion != "" {
+ SupportedProtocolVersions = []string{wn.config.NetworkProtocolVersion}
+ }
}
func (wn *WebsocketNetwork) rlimitIncomingConnections() error {
@@ -643,6 +691,13 @@ func (wn *WebsocketNetwork) Start() {
// wrap the limited connection listener with a requests tracker listener
wn.listener = wn.requestsTracker.Listener(listener)
wn.log.Debugf("listening on %s", wn.listener.Addr().String())
+ wn.throttledOutgoingConnections = int32(wn.config.GossipFanout / 2)
+ } else {
+ // on non-relay, all the outgoing connections are throttled.
+ wn.throttledOutgoingConnections = int32(wn.config.GossipFanout)
+ }
+ if wn.config.DisableOutgoingConnectionThrottling {
+ wn.throttledOutgoingConnections = 0
}
if wn.config.TLSCertFile != "" && wn.config.TLSKeyFile != "" {
wn.scheme = "https"
@@ -650,8 +705,12 @@ func (wn *WebsocketNetwork) Start() {
wn.scheme = "http"
}
wn.meshUpdateRequests <- meshRequest{false, nil}
- wn.RegisterHandlers(pingHandlers)
- wn.RegisterHandlers(prioHandlers)
+ if wn.config.EnablePingHandler {
+ wn.RegisterHandlers(pingHandlers)
+ }
+ if wn.prioScheme != nil {
+ wn.RegisterHandlers(prioHandlers)
+ }
if wn.listener != nil {
wn.wg.Add(1)
go wn.httpdThread()
@@ -736,40 +795,39 @@ func (wn *WebsocketNetwork) setHeaders(header http.Header) {
localInstanceName := wn.log.GetInstanceName()
header.Set(TelemetryIDHeader, localTelemetryGUID)
header.Set(InstanceNameHeader, localInstanceName)
- header.Set(ProtocolVersionHeader, ProtocolVersion)
header.Set(AddressHeader, wn.PublicAddress())
header.Set(NodeRandomHeader, wn.RandomID)
}
// checkServerResponseVariables check that the version and random-id in the request headers matches the server ones.
// it returns true if it's a match, and false otherwise.
-func (wn *WebsocketNetwork) checkServerResponseVariables(header http.Header, addr string) bool {
- otherVersion := header.Get(ProtocolVersionHeader)
- if otherVersion != ProtocolVersion {
- wn.log.Infof("new peer %s version mismatch, mine=%s theirs=%s, headers %#v", addr, ProtocolVersion, otherVersion, header)
- return false
+func (wn *WebsocketNetwork) checkServerResponseVariables(otherHeader http.Header, addr string) (bool, string) {
+ matchingVersion, otherVersion := wn.checkProtocolVersionMatch(otherHeader)
+ if matchingVersion == "" {
+ wn.log.Infof("new peer %s version mismatch, mine=%v theirs=%s, headers %#v", addr, SupportedProtocolVersions, otherVersion, otherHeader)
+ return false, ""
}
- otherRandom := header.Get(NodeRandomHeader)
+ otherRandom := otherHeader.Get(NodeRandomHeader)
if otherRandom == wn.RandomID || otherRandom == "" {
// This is pretty harmless and some configurations of phonebooks or DNS records make this likely. Quietly filter it out.
if otherRandom == "" {
// missing header.
- wn.log.Warnf("new peer %s did not include random ID header in request. mine=%s headers %#v", addr, wn.RandomID, header)
+ wn.log.Warnf("new peer %s did not include random ID header in request. mine=%s headers %#v", addr, wn.RandomID, otherHeader)
} else {
wn.log.Debugf("new peer %s has same node random id, am I talking to myself? %s", addr, wn.RandomID)
}
- return false
+ return false, ""
}
- otherGenesisID := header.Get(GenesisHeader)
+ otherGenesisID := otherHeader.Get(GenesisHeader)
if wn.GenesisID != otherGenesisID {
if otherGenesisID != "" {
- wn.log.Warnf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.GenesisID, otherGenesisID, header)
+ wn.log.Warnf("new peer %#v genesis mismatch, mine=%#v theirs=%#v, headers %#v", addr, wn.GenesisID, otherGenesisID, otherHeader)
} else {
- wn.log.Warnf("new peer %#v did not include genesis header in response. mine=%#v headers %#v", addr, wn.GenesisID, header)
+ wn.log.Warnf("new peer %#v did not include genesis header in response. mine=%#v headers %#v", addr, wn.GenesisID, otherHeader)
}
- return false
+ return false, ""
}
- return true
+ return true, matchingVersion
}
// getCommonHeaders retreives the common headers for both incoming and outgoing connections from the provided headers.
@@ -814,6 +872,28 @@ func (wn *WebsocketNetwork) checkIncomingConnectionLimits(response http.Response
return http.StatusOK
}
+// checkProtocolVersionMatch tests the ProtocolAcceptVersionHeader and ProtocolVersionHeader headers from the request/response and sees if it can find a match.
+func (wn *WebsocketNetwork) checkProtocolVersionMatch(otherHeaders http.Header) (matchingVersion string, otherVersion string) {
+ otherAcceptedVersions := otherHeaders[textproto.CanonicalMIMEHeaderKey(ProtocolAcceptVersionHeader)]
+ for _, otherAcceptedVersion := range otherAcceptedVersions {
+ // do we have a matching version ?
+ for _, supportedProtocolVersion := range SupportedProtocolVersions {
+ if supportedProtocolVersion == otherAcceptedVersion {
+ matchingVersion = supportedProtocolVersion
+ return matchingVersion, ""
+ }
+ }
+ }
+
+ otherVersion = otherHeaders.Get(ProtocolVersionHeader)
+ for _, supportedProtocolVersion := range SupportedProtocolVersions {
+ if supportedProtocolVersion == otherVersion {
+ return supportedProtocolVersion, otherVersion
+ }
+ }
+ return "", otherVersion
+}
+
// checkIncomingConnectionVariables checks the variables that were provided on the request, and compares them to the
// local server supported parameters. If all good, it returns http.StatusOK; otherwise, it write the error to the ResponseWriter
// and returns the http status.
@@ -835,19 +915,6 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo
return http.StatusPreconditionFailed
}
- otherVersion := request.Header.Get(ProtocolVersionHeader)
- if otherVersion != ProtocolVersion {
- wn.log.Infof("new peer %s version mismatch, mine=%s theirs=%s, headers %#v", request.RemoteAddr, ProtocolVersion, otherVersion, request.Header)
- networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "mismatching protocol version"})
- response.WriteHeader(http.StatusPreconditionFailed)
- message := fmt.Sprintf("Requested version %s = %s mismatches server version", ProtocolVersionHeader, otherVersion)
- n, err := response.Write([]byte(message))
- if err != nil {
- wn.log.Warnf("ws failed to write response '%s' : n = %d err = %v", message, n, err)
- }
- return http.StatusPreconditionFailed
- }
-
otherRandom := request.Header.Get(NodeRandomHeader)
if otherRandom == "" {
// This is pretty harmless and some configurations of phonebooks or DNS records make this likely. Quietly filter it out.
@@ -887,6 +954,19 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
return
}
+ matchingVersion, otherVersion := wn.checkProtocolVersionMatch(request.Header)
+ if matchingVersion == "" {
+ wn.log.Infof("new peer %s version mismatch, mine=%v theirs=%s, headers %#v", request.RemoteAddr, SupportedProtocolVersions, otherVersion, request.Header)
+ networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "mismatching protocol version"})
+ response.WriteHeader(http.StatusPreconditionFailed)
+ message := fmt.Sprintf("Requested version %s not in %v mismatches server version", otherVersion, SupportedProtocolVersions)
+ n, err := response.Write([]byte(message))
+ if err != nil {
+ wn.log.Warnf("ws failed to write response '%s' : n = %d err = %v", message, n, err)
+ }
+ return
+ }
+
if wn.checkIncomingConnectionVariables(response, request) != http.StatusOK {
// we've already logged and written all response(s).
return
@@ -895,15 +975,16 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
// if UseXForwardedForAddressField is not empty, attempt to override the otherPublicAddr with the X Forwarded For origin
trackedRequest.otherPublicAddr = trackedRequest.remoteAddr
- requestHeader := make(http.Header)
- wn.setHeaders(requestHeader)
- requestHeader.Set(GenesisHeader, wn.GenesisID)
+ responseHeader := make(http.Header)
+ wn.setHeaders(responseHeader)
+ responseHeader.Set(ProtocolVersionHeader, matchingVersion)
+ responseHeader.Set(GenesisHeader, wn.GenesisID)
var challenge string
if wn.prioScheme != nil {
challenge = wn.prioScheme.NewPrioChallenge()
- requestHeader.Set(PriorityChallengeHeader, challenge)
+ responseHeader.Set(PriorityChallengeHeader, challenge)
}
- conn, err := wn.upgrader.Upgrade(response, request, requestHeader)
+ conn, err := wn.upgrader.Upgrade(response, request, responseHeader)
if err != nil {
wn.log.Info("ws upgrade fail ", err)
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "ws upgrade fail"})
@@ -916,17 +997,14 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
}
peer := &wsPeer{
- wsPeerCore: wsPeerCore{
- net: wn,
- rootURL: trackedRequest.otherPublicAddr,
- originAddress: trackedRequest.remoteHost,
- },
+ wsPeerCore: makePeerCore(wn, trackedRequest.otherPublicAddr, wn.GetRoundTripper(), trackedRequest.remoteHost),
conn: conn,
outgoing: false,
InstanceName: trackedRequest.otherInstanceName,
incomingMsgFilter: wn.incomingMsgFilter,
prioChallenge: challenge,
createTime: trackedRequest.created,
+ version: matchingVersion,
}
peer.TelemetryGUID = trackedRequest.otherTelemetryGUID
peer.init(wn.config, wn.outgoingMessagesBufferSize)
@@ -967,6 +1045,7 @@ func (wn *WebsocketNetwork) messageHandlerThread() {
}
//wn.log.Debugf("msg handling %#v [%d]byte", msg.Tag, len(msg.Data))
start := time.Now()
+
// now, send to global handlers
outmsg := wn.handlers.Handle(msg)
handled := time.Now()
@@ -980,6 +1059,8 @@ func (wn *WebsocketNetwork) messageHandlerThread() {
go wn.disconnectThread(msg.Sender, disconnectBadData)
case Broadcast:
wn.Broadcast(wn.ctx, msg.Tag, msg.Data, false, msg.Sender)
+ case Respond:
+ msg.Sender.(*wsPeer).Respond(wn.ctx, msg, outmsg.Topics)
default:
}
case <-inactivityCheckTicker.C:
@@ -1027,7 +1108,7 @@ func (wn *WebsocketNetwork) checkSlowWritingPeers() {
func (wn *WebsocketNetwork) sendFilterMessage(msg IncomingMessage) {
digest := generateMessageDigest(msg.Tag, msg.Data)
//wn.log.Debugf("send filter %s(%d) %v", msg.Tag, len(msg.Data), digest)
- wn.Broadcast(context.Background(), protocol.MsgSkipTag, digest[:], false, msg.Sender)
+ wn.Broadcast(context.Background(), protocol.MsgDigestSkipTag, digest[:], false, msg.Sender)
}
func (wn *WebsocketNetwork) broadcastThread() {
@@ -1093,7 +1174,7 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool,
copy(mbytes[len(tbytes):], request.data)
var digest crypto.Digest
- if request.tag != protocol.MsgSkipTag && len(request.data) >= messageFilterSize {
+ if request.tag != protocol.MsgDigestSkipTag && len(request.data) >= messageFilterSize {
digest = crypto.Hash(mbytes)
}
@@ -1131,6 +1212,19 @@ func (wn *WebsocketNetwork) NumPeers() int {
return len(wn.peers)
}
+// outgoingPeers returns an array of the outgoing peers.
+func (wn *WebsocketNetwork) outgoingPeers() (peers []Peer) {
+ wn.peersLock.RLock()
+ defer wn.peersLock.RUnlock()
+ peers = make([]Peer, 0, len(wn.peers))
+ for _, peer := range wn.peers {
+ if peer.outgoing {
+ peers = append(peers, peer)
+ }
+ }
+ return
+}
+
func (wn *WebsocketNetwork) numOutgoingPeers() int {
wn.peersLock.RLock()
defer wn.peersLock.RUnlock()
@@ -1180,6 +1274,7 @@ func (wn *WebsocketNetwork) connectedForIP(host string) (totalConnections int) {
}
const meshThreadInterval = time.Minute
+const cliqueResolveInterval = 5 * time.Minute
type meshRequest struct {
disconnect bool
@@ -1219,40 +1314,25 @@ func (wn *WebsocketNetwork) meshThread() {
dnsAddrs := wn.getDNSAddrs(dnsBootstrap)
if len(dnsAddrs) > 0 {
wn.log.Debugf("got %d dns addrs, %#v", len(dnsAddrs), dnsAddrs[:imin(5, len(dnsAddrs))])
- dnsPhonebook := wn.phonebook.GetPhonebook(dnsBootstrap)
- if dnsPhonebook == nil {
- // create one, if we don't have one already.
- dnsPhonebook = MakeThreadsafePhonebook()
- wn.phonebook.AddOrUpdatePhonebook(dnsBootstrap, dnsPhonebook)
- }
- if tsPhonebook, ok := dnsPhonebook.(*ThreadsafePhonebook); ok {
- tsPhonebook.ReplacePeerList(dnsAddrs)
- }
+ wn.phonebook.ReplacePeerList(dnsAddrs, dnsBootstrap)
} else {
wn.log.Infof("got no DNS addrs for network %s", wn.NetworkID)
}
}
- desired := wn.config.GossipFanout
- numOutgoing := wn.numOutgoingPeers() + wn.numOutgoingPending()
- need := desired - numOutgoing
- if need > 0 {
- // get more than we need so that we can ignore duplicates
- newAddrs := wn.phonebook.GetAddresses(desired + numOutgoing)
- for _, na := range newAddrs {
- if na == wn.config.PublicAddress {
- continue
- }
- gossipAddr, ok := wn.tryConnectReserveAddr(na)
- if ok {
- wn.wg.Add(1)
- go wn.tryConnect(na, gossipAddr)
- need--
- if need == 0 {
- break
- }
- }
+
+ // as long as the call to checkExistingConnectionsNeedDisconnecting is deleting existing connections, we want to
+ // kick off the creation of new connections.
+ for {
+ if wn.checkNewConnectionsNeeded() {
+ // new connections were created.
+ break
+ }
+ if !wn.checkExistingConnectionsNeedDisconnecting() {
+ // no connections were removed.
+ break
}
}
+
if request.done != nil {
close(request.done)
}
@@ -1264,6 +1344,123 @@ func (wn *WebsocketNetwork) meshThread() {
}
}
+// checkNewConnectionsNeeded checks to see if we need to have more connections to meet the GossipFanout target.
+// if we do, it will spin async connection go routines.
+// it returns false if no connections are needed, and true otherwise.
+// note that the determination of needed connections could be inaccurate, and it might return false while
+// more connections should be created.
+func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool {
+ desired := wn.config.GossipFanout
+ numOutgoingTotal := wn.numOutgoingPeers() + wn.numOutgoingPending()
+ need := desired - numOutgoingTotal
+ if need <= 0 {
+ return false
+ }
+ // get more than we need so that we can ignore duplicates
+ newAddrs := wn.phonebook.GetAddresses(desired + numOutgoingTotal)
+ for _, na := range newAddrs {
+ if na == wn.config.PublicAddress {
+ // filter out self-public address, so we won't try to connect to ourselves.
+ continue
+ }
+ gossipAddr, ok := wn.tryConnectReserveAddr(na)
+ if ok {
+ wn.wg.Add(1)
+ go wn.tryConnect(na, gossipAddr)
+ need--
+ if need == 0 {
+ break
+ }
+ }
+ }
+ return true
+}
+
+// checkExistingConnectionsNeedDisconnecting checks to see if existing connections need to be dropped due to
+// performance issues and/or network being stalled.
+func (wn *WebsocketNetwork) checkExistingConnectionsNeedDisconnecting() bool {
+ // we already connected ( or connecting.. ) to GossipFanout peers.
+ // get the actual peers.
+ outgoingPeers := wn.outgoingPeers()
+ if len(outgoingPeers) < wn.config.GossipFanout {
+ // reset the performance monitor.
+ wn.connPerfMonitor.Reset([]Peer{})
+ return wn.checkNetworkAdvanceDisconnect()
+ }
+
+ if !wn.connPerfMonitor.ComparePeers(outgoingPeers) {
+ // different set of peers. restart monitoring.
+ wn.connPerfMonitor.Reset(outgoingPeers)
+ }
+
+ // same set of peers.
+ peerStat := wn.connPerfMonitor.GetPeersStatistics()
+ if peerStat == nil {
+ // performance metrics are not yet ready.
+ return wn.checkNetworkAdvanceDisconnect()
+ }
+
+ // update peers with the performance metrics we've gathered.
+ var leastPerformingPeer *wsPeer = nil
+ for _, stat := range peerStat.peerStatistics {
+ wsPeer := stat.peer.(*wsPeer)
+ wsPeer.peerMessageDelay = stat.peerDelay
+ wn.log.Infof("network performance monitor - peer '%s' delay %d first message portion %d%%", wsPeer.GetAddress(), stat.peerDelay, int(stat.peerFirstMessage*100))
+ if wsPeer.throttledOutgoingConnection && leastPerformingPeer == nil {
+ leastPerformingPeer = wsPeer
+ }
+ }
+ if leastPerformingPeer == nil {
+ return wn.checkNetworkAdvanceDisconnect()
+ }
+ wn.disconnect(leastPerformingPeer, disconnectLeastPerformingPeer)
+ wn.connPerfMonitor.Reset([]Peer{})
+
+ return true
+}
+
+// checkNetworkAdvanceDisconnect is using the lastNetworkAdvance indicator to see if the network is currently "stuck".
+// if it seems to be "stuck", a randomly picked peer would be disconnected.
+func (wn *WebsocketNetwork) checkNetworkAdvanceDisconnect() bool {
+ lastNetworkAdvance := wn.getLastNetworkAdvance()
+ if time.Now().UTC().Sub(lastNetworkAdvance) < cliqueResolveInterval {
+ return false
+ }
+ outgoingPeers := wn.outgoingPeers()
+ if len(outgoingPeers) == 0 {
+ return false
+ }
+ if wn.numOutgoingPending() > 0 {
+ // we're currently trying to extend the list of outgoing connections. no need to
+ // disconnect any existing connection to free up room for another connection.
+ return false
+ }
+ var peer *wsPeer
+ disconnectPeerIdx := crypto.RandUint63() % uint64(len(outgoingPeers))
+ peer = outgoingPeers[disconnectPeerIdx].(*wsPeer)
+
+ wn.disconnect(peer, disconnectCliqueResolve)
+ wn.connPerfMonitor.Reset([]Peer{})
+ wn.OnNetworkAdvance()
+ return true
+}
+
+func (wn *WebsocketNetwork) getLastNetworkAdvance() time.Time {
+ wn.lastNetworkAdvanceMu.Lock()
+ defer wn.lastNetworkAdvanceMu.Unlock()
+ return wn.lastNetworkAdvance
+}
+
+// OnNetworkAdvance notifies the network library that the agreement protocol was able to make notable progress.
+// this is the only indication that we have that we haven't formed a clique, where all incoming messages
+// arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar
+// characteristics as with a watchdog timer.
+func (wn *WebsocketNetwork) OnNetworkAdvance() {
+ wn.lastNetworkAdvanceMu.Lock()
+ defer wn.lastNetworkAdvanceMu.Unlock()
+ wn.lastNetworkAdvance = time.Now().UTC()
+}
+
// sendPeerConnectionsTelemetryStatus sends a snapshot of the currently connected peers
// to the telemetry server. Internally, it's using a timer to ensure that it would only
// send the information once every hour ( configurable via PeerConnectionsUpdateInterval )
@@ -1286,6 +1483,7 @@ func (wn *WebsocketNetwork) sendPeerConnectionsTelemetryStatus() {
if peer.outgoing {
connDetail.Address = justHost(peer.conn.RemoteAddr().String())
connDetail.Endpoint = peer.GetAddress()
+ connDetail.MessageDelay = peer.peerMessageDelay
connectionDetails.OutgoingPeers = append(connectionDetails.OutgoingPeers, connDetail)
} else {
connDetail.Address = peer.OriginAddress()
@@ -1406,7 +1604,7 @@ func (wn *WebsocketNetwork) peersToPing() []*wsPeer {
}
func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) []string {
- srvPhonebook, err := tools_network.ReadFromSRV("algobootstrap", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress)
+ srvPhonebook, err := tools_network.ReadFromSRV("algobootstrap", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
if err != nil {
// only log this warning on testnet or devnet
if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet {
@@ -1417,9 +1615,15 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) []string {
return srvPhonebook
}
-// ProtocolVersionHeader HTTP header for protocol version. TODO: this may be unneeded redundance since we also have url versioning "/v1/..."
+// ProtocolVersionHeader HTTP header for protocol version.
const ProtocolVersionHeader = "X-Algorand-Version"
+// ProtocolAcceptVersionHeader HTTP header for accept protocol version. Clients use this to advertise supported protocol versions.
+const ProtocolAcceptVersionHeader = "X-Algorand-Accept-Version"
+
+// SupportedProtocolVersions contains the list of supported protocol versions by this node ( in order of preference ).
+var SupportedProtocolVersions = []string{"2.1", "1"}
+
// ProtocolVersion is the current version attached to the ProtocolVersionHeader header
const ProtocolVersion = "1"
@@ -1526,10 +1730,10 @@ func (wn *WebsocketNetwork) numOutgoingPending() int {
return len(wn.tryConnectAddrs)
}
-var websocketDialer = websocket.Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: 45 * time.Second,
- EnableCompression: false,
+// GetRoundTripper returns an http.Transport that limits the number of connection
+// to comply with connectionsRateLimitingCount.
+func (wn *WebsocketNetwork) GetRoundTripper() http.RoundTripper {
+ return &wn.transport
}
// tryConnect opens websocket connection and checks initial connection parameters.
@@ -1544,9 +1748,22 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
defer wn.wg.Done()
requestHeader := make(http.Header)
wn.setHeaders(requestHeader)
+ for _, supportedProtocolVersion := range SupportedProtocolVersions {
+ requestHeader.Add(ProtocolAcceptVersionHeader, supportedProtocolVersion)
+ }
+ // for backward compatibility, include the ProtocolVersion header as well.
+ requestHeader.Set(ProtocolVersionHeader, ProtocolVersion)
SetUserAgentHeader(requestHeader)
myInstanceName := wn.log.GetInstanceName()
requestHeader.Set(InstanceNameHeader, myInstanceName)
+ var websocketDialer = websocket.Dialer{
+ Proxy: http.ProxyFromEnvironment,
+ HandshakeTimeout: 45 * time.Second,
+ EnableCompression: false,
+ NetDialContext: wn.dialer.DialContext,
+ NetDial: wn.dialer.Dial,
+ }
+
conn, response, err := websocketDialer.DialContext(wn.ctx, gossipAddr, requestHeader)
if err != nil {
if err == websocket.ErrBadHandshake {
@@ -1583,15 +1800,30 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
}
// no need to test the response.StatusCode since we know it's going to be http.StatusSwitchingProtocols, as it's already being tested inside websocketDialer.DialContext.
- // checking the headers here is abit redundent; the server has already verified that the headers match. But we will need this in the future -
- // once our server would support multiple protocols, we would need to verify here that we use the correct protocol, out of the "proposed" protocols we have provided in the
- // request headers.
- if !wn.checkServerResponseVariables(response.Header, gossipAddr) {
+ // we need to examine the headers here to extract which protocol version we should be using.
+ responseHeaderOk, matchingVersion := wn.checkServerResponseVariables(response.Header, gossipAddr)
+ if !responseHeaderOk {
// The error was already logged, so no need to log again.
return
}
- peer := &wsPeer{wsPeerCore: wsPeerCore{net: wn, rootURL: addr}, conn: conn, outgoing: true, incomingMsgFilter: wn.incomingMsgFilter, createTime: time.Now()}
+ throttledConnection := false
+ if atomic.AddInt32(&wn.throttledOutgoingConnections, int32(-1)) >= 0 {
+ throttledConnection = true
+ } else {
+ atomic.AddInt32(&wn.throttledOutgoingConnections, int32(1))
+ }
+
+ peer := &wsPeer{
+ wsPeerCore: makePeerCore(wn, addr, wn.GetRoundTripper(), "" /* origin */),
+ conn: conn,
+ outgoing: true,
+ incomingMsgFilter: wn.incomingMsgFilter,
+ createTime: time.Now(),
+ connMonitor: wn.connPerfMonitor,
+ throttledOutgoingConnection: throttledConnection,
+ version: matchingVersion,
+ }
peer.TelemetryGUID, peer.InstanceName, _ = getCommonHeaders(response.Header)
peer.init(wn.config, wn.outgoingMessagesBufferSize)
wn.addPeer(peer)
@@ -1603,6 +1835,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
HostName: peer.TelemetryGUID,
Incoming: false,
InstanceName: peer.InstanceName,
+ Endpoint: peer.GetAddress(),
})
peers.Set(float64(wn.NumPeers()), nil)
@@ -1624,18 +1857,25 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
}
// NewWebsocketNetwork constructor for websockets based gossip network
-func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebook Phonebook, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) {
- outerPhonebook := MakeMultiPhonebook()
- outerPhonebook.AddOrUpdatePhonebook("default", phonebook)
- wn = &WebsocketNetwork{log: log, config: config, phonebook: outerPhonebook, GenesisID: genesisID, NetworkID: networkID}
+func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) {
+ phonebook := MakePhonebook(config.ConnectionsRateLimitingCount,
+ time.Duration(config.ConnectionsRateLimitingWindowSeconds)*time.Second)
+ phonebook.ReplacePeerList(phonebookAddresses, config.DNSBootstrapID)
+ wn = &WebsocketNetwork{
+ log: log,
+ config: config,
+ phonebook: phonebook,
+ GenesisID: genesisID,
+ NetworkID: networkID,
+ }
wn.setup()
return wn, nil
}
// NewWebsocketGossipNode constructs a websocket network node and returns it as a GossipNode interface implementation
-func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebook Phonebook, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) {
- return NewWebsocketNetwork(log, config, phonebook, genesisID, networkID)
+func NewWebsocketGossipNode(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (gn GossipNode, err error) {
+ return NewWebsocketNetwork(log, config, phonebookAddresses, genesisID, networkID)
}
// SetPrioScheme specifies the network priority scheme for a network node
@@ -1652,7 +1892,11 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
// first logging, then take the lock and do the actual accounting.
// definitely don't change this to do the logging while holding the lock.
localAddr, _ := wn.Address()
- wn.log.With("event", "Disconnected").With("remote", peer.rootURL).With("local", localAddr).Infof("Peer %s disconnected: %s", peer.rootURL, reason)
+ logEntry := wn.log.With("event", "Disconnected").With("remote", peer.rootURL).With("local", localAddr)
+ if peer.outgoing && peer.peerMessageDelay > 0 {
+ logEntry = logEntry.With("messageDelay", peer.peerMessageDelay)
+ }
+ logEntry.Infof("Peer %s disconnected: %s", peer.rootURL, reason)
peerAddr := peer.OriginAddress()
// we might be able to get addr out of conn, or it might be closed
if peerAddr == "" && peer.conn != nil {
@@ -1671,15 +1915,20 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
peerAddr = justHost(peer.rootURL)
}
}
+ eventDetails := telemetryspec.PeerEventDetails{
+ Address: peerAddr,
+ HostName: peer.TelemetryGUID,
+ Incoming: !peer.outgoing,
+ InstanceName: peer.InstanceName,
+ }
+ if peer.outgoing {
+ eventDetails.Endpoint = peer.GetAddress()
+ eventDetails.MessageDelay = peer.peerMessageDelay
+ }
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.DisconnectPeerEvent,
telemetryspec.DisconnectPeerEventDetails{
- PeerEventDetails: telemetryspec.PeerEventDetails{
- Address: peerAddr,
- HostName: peer.TelemetryGUID,
- Incoming: !peer.outgoing,
- InstanceName: peer.InstanceName,
- },
- Reason: string(reason),
+ PeerEventDetails: eventDetails,
+ Reason: string(reason),
})
peers.Set(float64(wn.NumPeers()), nil)
@@ -1691,6 +1940,9 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
if peer.peerIndex < len(wn.peers) && wn.peers[peer.peerIndex] == peer {
heap.Remove(peersHeap{wn}, peer.peerIndex)
wn.prioTracker.removePeer(peer)
+ if peer.throttledOutgoingConnection {
+ atomic.AddInt32(&wn.throttledOutgoingConnections, int32(1))
+ }
}
wn.countPeersSetGauges()
}
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index b4d6780f57..40059ee1dd 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -87,6 +87,16 @@ func (e *oneEntryPhonebook) UpdateRetryAfter(addr string, retryAfter time.Time)
}
}
+func (e *oneEntryPhonebook) GetConnectionWaitTime(addr string) (addrInPhonebook bool,
+ waitTime time.Duration, provisionalTime time.Time) {
+ var t time.Time
+ return false, 0, t
+}
+
+func (e *oneEntryPhonebook) UpdateConnectionTime(addr string, t time.Time) bool {
+ return false
+}
+
var defaultConfig config.Local
func init() {
@@ -106,7 +116,7 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local) *Websocket
wn := &WebsocketNetwork{
log: log,
config: conf,
- phonebook: MakeMultiPhonebook(),
+ phonebook: MakePhonebook(1, 1*time.Millisecond),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
}
@@ -188,8 +198,6 @@ func newMessageCounter(t testing.TB, target int) *messageCounterHandler {
return &messageCounterHandler{target: target, done: make(chan struct{}), t: t}
}
-const debugTag = protocol.Tag("DD")
-
func TestWebsocketNetworkStartStop(t *testing.T) {
netA := makeTestWebsocketNode(t)
netA.Start()
@@ -218,12 +226,12 @@ func TestWebsocketNetworkBasic(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 2)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -231,8 +239,8 @@ func TestWebsocketNetworkBasic(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
- netA.Broadcast(context.Background(), debugTag, []byte("foo"), false, nil)
- netA.Broadcast(context.Background(), debugTag, []byte("bar"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil)
select {
case <-counterDone:
@@ -252,12 +260,12 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 2)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -268,9 +276,9 @@ func TestWebsocketNetworkUnicast(t *testing.T) {
require.Equal(t, 1, len(netA.peers))
require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn)))
peerB := netA.peers[0]
- err := peerB.Unicast(context.Background(), []byte("foo"), debugTag)
+ err := peerB.Unicast(context.Background(), []byte("foo"), protocol.TxnTag)
assert.NoError(t, err)
- err = peerB.Unicast(context.Background(), []byte("bar"), debugTag)
+ err = peerB.Unicast(context.Background(), []byte("bar"), protocol.TxnTag)
assert.NoError(t, err)
select {
@@ -294,12 +302,12 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 2)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -307,8 +315,8 @@ func TestWebsocketNetworkNoAddress(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
- netA.Broadcast(context.Background(), debugTag, []byte("foo"), false, nil)
- netA.Broadcast(context.Background(), debugTag, []byte("bar"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil)
select {
case <-counterDone:
@@ -330,8 +338,8 @@ func lineNetwork(t *testing.T, numNodes int) (nodes []*WebsocketNetwork, counter
if i > 0 {
addrPrev, postListen := nodes[i-1].Address()
require.True(t, postListen)
- nodes[i].phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrPrev})
- nodes[i].RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: &counters[i]}})
+ nodes[i].phonebook.ReplacePeerList([]string{addrPrev}, "default")
+ nodes[i].RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: &counters[i]}})
}
nodes[i].Start()
counters[i].t = t
@@ -386,7 +394,7 @@ func TestLineNetwork(t *testing.T) {
sendTime := time.Now().UnixNano()
var timeblob [8]byte
binary.LittleEndian.PutUint64(timeblob[:], uint64(sendTime))
- nodes[0].Broadcast(context.Background(), debugTag, timeblob[:], true, nil)
+ nodes[0].Broadcast(context.Background(), protocol.TxnTag, timeblob[:], true, nil)
}
select {
case <-counterDone:
@@ -677,7 +685,7 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor
wn := &WebsocketNetwork{
log: logging.TestingLog(t).With("node", nodename),
config: dc,
- phonebook: MakeMultiPhonebook(),
+ phonebook: MakePhonebook(1, 1*time.Millisecond),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
}
@@ -698,12 +706,12 @@ func TestDupFilter(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := &messageCounterHandler{t: t, limit: 1, done: make(chan struct{})}
netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.AgreementVoteTag, MessageHandler: counter}})
- debugTag2 := protocol.Tag("D2")
+ debugTag2 := protocol.ProposalPayloadTag
counter2 := &messageCounterHandler{t: t, limit: 1, done: make(chan struct{})}
netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag2, MessageHandler: counter2}})
@@ -711,7 +719,7 @@ func TestDupFilter(t *testing.T) {
require.True(t, postListen)
netC := makeTestFilterWebsocketNode(t, "c")
netC.config.GossipFanout = 1
- netC.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrB})
+ netC.phonebook.ReplacePeerList([]string{addrB}, "default")
netC.Start()
defer netC.Stop()
@@ -777,9 +785,8 @@ func TestGetPeers(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- phba := &oneEntryPhonebook{addr: addrA}
- phbMulti := MakeMultiPhonebook()
- phbMulti.AddOrUpdatePhonebook("phba", phba)
+ phbMulti := MakePhonebook(1, 1*time.Millisecond)
+ phbMulti.ReplacePeerList([]string{addrA}, "phba")
netB.phonebook = phbMulti
netB.Start()
defer netB.Stop()
@@ -790,8 +797,7 @@ func TestGetPeers(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
t.Log("b ready")
- ph := ArrayPhonebook{Entries: phonebookEntries{"a": phonebookData{}, "b": phonebookData{}, "c": phonebookData{}}}
- phbMulti.AddOrUpdatePhonebook("ph", &ph)
+ phbMulti.ReplacePeerList([]string{"a", "b", "c"}, "ph")
//addrB, _ := netB.Address()
@@ -844,12 +850,12 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
returns := make(chan uint64, 100)
bhandler := benchmarkHandler{returns}
- netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: &bhandler}})
+ netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: &bhandler}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -872,7 +878,7 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) {
}
msg := make([]byte, msgSize)
binary.LittleEndian.PutUint64(msg, uint64(i))
- err := netA.Broadcast(context.Background(), debugTag, msg, true, nil)
+ err := netA.Broadcast(context.Background(), protocol.TxnTag, msg, true, nil)
if err != nil {
t.Errorf("error on broadcast: %v", err)
return
@@ -924,7 +930,7 @@ func TestWebsocketNetworkPrio(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
@@ -968,8 +974,8 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
netB := makeTestWebsocketNode(t)
netB.SetPrioScheme(&prioB)
netB.config.GossipFanout = 1
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
- netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counterB}})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
+ netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counterB}})
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
@@ -981,8 +987,8 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
netC := makeTestWebsocketNode(t)
netC.SetPrioScheme(&prioC)
netC.config.GossipFanout = 1
- netC.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
- netC.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counterC}})
+ netC.phonebook.ReplacePeerList([]string{addrA}, "default")
+ netC.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counterC}})
netC.Start()
defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }()
@@ -999,7 +1005,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) {
}
waitReady(t, netA, time.After(time.Second))
- netA.Broadcast(context.Background(), debugTag, nil, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, nil, true, nil)
select {
case <-counterBdone:
@@ -1049,7 +1055,7 @@ func TestWebsocketNetworkManyIdle(t *testing.T) {
for i := 0; i < numClients; i++ {
client := makeTestWebsocketNodeWithConfig(t, clientConf)
client.config.GossipFanout = 1
- client.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: relayAddr})
+ client.phonebook.ReplacePeerList([]string{relayAddr}, "default")
client.Start()
defer client.Stop()
@@ -1102,29 +1108,35 @@ func TestWebsocketNetwork_checkServerResponseVariables(t *testing.T) {
header.Set(ProtocolVersionHeader, ProtocolVersion)
header.Set(NodeRandomHeader, wn.RandomID+"tag")
header.Set(GenesisHeader, wn.GenesisID)
- require.Equal(t, true, wn.checkServerResponseVariables(header, "addressX"))
+ responseVariableOk, matchingVersion := wn.checkServerResponseVariables(header, "addressX")
+ require.Equal(t, true, responseVariableOk)
+ require.Equal(t, matchingVersion, ProtocolVersion)
noVersionHeader := http.Header{}
noVersionHeader.Set(NodeRandomHeader, wn.RandomID+"tag")
noVersionHeader.Set(GenesisHeader, wn.GenesisID)
- require.Equal(t, false, wn.checkServerResponseVariables(noVersionHeader, "addressX"))
+ responseVariableOk, matchingVersion = wn.checkServerResponseVariables(noVersionHeader, "addressX")
+ require.Equal(t, false, responseVariableOk)
noRandomHeader := http.Header{}
noRandomHeader.Set(ProtocolVersionHeader, ProtocolVersion)
noRandomHeader.Set(GenesisHeader, wn.GenesisID)
- require.Equal(t, false, wn.checkServerResponseVariables(noRandomHeader, "addressX"))
+ responseVariableOk, _ = wn.checkServerResponseVariables(noRandomHeader, "addressX")
+ require.Equal(t, false, responseVariableOk)
sameRandomHeader := http.Header{}
sameRandomHeader.Set(ProtocolVersionHeader, ProtocolVersion)
sameRandomHeader.Set(NodeRandomHeader, wn.RandomID)
sameRandomHeader.Set(GenesisHeader, wn.GenesisID)
- require.Equal(t, false, wn.checkServerResponseVariables(sameRandomHeader, "addressX"))
+ responseVariableOk, _ = wn.checkServerResponseVariables(sameRandomHeader, "addressX")
+ require.Equal(t, false, responseVariableOk)
differentGenesisIDHeader := http.Header{}
differentGenesisIDHeader.Set(ProtocolVersionHeader, ProtocolVersion)
differentGenesisIDHeader.Set(NodeRandomHeader, wn.RandomID+"tag")
differentGenesisIDHeader.Set(GenesisHeader, wn.GenesisID+"tag")
- require.Equal(t, false, wn.checkServerResponseVariables(differentGenesisIDHeader, "addressX"))
+ responseVariableOk, _ = wn.checkServerResponseVariables(differentGenesisIDHeader, "addressX")
+ require.Equal(t, false, responseVariableOk)
}
func (wn *WebsocketNetwork) broadcastWithTimestamp(tag protocol.Tag, data []byte, when time.Time) error {
@@ -1156,12 +1168,12 @@ func TestDelayedMessageDrop(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
counter := newMessageCounter(t, 5)
counterDone := counter.done
- netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netB.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counter}})
readyTimeout := time.NewTimer(2 * time.Second)
waitReady(t, netA, readyTimeout.C)
@@ -1169,7 +1181,7 @@ func TestDelayedMessageDrop(t *testing.T) {
currentTime := time.Now()
for i := 0; i < 10; i++ {
- err := netA.broadcastWithTimestamp(debugTag, []byte("foo"), currentTime.Add(time.Hour*time.Duration(i-5)))
+ err := netA.broadcastWithTimestamp(protocol.TxnTag, []byte("foo"), currentTime.Add(time.Hour*time.Duration(i-5)))
require.NoErrorf(t, err, "No error was expected")
}
@@ -1186,7 +1198,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
wn := &WebsocketNetwork{
log: log,
config: defaultConfig,
- phonebook: MakeMultiPhonebook(),
+ phonebook: MakePhonebook(1, 1*time.Millisecond),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
slowWritingPeerMonitorInterval: time.Millisecond * 50,
@@ -1206,7 +1218,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
addrA, postListen := netA.Address()
require.True(t, postListen)
t.Log(addrA)
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
@@ -1240,7 +1252,7 @@ func TestForceMessageRelaying(t *testing.T) {
wn := &WebsocketNetwork{
log: log,
config: defaultConfig,
- phonebook: MakeMultiPhonebook(),
+ phonebook: MakePhonebook(1, 1*time.Millisecond),
GenesisID: "go-test-network-genesis",
NetworkID: config.Devtestnet,
}
@@ -1254,7 +1266,7 @@ func TestForceMessageRelaying(t *testing.T) {
counter := newMessageCounter(t, 5)
counterDone := counter.done
- netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counter}})
netA.Start()
addrA, postListen := netA.Address()
require.Truef(t, postListen, "Listening network failed to start")
@@ -1263,14 +1275,14 @@ func TestForceMessageRelaying(t *testing.T) {
noAddressConfig.NetAddress = ""
netB := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
netB.config.GossipFanout = 1
- netB.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
netB.Start()
defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
noAddressConfig.ForceRelayMessages = true
netC := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
netC.config.GossipFanout = 1
- netC.phonebook.AddOrUpdatePhonebook("default", &oneEntryPhonebook{addr: addrA})
+ netC.phonebook.ReplacePeerList([]string{addrA}, "default")
netC.Start()
defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }()
@@ -1281,9 +1293,9 @@ func TestForceMessageRelaying(t *testing.T) {
// send 5 messages from both netB and netC to netA
for i := 0; i < 5; i++ {
- err := netB.Relay(context.Background(), debugTag, []byte{1, 2, 3}, true, nil)
+ err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil)
require.NoError(t, err)
- err = netC.Relay(context.Background(), debugTag, []byte{1, 2, 3}, true, nil)
+ err = netC.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil)
require.NoError(t, err)
}
@@ -1299,13 +1311,13 @@ func TestForceMessageRelaying(t *testing.T) {
netA.ClearHandlers()
counter = newMessageCounter(t, 10)
counterDone = counter.done
- netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: protocol.TxnTag, MessageHandler: counter}})
// hack the relayMessages on the netB so that it would start sending messages.
netB.relayMessages = true
// send additional 10 messages from netB
for i := 0; i < 10; i++ {
- err := netB.Relay(context.Background(), debugTag, []byte{1, 2, 3}, true, nil)
+ err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil)
require.NoError(t, err)
}
@@ -1323,3 +1335,209 @@ func TestSetUserAgentHeader(t *testing.T) {
require.Equal(t, 1, len(headers))
t.Log(headers)
}
+
+func TestCheckProtocolVersionMatch(t *testing.T) {
+	// note - this test changes the global variable SupportedProtocolVersions and therefore cannot be parallelized.
+ originalSupportedProtocolVersions := SupportedProtocolVersions
+ defer func() {
+ SupportedProtocolVersions = originalSupportedProtocolVersions
+ }()
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel))
+ wn := &WebsocketNetwork{
+ log: log,
+ config: defaultConfig,
+ phonebook: MakePhonebook(1, 1*time.Millisecond),
+ GenesisID: "go-test-network-genesis",
+ NetworkID: config.Devtestnet,
+ }
+ wn.setup()
+
+ SupportedProtocolVersions = []string{"2", "1"}
+
+ header1 := make(http.Header)
+ header1.Add(ProtocolAcceptVersionHeader, "1")
+ header1.Add(ProtocolVersionHeader, "3")
+ matchingVersion, otherVersion := wn.checkProtocolVersionMatch(header1)
+ require.Equal(t, "1", matchingVersion)
+ require.Equal(t, "", otherVersion)
+
+ header2 := make(http.Header)
+ header2.Add(ProtocolAcceptVersionHeader, "3")
+ header2.Add(ProtocolAcceptVersionHeader, "4")
+ header2.Add(ProtocolVersionHeader, "1")
+ matchingVersion, otherVersion = wn.checkProtocolVersionMatch(header2)
+ require.Equal(t, "1", matchingVersion)
+ require.Equal(t, "1", otherVersion)
+
+ header3 := make(http.Header)
+ header3.Add(ProtocolVersionHeader, "3")
+ matchingVersion, otherVersion = wn.checkProtocolVersionMatch(header3)
+ require.Equal(t, "", matchingVersion)
+ require.Equal(t, "3", otherVersion)
+}
+
+func handleTopicRequest(msg IncomingMessage) (out OutgoingMessage) {
+
+ topics, err := UnmarshallTopics(msg.Data)
+ if err != nil {
+ return
+ }
+
+ val1b, f := topics.GetValue("val1")
+ if !f {
+ return
+ }
+ val2b, f := topics.GetValue("val2")
+ if !f {
+ return
+ }
+ val1 := int(val1b[0])
+ val2 := int(val2b[0])
+
+ respTopics := Topics{
+ Topic{
+ key: "value",
+ data: []byte{byte(val1 + val2)},
+ },
+ }
+ return OutgoingMessage{
+ Action: Respond,
+ Tag: protocol.TopicMsgRespTag,
+ Topics: respTopics,
+ }
+}
+
+// Set up two nodes, test topics send/receive is working
+func TestWebsocketNetworkTopicRoundtrip(t *testing.T) {
+ var topicMsgReqTag Tag = protocol.UniCatchupReqTag
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.Start()
+ defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ netB := makeTestWebsocketNode(t)
+ netB.config.GossipFanout = 1
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
+ netB.Start()
+ defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+
+ netB.RegisterHandlers([]TaggedMessageHandler{
+ TaggedMessageHandler{
+ Tag: topicMsgReqTag,
+ MessageHandler: HandlerFunc(handleTopicRequest),
+ },
+ })
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ t.Log("a ready")
+ waitReady(t, netB, readyTimeout.C)
+ t.Log("b ready")
+
+ peerA := netA.peers[0]
+
+ topics := Topics{
+ Topic{
+ key: "command",
+ data: []byte("add"),
+ },
+ Topic{
+ key: "val1",
+ data: []byte{1},
+ },
+ Topic{
+ key: "val2",
+ data: []byte{4},
+ },
+ }
+
+ resp, err := peerA.Request(context.Background(), topicMsgReqTag, topics)
+ assert.NoError(t, err)
+
+ sum, found := resp.Topics.GetValue("value")
+ assert.Equal(t, true, found)
+ assert.Equal(t, 5, int(sum[0]))
+}
+
+// Set up two nodes, have one of them request a certain message tag mask, and verify the other follow that.
+func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
+ netA := makeTestWebsocketNode(t)
+ netA.config.GossipFanout = 1
+ netA.config.EnablePingHandler = false
+
+ netA.Start()
+ defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+ netB := makeTestWebsocketNode(t)
+ netB.config.GossipFanout = 1
+ netB.config.EnablePingHandler = false
+ addrA, postListen := netA.Address()
+ require.True(t, postListen)
+ t.Log(addrA)
+ netB.phonebook.ReplacePeerList([]string{addrA}, "default")
+ netB.Start()
+ defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+
+ incomingMsgSync := deadlock.Mutex{}
+ msgCounters := make(map[protocol.Tag]int)
+ messageArriveWg := sync.WaitGroup{}
+ msgHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ incomingMsgSync.Lock()
+ defer incomingMsgSync.Unlock()
+ msgCounters[msg.Tag] = msgCounters[msg.Tag] + 1
+ messageArriveWg.Done()
+ return
+ }
+ messageFilterArriveWg := sync.WaitGroup{}
+ messageFilterArriveWg.Add(1)
+ waitMessageArriveHandler := func(msg IncomingMessage) (out OutgoingMessage) {
+ messageFilterArriveWg.Done()
+ return
+ }
+
+ // register all the handlers.
+ taggedHandlers := []TaggedMessageHandler{}
+ for tag := range defaultSendMessageTags {
+ taggedHandlers = append(taggedHandlers, TaggedMessageHandler{
+ Tag: tag,
+ MessageHandler: HandlerFunc(msgHandler),
+ })
+ }
+ netB.RegisterHandlers(taggedHandlers)
+ netA.RegisterHandlers([]TaggedMessageHandler{
+ TaggedMessageHandler{
+ Tag: protocol.AgreementVoteTag,
+ MessageHandler: HandlerFunc(waitMessageArriveHandler),
+ }})
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ waitReady(t, netB, readyTimeout.C)
+
+ // have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag
+ netB.Broadcast(context.Background(), protocol.MsgOfInterestTag, MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag}), true, nil)
+ // send another message which we can track, so that we'll know that the first message was delivered.
+ netB.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ messageFilterArriveWg.Wait()
+
+ messageArriveWg.Add(5 * 2) // we're expecting exactly 10 messages.
+ // send 5 messages of few types.
+ for i := 0; i < 5; i++ {
+ netA.Broadcast(context.Background(), protocol.AgreementVoteTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.TxnTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.UniEnsBlockResTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ netA.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
+ }
+ // wait until all the expected messages arrive.
+ messageArriveWg.Wait()
+ for tag, count := range msgCounters {
+ if tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag {
+ require.Equal(t, 5, count)
+ } else {
+ require.Equal(t, 0, count)
+ }
+ }
+}
diff --git a/network/wsPeer.go b/network/wsPeer.go
index 379f02198e..c01a61f3b8 100644
--- a/network/wsPeer.go
+++ b/network/wsPeer.go
@@ -18,6 +18,7 @@ package network
import (
"context"
+ "encoding/binary"
"fmt"
"io"
"math/rand"
@@ -60,6 +61,25 @@ var duplicateNetworkMessageReceivedBytesTotal = metrics.MakeCounter(metrics.Dupl
var outgoingNetworkMessageFilteredOutTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutTotal)
var outgoingNetworkMessageFilteredOutBytesTotal = metrics.MakeCounter(metrics.OutgoingNetworkMessageFilteredOutBytesTotal)
+// defaultSendMessageTags is the default list of messages which a peer would
+// allow to be sent without receiving any explicit request.
+var defaultSendMessageTags = map[protocol.Tag]bool{
+ protocol.AgreementVoteTag: true,
+ protocol.MsgDigestSkipTag: true,
+ protocol.NetPrioResponseTag: true,
+ protocol.PingTag: true,
+ protocol.PingReplyTag: true,
+ protocol.ProposalPayloadTag: true,
+ protocol.TopicMsgRespTag: true,
+ protocol.MsgOfInterestTag: true,
+ protocol.TxnTag: true,
+ protocol.UniCatchupReqTag: true,
+ protocol.UniEnsBlockReqTag: true,
+ protocol.UniEnsBlockResTag: true,
+ protocol.UniCatchupResTag: true,
+ protocol.VoteBundleTag: true,
+}
+
// interface allows substituting debug implementation for *websocket.Conn
type wsPeerWebsocketConn interface {
RemoteAddr() net.Addr
@@ -72,15 +92,16 @@ type wsPeerWebsocketConn interface {
type sendMessage struct {
data []byte
- enqueued time.Time // the time at which the message was first generated
- peerEnqueued time.Time // the time at which the peer was attempting to enqueue the message
+ enqueued time.Time // the time at which the message was first generated
+ peerEnqueued time.Time // the time at which the peer was attempting to enqueue the message
+	msgTags      map[protocol.Tag]bool // when msgTags is specified ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data accompanies this message.
}
// wsPeerCore also works for non-connected peers we want to do HTTP GET from
type wsPeerCore struct {
net *WebsocketNetwork
rootURL string
- originAddress string
+ originAddress string // incoming connection remote host
client http.Client
}
@@ -92,6 +113,13 @@ const disconnectReadError disconnectReason = "ReadError"
const disconnectWriteError disconnectReason = "WriteError"
const disconnectIdleConn disconnectReason = "IdleConnection"
const disconnectSlowConn disconnectReason = "SlowConnection"
+const disconnectLeastPerformingPeer disconnectReason = "LeastPerformingPeer"
+const disconnectCliqueResolve disconnectReason = "CliqueResolving"
+
+// Response is the structure holding the response from the server
+type Response struct {
+ Topics Topics
+}
type wsPeer struct {
	// lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer.
@@ -148,6 +176,34 @@ type wsPeer struct {
// createTime is the time at which the connection was established with the peer.
createTime time.Time
+
+	// peer version ( this is one of the versions supported by the current node and listed in SupportedProtocolVersions )
+ version string
+
+ // Nonce used to uniquely identify requests
+ requestNonce uint64
+
+ // responseChannels used by the client to wait on the response of the request
+ responseChannels map[uint64]chan *Response
+
+ // responseChannelsMutex guards the operations of responseChannels
+ responseChannelsMutex deadlock.RWMutex
+
+	// sendMessageTag is a map of allowed messages to send to a peer. We don't use any synchronization on this map, and the
+	// only guarantee is that it's being accessed only during startup and/or by the sending loop go routine.
+ sendMessageTag map[protocol.Tag]bool
+
+ // connMonitor used to measure the relative performance of the connection
+ // compared to the other outgoing connections. Incoming connections would have this
+ // field set to nil.
+ connMonitor *connectionPerformanceMonitor
+
+	// peerMessageDelay is calculated by the connection monitor; it's the relative average per-message delay.
+ peerMessageDelay int64
+
+	// throttledOutgoingConnection determines if this outgoing connection will be throttled based on its
+ // performance or not. Throttled connections are more likely to be short-lived connections.
+ throttledOutgoingConnection bool
}
// HTTPPeer is what the opaque Peer might be.
@@ -167,6 +223,20 @@ type UnicastPeer interface {
GetAddress() string
// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent.
Unicast(ctx context.Context, data []byte, tag protocol.Tag) error
+ // Version returns the matching version from network.SupportedProtocolVersions
+ Version() string
+ Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error)
+ Respond(ctx context.Context, reqMsg IncomingMessage, topics Topics) (e error)
+}
+
+// Create a wsPeerCore object
+func makePeerCore(net *WebsocketNetwork, rootURL string, roundTripper http.RoundTripper, originAddress string) wsPeerCore {
+ return wsPeerCore{
+ net: net,
+ rootURL: rootURL,
+ originAddress: originAddress,
+ client: http.Client{Transport: roundTripper},
+ }
}
// GetAddress returns the root url to use to connect to this peer.
@@ -186,6 +256,11 @@ func (wp *wsPeerCore) PrepareURL(rawURL string) string {
return strings.Replace(rawURL, "{genesisID}", wp.net.GenesisID, -1)
}
+// Version returns the matching version from network.SupportedProtocolVersions
+func (wp *wsPeer) Version() string {
+ return wp.version
+}
+
// Unicast sends the given bytes to this specific peer. Does not wait for message to be sent.
// (Implements UnicastPeer)
func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error {
@@ -196,7 +271,7 @@ func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) err
copy(mbytes, tbytes)
copy(mbytes[len(tbytes):], msg)
var digest crypto.Digest
- if tag != protocol.MsgSkipTag && len(msg) >= messageFilterSize {
+ if tag != protocol.MsgDigestSkipTag && len(msg) >= messageFilterSize {
digest = crypto.Hash(mbytes)
}
@@ -209,6 +284,35 @@ func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) err
return err
}
+// Respond sends the response of a request message
+func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseTopics Topics) (e error) {
+
+ // Get the hash/key of the request message
+ requestHash := hashTopics(reqMsg.Data)
+
+ // Add the request hash
+ requestHashData := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(requestHashData, requestHash)
+ responseTopics = append(responseTopics, Topic{key: requestHashKey, data: requestHashData})
+
+ // Serialize the topics
+ serializedMsg := responseTopics.MarshallTopics()
+
+ // Send serializedMsg
+ select {
+ case wp.sendBufferBulk <- sendMessage{
+ data: append([]byte(protocol.TopicMsgRespTag), serializedMsg...),
+ enqueued: time.Now(),
+ peerEnqueued: time.Now()}:
+ case <-wp.closing:
+ wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
+ return
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return nil
+}
+
// setup values not trivially assigned
func (wp *wsPeer) init(config config.Local, sendBufferLength int) {
wp.net.log.Debugf("wsPeer init outgoing=%v %#v", wp.outgoing, wp.rootURL)
@@ -216,6 +320,8 @@ func (wp *wsPeer) init(config config.Local, sendBufferLength int) {
wp.sendBufferHighPrio = make(chan sendMessage, sendBufferLength)
wp.sendBufferBulk = make(chan sendMessage, sendBufferLength)
atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano())
+ wp.responseChannels = make(map[uint64]chan *Response)
+ wp.sendMessageTag = defaultSendMessageTags
// processed is a channel that messageHandlerThread writes to
// when it's done with one of our messages, so that we can queue
@@ -300,7 +406,46 @@ func (wp *wsPeer) readLoop() {
networkReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil)
networkMessageReceivedTotal.AddUint64(1, nil)
msg.Sender = wp
- if msg.Tag == protocol.MsgSkipTag {
+
+ // for outgoing connections, we want to notify the connection monitor that we've received
+ // a message. The connection monitor would update it's statistics accordingly.
+ if wp.connMonitor != nil {
+ wp.connMonitor.Notify(&msg)
+ }
+
+ switch msg.Tag {
+ case protocol.MsgOfInterestTag:
+ // try to decode the message-of-interest
+ if wp.handleMessageOfInterest(msg) {
+ return
+ }
+ continue
+ case protocol.TopicMsgRespTag: // Handle Topic message
+ topics, err := UnmarshallTopics(msg.Data)
+ if err != nil {
+ wp.net.log.Warnf("wsPeer readLoop: could not read the message from: %s %s", wp.conn.RemoteAddr().String(), err)
+ continue
+ }
+ requestHash, found := topics.GetValue(requestHashKey)
+ if !found {
+ wp.net.log.Warnf("wsPeer readLoop: message from %s is missing the %s", wp.conn.RemoteAddr().String(), requestHashKey)
+ continue
+ }
+ hashKey, _ := binary.Uvarint(requestHash)
+ channel, found := wp.getAndRemoveResponseChannel(hashKey)
+ if !found {
+ wp.net.log.Warnf("wsPeer readLoop: received a message response from %s for a stale request", wp.conn.RemoteAddr().String())
+ continue
+ }
+
+ select {
+ case channel <- &Response{Topics: topics}:
+				// do nothing. writing was successful.
+ default:
+ wp.net.log.Warnf("wsPeer readLoop: channel blocked. Could not pass the response to the requester", wp.conn.RemoteAddr().String())
+ }
+ continue
+ case protocol.MsgDigestSkipTag:
// network maintenance message handled immediately instead of handing off to general handlers
wp.handleFilterMessage(msg)
continue
@@ -334,6 +479,42 @@ func (wp *wsPeer) readLoop() {
}
}
+func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) {
+ shutdown = false
+ // decode the message, and ensure it's a valid message.
+ msgTagsMap, err := unmarshallMessageOfInterest(msg.Data)
+ if err != nil {
+ wp.net.log.Warnf("wsPeer handleMessageOfInterest: could not unmarshall message from: %s %v", wp.conn.RemoteAddr().String(), err)
+ return
+ }
+ sm := sendMessage{
+ data: nil,
+ enqueued: time.Now(),
+ peerEnqueued: time.Now(),
+ msgTags: msgTagsMap,
+ }
+
+ // try to send the message to the send loop. The send loop will store the message locally and would use it.
+ // the rationale here is that this message is rarely sent, and we would benefit from having it being lock-free.
+ select {
+ case wp.sendBufferHighPrio <- sm:
+ return
+ case <-wp.closing:
+ wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
+ shutdown = true
+ default:
+ }
+
+ select {
+ case wp.sendBufferHighPrio <- sm:
+ case wp.sendBufferBulk <- sm:
+ case <-wp.closing:
+ wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String())
+ shutdown = true
+ }
+ return
+}
+
func (wp *wsPeer) readLoopCleanup() {
wp.internalClose(disconnectReadError)
wp.wg.Done()
@@ -360,6 +541,19 @@ func (wp *wsPeer) writeLoopSend(msg sendMessage) (exit bool) {
// just drop it, don't break the connection
return false
}
+ if msg.msgTags != nil {
+ // when msg.msgTags is non-nil, the read loop has received a message-of-interest message that we want to apply.
+ // in order to avoid any locking, it sent it to this queue so that we could set it as the new outgoing message tag filter.
+ wp.sendMessageTag = msg.msgTags
+ return false
+ }
+ // the tags are always 2 char long; note that this is safe since it's only being used for messages that we have generated locally.
+ tag := protocol.Tag(msg.data[:2])
+ if !wp.sendMessageTag[tag] {
+ // the peer isn't interested in this message.
+ return false
+ }
+
// check if this message was waiting in the queue for too long. If this is the case, return "true" to indicate that we want to close the connection.
msgWaitDuration := time.Now().Sub(msg.enqueued)
if msgWaitDuration > maxMessageQueueDuration {
@@ -512,3 +706,71 @@ func (wp *wsPeer) CheckSlowWritingPeer(now time.Time) bool {
timeSinceMessageCreated := now.Sub(time.Unix(0, ongoingMessageTime))
return timeSinceMessageCreated > maxMessageQueueDuration
}
+
+// getRequestNonce returns the byte representation of ever increasing uint64
+// The value is stored on wsPeer
+func (wp *wsPeer) getRequestNonce() []byte {
+ buf := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(buf, atomic.AddUint64(&wp.requestNonce, 1))
+ return buf
+}
+
+// Request submits the request to the server, waits for a response
+func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Response, e error) {
+
+ // Add nonce as a topic
+ nonce := wp.getRequestNonce()
+ topics = append(topics, Topic{key: "nonce", data: nonce})
+
+ // serialize the topics
+ serializedMsg := topics.MarshallTopics()
+
+ // Get the topics' hash
+ hash := hashTopics(serializedMsg)
+
+ // Make a response channel to wait on the server response
+ responseChannel := wp.makeResponseChannel(hash)
+ defer wp.getAndRemoveResponseChannel(hash)
+
+ // Send serializedMsg
+ select {
+ case wp.sendBufferBulk <- sendMessage{
+ data: append([]byte(tag), serializedMsg...),
+ enqueued: time.Now(),
+ peerEnqueued: time.Now()}:
+ case <-wp.closing:
+ e = fmt.Errorf("peer closing %s", wp.conn.RemoteAddr().String())
+ return
+ case <-ctx.Done():
+ return resp, ctx.Err()
+ }
+
+ // wait for the channel.
+ select {
+ case resp = <-responseChannel:
+ return resp, nil
+ case <-wp.closing:
+ e = fmt.Errorf("peer closing %s", wp.conn.RemoteAddr().String())
+ return
+ case <-ctx.Done():
+ return resp, ctx.Err()
+ }
+}
+
+func (wp *wsPeer) makeResponseChannel(key uint64) (responseChannel chan *Response) {
+ newChan := make(chan *Response, 1)
+ wp.responseChannelsMutex.Lock()
+ defer wp.responseChannelsMutex.Unlock()
+ wp.responseChannels[key] = newChan
+ return newChan
+}
+
+// getAndRemoveResponseChannel returns the channel and deletes the channel from the map
+func (wp *wsPeer) getAndRemoveResponseChannel(key uint64) (respChan chan *Response, found bool) {
+ wp.responseChannelsMutex.Lock()
+ defer wp.responseChannelsMutex.Unlock()
+ respChan, found = wp.responseChannels[key]
+ delete(wp.responseChannels, key)
+
+ return
+}
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index 8524412a7e..f6acda7138 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -17,6 +17,7 @@
package network
import (
+ "encoding/binary"
"testing"
"time"
@@ -37,3 +38,42 @@ func TestCheckSlowWritingPeer(t *testing.T) {
require.Equal(t, peer.CheckSlowWritingPeer(now), true)
}
+
+// TestGetRequestNonce tests if unique values are generated each time
+func TestGetRequestNonce(t *testing.T) {
+ numValues := 1000
+ peer := wsPeer{}
+ valueChannel := make(chan uint64, numValues)
+ for x := 0; x < numValues; x++ {
+ go func() {
+ ans := peer.getRequestNonce()
+ val, _ := binary.Uvarint(ans)
+ valueChannel <- val
+ }()
+ }
+
+ // Timeout
+ maxWait := time.After(2 * time.Second)
+
+ // check if all the values are unique
+ seenValue := make([]bool, numValues+1)
+ for x := 0; x < numValues; x++ {
+ select {
+ case val := <-valueChannel:
+ require.Equal(t, false, seenValue[val])
+ seenValue[val] = true
+ case <-maxWait:
+ break
+ }
+ }
+ // Check if all the values were generated
+ for x := 1; x <= numValues; x++ {
+ require.Equal(t, true, seenValue[x])
+ }
+}
+
+func TestDefaultMessageTagsLength(t *testing.T) {
+ for tag := range defaultSendMessageTags {
+ require.Equal(t, 2, len(tag))
+ }
+}
diff --git a/node/impls.go b/node/impls.go
index 92176bf68c..1310c63408 100644
--- a/node/impls.go
+++ b/node/impls.go
@@ -31,6 +31,7 @@ import (
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/util/execpool"
)
@@ -93,7 +94,7 @@ func (i *blockFactoryImpl) AssembleBlock(round basics.Round, deadline time.Time)
newEmptyBlk := bookkeeping.MakeBlock(prev)
- eval, err := i.l.StartEvaluator(newEmptyBlk.BlockHeader, i.tp, i.verificationPool)
+ eval, err := i.l.StartEvaluator(newEmptyBlk.BlockHeader)
if err != nil {
return nil, fmt.Errorf("could not make proposals at round %d: could not start evaluator: %v", round, err)
}
@@ -142,28 +143,38 @@ func (vb validatedBlock) Block() bookkeeping.Block {
type agreementLedger struct {
*data.Ledger
UnmatchedPendingCertificates chan catchup.PendingUnmatchedCertificate
+ n network.GossipNode
}
-func makeAgreementLedger(ledger *data.Ledger) agreementLedger {
+func makeAgreementLedger(ledger *data.Ledger, net network.GossipNode) agreementLedger {
return agreementLedger{
Ledger: ledger,
UnmatchedPendingCertificates: make(chan catchup.PendingUnmatchedCertificate, 1),
+ n: net,
}
}
// EnsureBlock implements agreement.LedgerWriter.EnsureBlock.
func (l agreementLedger) EnsureBlock(e bookkeeping.Block, c agreement.Certificate) {
l.Ledger.EnsureBlock(&e, c)
+ // let the network know that we've made some progress.
+ l.n.OnNetworkAdvance()
}
// EnsureValidatedBlock implements agreement.LedgerWriter.EnsureValidatedBlock.
func (l agreementLedger) EnsureValidatedBlock(ve agreement.ValidatedBlock, c agreement.Certificate) {
l.Ledger.EnsureValidatedBlock(ve.(validatedBlock).vb, c)
+ // let the network know that we've made some progress.
+ l.n.OnNetworkAdvance()
}
// EnsureDigest implements agreement.LedgerWriter.EnsureDigest.
-func (l agreementLedger) EnsureDigest(cert agreement.Certificate, quit chan struct{}, verifier *agreement.AsyncVoteVerifier) {
- certRoundReachedCh := l.Wait(cert.Round)
+func (l agreementLedger) EnsureDigest(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
+ // let the network know that we've made some progress.
+ // this might be controversial since we haven't received the entire block, but we did get the
+ // certificate, which means that network connections are likely to be just fine.
+ l.n.OnNetworkAdvance()
+
// clear out the pending certificates ( if any )
select {
case pendingCert := <-l.UnmatchedPendingCertificates:
@@ -171,35 +182,10 @@ func (l agreementLedger) EnsureDigest(cert agreement.Certificate, quit chan stru
default:
}
- // if the quit channel is closed, we want to exit here before placing the request on the UnmatchedPendingCertificates
- // channel.
- select {
- case <-quit:
- logging.Base().Debugf("EnsureDigest was asked to quit before we enqueue the certificate request")
- return
- default:
- }
-
// The channel send to UnmatchedPendingCertificates is guaranteed to be non-blocking since due to the fact that -
// 1. the channel capacity is 1
// 2. we just cleared a single item off this channel ( if there was any )
// 3. the EnsureDigest method is being called with the agreeement service guarantee
// 4. no other senders to this channel exists
- // we want to have this as a select statement to check if we neeed to exit before enqueueing the task to the catchup service.
l.UnmatchedPendingCertificates <- catchup.PendingUnmatchedCertificate{Cert: cert, VoteVerifier: verifier}
-
- defer func() {
- // clear out the content of the UnmatchedPendingCertificates channel if we somehow managed to get this round aquired by a different method ( i.e. regular catchup )
- select {
- case <-l.UnmatchedPendingCertificates:
- default:
- }
- }()
-
- select {
- case <-quit:
- logging.Base().Debugf("EnsureDigest was asked to quit before we could acquire the block")
- case <-certRoundReachedCh:
- // great! we've reached the desired round.
- }
}
diff --git a/node/msgp_gen.go b/node/msgp_gen.go
new file mode 100644
index 0000000000..7257a3e397
--- /dev/null
+++ b/node/msgp_gen.go
@@ -0,0 +1,413 @@
+package node
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "github.com/algorand/msgp/msgp"
+)
+
+// MarshalMsg implements msgp.Marshaler
+func (z *netPrioResponse) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(1)
+ var zb0001Mask uint8 /* 2 bits */
+ if (*z).Nonce == "" {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // string "Nonce"
+ o = append(o, 0xa5, 0x4e, 0x6f, 0x6e, 0x63, 0x65)
+ o = msgp.AppendString(o, (*z).Nonce)
+ }
+ }
+ return
+}
+
+func (_ *netPrioResponse) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*netPrioResponse)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *netPrioResponse) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).Nonce, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Nonce")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = netPrioResponse{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Nonce":
+ (*z).Nonce, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Nonce")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *netPrioResponse) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*netPrioResponse)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *netPrioResponse) Msgsize() (s int) {
+ s = 1 + 6 + msgp.StringPrefixSize + len((*z).Nonce)
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *netPrioResponse) MsgIsZero() bool {
+ return ((*z).Nonce == "")
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *netPrioResponseSigned) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(4)
+ var zb0001Mask uint8 /* 5 bits */
+ if (*z).Response.Nonce == "" {
+ zb0001Len--
+ zb0001Mask |= 0x1
+ }
+ if (*z).Round.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).Sender.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).Sig.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x8
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x1) == 0 { // if not empty
+ // string "Response"
+ o = append(o, 0xa8, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65)
+ // omitempty: check for empty values
+ zb0002Len := uint32(1)
+ var zb0002Mask uint8 /* 2 bits */
+ if (*z).Response.Nonce == "" {
+ zb0002Len--
+ zb0002Mask |= 0x1
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if (zb0002Mask & 0x1) == 0 { // if not empty
+ // string "Nonce"
+ o = append(o, 0xa5, 0x4e, 0x6f, 0x6e, 0x63, 0x65)
+ o = msgp.AppendString(o, (*z).Response.Nonce)
+ }
+ }
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "Round"
+ o = append(o, 0xa5, 0x52, 0x6f, 0x75, 0x6e, 0x64)
+ o, err = (*z).Round.MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "Sender"
+ o = append(o, 0xa6, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72)
+ o, err = (*z).Sender.MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "Sender")
+ return
+ }
+ }
+ if (zb0001Mask & 0x8) == 0 { // if not empty
+ // string "Sig"
+ o = append(o, 0xa3, 0x53, 0x69, 0x67)
+ o, err = (*z).Sig.MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "Sig")
+ return
+ }
+ }
+ }
+ return
+}
+
+func (_ *netPrioResponseSigned) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*netPrioResponseSigned)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *netPrioResponseSigned) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ var zb0003 int
+ var zb0004 bool
+ zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response")
+ return
+ }
+ if zb0003 > 0 {
+ zb0003--
+ (*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response", "struct-from-array", "Nonce")
+ return
+ }
+ }
+ if zb0003 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0003)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response")
+ return
+ }
+ if zb0004 {
+ (*z).Response = netPrioResponse{}
+ }
+ for zb0003 > 0 {
+ zb0003--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response")
+ return
+ }
+ switch string(field) {
+ case "Nonce":
+ (*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response", "Nonce")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Response")
+ return
+ }
+ }
+ }
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Round")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Sender.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Sender")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Sig.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Sig")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = netPrioResponseSigned{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "Response":
+ var zb0005 int
+ var zb0006 bool
+ zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Response")
+ return
+ }
+ if zb0005 > 0 {
+ zb0005--
+ (*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Response", "struct-from-array", "Nonce")
+ return
+ }
+ }
+ if zb0005 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0005)
+ if err != nil {
+ err = msgp.WrapError(err, "Response", "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err, "Response")
+ return
+ }
+ if zb0006 {
+ (*z).Response = netPrioResponse{}
+ }
+ for zb0005 > 0 {
+ zb0005--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Response")
+ return
+ }
+ switch string(field) {
+ case "Nonce":
+ (*z).Response.Nonce, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Response", "Nonce")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err, "Response")
+ return
+ }
+ }
+ }
+ }
+ case "Round":
+ bts, err = (*z).Round.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Round")
+ return
+ }
+ case "Sender":
+ bts, err = (*z).Sender.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Sender")
+ return
+ }
+ case "Sig":
+ bts, err = (*z).Sig.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Sig")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *netPrioResponseSigned) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*netPrioResponseSigned)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *netPrioResponseSigned) Msgsize() (s int) {
+ s = 1 + 9 + 1 + 6 + msgp.StringPrefixSize + len((*z).Response.Nonce) + 6 + (*z).Round.Msgsize() + 7 + (*z).Sender.Msgsize() + 4 + (*z).Sig.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *netPrioResponseSigned) MsgIsZero() bool {
+ return ((*z).Response.Nonce == "") && ((*z).Round.MsgIsZero()) && ((*z).Sender.MsgIsZero()) && ((*z).Sig.MsgIsZero())
+}
diff --git a/node/msgp_gen_test.go b/node/msgp_gen_test.go
new file mode 100644
index 0000000000..170a47393a
--- /dev/null
+++ b/node/msgp_gen_test.go
@@ -0,0 +1,134 @@
+package node
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/msgp/msgp"
+)
+
+func TestMarshalUnmarshalnetPrioResponse(t *testing.T) {
+ v := netPrioResponse{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingnetPrioResponse(t *testing.T) {
+ protocol.RunEncodingTest(t, &netPrioResponse{})
+}
+
+func BenchmarkMarshalMsgnetPrioResponse(b *testing.B) {
+ v := netPrioResponse{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgnetPrioResponse(b *testing.B) {
+ v := netPrioResponse{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalnetPrioResponse(b *testing.B) {
+ v := netPrioResponse{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalnetPrioResponseSigned(t *testing.T) {
+ v := netPrioResponseSigned{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingnetPrioResponseSigned(t *testing.T) {
+ protocol.RunEncodingTest(t, &netPrioResponseSigned{})
+}
+
+func BenchmarkMarshalMsgnetPrioResponseSigned(b *testing.B) {
+ v := netPrioResponseSigned{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgnetPrioResponseSigned(b *testing.B) {
+ v := netPrioResponseSigned{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalnetPrioResponseSigned(b *testing.B) {
+ v := netPrioResponseSigned{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/node/netprio.go b/node/netprio.go
index dc6a4466da..57913e6422 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -27,10 +27,14 @@ import (
)
type netPrioResponse struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
Nonce string
}
type netPrioResponseSigned struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
Response netPrioResponse
Round basics.Round
Sender basics.Address
@@ -38,7 +42,7 @@ type netPrioResponseSigned struct {
}
func (npr netPrioResponse) ToBeHashed() (protocol.HashID, []byte) {
- return protocol.NetPrioResponse, protocol.Encode(npr)
+ return protocol.NetPrioResponse, protocol.Encode(&npr)
}
// NewPrioChallenge implements the network.NetPrioScheme interface
@@ -104,7 +108,7 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte {
rs.Sender = maxPart.Address()
rs.Sig = signer.Sign(ephID, rs.Response)
- return protocol.Encode(rs)
+ return protocol.Encode(&rs)
}
// VerifyPrioResponse implements the network.NetPrioScheme interface
diff --git a/node/node.go b/node/node.go
index 8b7bbd2a20..45041e8e40 100644
--- a/node/node.go
+++ b/node/node.go
@@ -84,9 +84,8 @@ type AlgorandFullNode struct {
cancelCtx context.CancelFunc
config config.Local
- ledger *data.Ledger
- net network.GossipNode
- phonebook *network.ThreadsafePhonebook
+ ledger *data.Ledger
+ net network.GossipNode
transactionPool *pools.TransactionPool
txHandler *data.TxHandler
@@ -140,7 +139,7 @@ type TxnWithStatus struct {
// MakeFull sets up an Algorand full node
// (i.e., it returns a node that participates in consensus)
-func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookDir string, genesis bookkeeping.Genesis) (*AlgorandFullNode, error) {
+func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAddresses []string, genesis bookkeeping.Genesis) (*AlgorandFullNode, error) {
node := new(AlgorandFullNode)
node.rootDir = rootDir
@@ -148,16 +147,9 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookDir
node.log = log.With("name", cfg.NetAddress)
node.genesisID = genesis.ID()
node.genesisHash = crypto.HashObj(genesis)
- node.phonebook = network.MakeThreadsafePhonebook()
-
- addrs, err := config.LoadPhonebook(phonebookDir)
- if err != nil {
- log.Debugf("Cannot load static phonebook: %v", err)
- }
- node.phonebook.ReplacePeerList(addrs)
// tie network, block fetcher, and agreement services together
- p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, node.phonebook, genesis.ID(), genesis.Network)
+ p2pNode, err := network.NewWebsocketNetwork(node.log, node.config, phonebookAddresses, genesis.ID(), genesis.Network)
if err != nil {
log.Errorf("could not create websocket node: %v", err)
return nil, err
@@ -230,7 +222,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookDir
blockFactory := makeBlockFactory(node.ledger, node.transactionPool, node.config.EnableProcessBlockStats, node.highPriorityCryptoVerificationPool)
blockValidator := blockValidatorImpl{l: node.ledger, tp: node.transactionPool, verificationPool: node.highPriorityCryptoVerificationPool}
- agreementLedger := makeAgreementLedger(node.ledger)
+ agreementLedger := makeAgreementLedger(node.ledger, node.net)
agreementParameters := agreement.Parameters{
Logger: log,
@@ -418,7 +410,7 @@ func (node *AlgorandFullNode) BroadcastSignedTxGroup(txgroup []transactions.Sign
var enc []byte
var txids []transactions.Txid
for _, tx := range txgroup {
- enc = append(enc, protocol.Encode(tx)...)
+ enc = append(enc, protocol.Encode(&tx)...)
txids = append(txids, tx.ID())
}
err = node.net.Broadcast(context.TODO(), protocol.TxnTag, enc, true, nil)
@@ -592,16 +584,6 @@ func (node *AlgorandFullNode) PoolStats() PoolStats {
}
}
-// ExtendPeerList dynamically adds a peer to a node's peer list.
-func (node *AlgorandFullNode) ExtendPeerList(peers ...string) {
- node.phonebook.ExtendPeerList(peers)
-}
-
-// ReplacePeerList replaces the current peer list with a different one
-func (node *AlgorandFullNode) ReplacePeerList(peers ...string) {
- node.phonebook.ReplacePeerList(peers)
-}
-
// SuggestedFee returns the suggested fee per byte recommended to ensure a new transaction is processed in a timely fashion.
// Caller should set fee to max(MinTxnFee, SuggestedFee() * len(encoded SignedTxn))
func (node *AlgorandFullNode) SuggestedFee() basics.MicroAlgos {
diff --git a/node/node_test.go b/node/node_test.go
index d4ede125a4..922e89d547 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -55,7 +55,7 @@ var defaultConfig = config.Local{
IncomingConnectionsLimit: -1,
}
-func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationPool execpool.BacklogPool) ([]*AlgorandFullNode, []string, []string) {
+func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationPool execpool.BacklogPool, customConsensus config.ConsensusProtocols) ([]*AlgorandFullNode, []string, []string) {
util.RaiseRlimit(1000)
f, _ := os.Create(t.Name() + ".log")
logging.Base().SetJSONFormatter()
@@ -84,9 +84,11 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
// because we explicitly generated the sqlite database above (in
// installFullNode).
g := bookkeeping.Genesis{
- SchemaID: "go-test-node-genesis",
- Proto: proto,
- Network: config.Devtestnet,
+ SchemaID: "go-test-node-genesis",
+ Proto: proto,
+ Network: config.Devtestnet,
+ FeeSink: sinkAddr.String(),
+ RewardsPool: poolAddr.String(),
}
for i := range wallets {
@@ -115,6 +117,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
panic(err)
}
root, err := account.GenerateRoot(access)
+ access.Close()
if err != nil {
panic(err)
}
@@ -125,6 +128,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
panic(err)
}
part, err := account.FillDBWithParticipationKeys(access, root.Address(), firstRound, lastRound, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution)
+ access.Close()
if err != nil {
panic(err)
}
@@ -144,6 +148,12 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
for i, rootDirectory := range rootDirs {
genesisDir := filepath.Join(rootDirectory, g.ID())
ledgerFilenamePrefix := filepath.Join(genesisDir, config.LedgerFilenamePrefix)
+ if customConsensus != nil {
+ err := config.SaveConfigurableConsensus(genesisDir, customConsensus)
+ require.Nil(t, err)
+ }
+ err1 := config.LoadConfigurableConsensusProtocols(genesisDir)
+ require.Nil(t, err1)
nodeID := fmt.Sprintf("Node%d", i)
const inMem = false
const archival = true
@@ -156,7 +166,7 @@ func setupFullNodes(t *testing.T, proto protocol.ConsensusVersion, verificationP
cfg, err := config.LoadConfigFromDisk(rootDirectory)
require.NoError(t, err)
- node, err := MakeFull(logging.Base().With("source", t.Name()+strconv.Itoa(i)), rootDirectory, cfg, "", g)
+ node, err := MakeFull(logging.Base().With("source", t.Name()+strconv.Itoa(i)), rootDirectory, cfg, []string{}, g)
nodes[i] = node
require.NoError(t, err)
}
@@ -170,7 +180,7 @@ func TestSyncingFullNode(t *testing.T) {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- nodes, wallets, rootDirs := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool)
+ nodes, wallets, rootDirs := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil)
for i := 0; i < len(nodes); i++ {
defer os.Remove(wallets[i])
defer os.RemoveAll(rootDirs[i])
@@ -227,7 +237,7 @@ func TestInitialSync(t *testing.T) {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- nodes, wallets, rootdirs := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool)
+ nodes, wallets, rootdirs := setupFullNodes(t, protocol.ConsensusCurrentVersion, backlogPool, nil)
for i := 0; i < len(nodes); i++ {
defer os.Remove(wallets[i])
defer os.RemoveAll(rootdirs[i])
@@ -255,12 +265,46 @@ func TestInitialSync(t *testing.T) {
}
func TestSimpleUpgrade(t *testing.T) {
- t.Skip("Randomly failing: node_test.go:~283 : no block notification for account. Re-enable after agreement bug-fix pass")
+ t.Skip("Randomly failing: node_test.go:~330 : no block notification for account. Re-enable after agreement bug-fix pass")
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- nodes, wallets, rootDirs := setupFullNodes(t, protocol.ConsensusTest0, backlogPool)
+ // ConsensusTest0 is a version of ConsensusV0 used for testing
+ // (it has different approved upgrade paths).
+ const consensusTest0 = protocol.ConsensusVersion("test0")
+
+ // ConsensusTest1 is an extension of ConsensusTest0 that
+ // supports a sorted-list balance commitment.
+ const consensusTest1 = protocol.ConsensusVersion("test1")
+
+ configurableConsensus := make(config.ConsensusProtocols)
+
+ testParams0 := config.Consensus[protocol.ConsensusCurrentVersion]
+ testParams0.SupportGenesisHash = false
+ testParams0.UpgradeVoteRounds = 2
+ testParams0.UpgradeThreshold = 1
+ testParams0.DefaultUpgradeWaitRounds = 2
+ testParams0.MaxVersionStringLen = 64
+ testParams0.MaxTxnBytesPerBlock = 1000000
+ testParams0.DefaultKeyDilution = 10000
+ testParams0.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{
+ consensusTest1: 0,
+ }
+ configurableConsensus[consensusTest0] = testParams0
+
+ testParams1 := config.Consensus[protocol.ConsensusCurrentVersion]
+ testParams1.SupportGenesisHash = false
+ testParams1.UpgradeVoteRounds = 10
+ testParams1.UpgradeThreshold = 8
+ testParams1.DefaultUpgradeWaitRounds = 10
+ testParams1.MaxVersionStringLen = 64
+ testParams1.MaxTxnBytesPerBlock = 1000000
+ testParams1.DefaultKeyDilution = 10000
+ testParams1.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ configurableConsensus[consensusTest1] = testParams1
+
+ nodes, wallets, rootDirs := setupFullNodes(t, consensusTest0, backlogPool, configurableConsensus)
for i := 0; i < len(nodes); i++ {
defer os.Remove(wallets[i])
defer os.RemoveAll(rootDirs[i])
@@ -301,7 +345,7 @@ func TestSimpleUpgrade(t *testing.T) {
roundsCheckedForUpgrade++
for i := range wallets {
- require.Equal(t, protocol.ConsensusTest0, blocks[i].CurrentProtocol)
+ require.Equal(t, consensusTest0, blocks[i].CurrentProtocol)
}
}
@@ -310,7 +354,7 @@ func TestSimpleUpgrade(t *testing.T) {
roundsCheckedForUpgrade++
for i := range wallets {
- require.Equal(t, protocol.ConsensusTest1, blocks[i].CurrentProtocol)
+ require.Equal(t, consensusTest1, blocks[i].CurrentProtocol)
}
}
}
@@ -347,7 +391,7 @@ func connectPeers(nodes []*AlgorandFullNode) {
}
for _, node := range nodes {
- node.ExtendPeerList(neighbors...)
+ // node.ExtendPeerList(neighbors...)
node.net.RequestConnectOutgoing(false, nil)
}
}
@@ -362,9 +406,9 @@ func delayStartNode(node *AlgorandFullNode, peers []*AlgorandFullNode, delay tim
}()
wg.Wait()
- node0Addr := node.config.NetAddress
+ // node0Addr := node.config.NetAddress
for _, peer := range peers {
- peer.ExtendPeerList(node0Addr)
+ // peer.ExtendPeerList(node0Addr)
peer.net.RequestConnectOutgoing(false, nil)
}
}
diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go
index 7563b95c92..53f6ebf0de 100644
--- a/nodecontrol/algodControl.go
+++ b/nodecontrol/algodControl.go
@@ -375,3 +375,14 @@ func (nc NodeController) readGenesisJSON(genesisFile string) (genesisLedger book
err = protocol.DecodeJSON(genesisText, &genesisLedger)
return
}
+
+// SetConsensus applies new consensus settings which would get deployed before
+// any of the nodes start
+func (nc NodeController) SetConsensus(consensus config.ConsensusProtocols) error {
+ return config.SaveConfigurableConsensus(nc.algodDataDir, consensus)
+}
+
+// GetConsensus rebuilds the consensus versions from the data directory
+func (nc NodeController) GetConsensus() (config.ConsensusProtocols, error) {
+ return config.PreloadConfigurableConsensusProtocols(nc.algodDataDir)
+}
diff --git a/protocol/codec.go b/protocol/codec.go
index 6407e07db3..1a6549fb88 100644
--- a/protocol/codec.go
+++ b/protocol/codec.go
@@ -19,6 +19,7 @@ package protocol
import (
"fmt"
"io"
+ "os"
"sync"
"github.com/algorand/go-codec/codec"
@@ -109,31 +110,15 @@ func EncodeMsgp(obj msgp.Marshaler) []byte {
}
// Encode returns a msgpack-encoded byte buffer for a given object.
-func Encode(obj interface{}) []byte {
- msgp, ok := obj.(msgp.Marshaler)
- if ok && msgp.CanMarshalMsg(msgp) {
- return EncodeMsgp(msgp)
+func Encode(obj msgp.Marshaler) []byte {
+ if obj.CanMarshalMsg(obj) {
+ return EncodeMsgp(obj)
}
- return EncodeReflect(obj)
-}
-// CountingWriter is an implementation of io.Writer that tracks the number
-// of bytes written (but discards the actual bytes).
-type CountingWriter struct {
- N int
-}
-
-func (cw *CountingWriter) Write(b []byte) (int, error) {
- blen := len(b)
- cw.N += blen
- return blen, nil
-}
-
-// EncodeLen returns len(Encode(obj))
-func EncodeLen(obj interface{}) int {
- var cw CountingWriter
- EncodeStream(&cw, obj)
- return cw.N
+ // Use fmt instead of logging to avoid import loops;
+ // the expectation is that this should never happen.
+ fmt.Fprintf(os.Stderr, "Encoding %T using go-codec; stray embedded field?\n", obj)
+ return EncodeReflect(obj)
}
// EncodeStream is like Encode but writes to an io.Writer instead.
@@ -190,11 +175,15 @@ func DecodeMsgp(b []byte, objptr msgp.Unmarshaler) (err error) {
// Decode attempts to decode a msgpack-encoded byte buffer
// into an object instance pointed to by objptr.
-func Decode(b []byte, objptr interface{}) error {
- msgp, ok := objptr.(msgp.Unmarshaler)
- if ok && msgp.CanUnmarshalMsg(msgp) {
- return DecodeMsgp(b, msgp)
+func Decode(b []byte, objptr msgp.Unmarshaler) error {
+ if objptr.CanUnmarshalMsg(objptr) {
+ return DecodeMsgp(b, objptr)
}
+
+ // Use fmt instead of logging to avoid import loops;
+ // the expectation is that this should never happen.
+ fmt.Fprintf(os.Stderr, "Decoding %T using go-codec; stray embedded field?\n", objptr)
+
return DecodeReflect(b, objptr)
}
diff --git a/protocol/codec_test.go b/protocol/codec_test.go
index 4058f95212..d8c7bc4642 100644
--- a/protocol/codec_test.go
+++ b/protocol/codec_test.go
@@ -53,7 +53,7 @@ type HelperStruct2 struct {
func TestOmitEmpty(t *testing.T) {
var x TestStruct
- enc := Encode(&x)
+ enc := EncodeReflect(&x)
require.Equal(t, 1, len(enc))
}
@@ -72,7 +72,7 @@ func TestEncodeOrder(t *testing.T) {
b.A = 1
b.B = "foo"
- require.Equal(t, Encode(&a), Encode(&b))
+ require.Equal(t, EncodeReflect(&a), EncodeReflect(&b))
var c struct {
A int `codec:"x"`
@@ -102,8 +102,8 @@ func TestEncodeOrder(t *testing.T) {
e.R = 1
e.Q = "foo"
- require.Equal(t, Encode(&c), Encode(&d))
- require.Equal(t, Encode(&c), Encode(&e))
+ require.Equal(t, EncodeReflect(&c), EncodeReflect(&d))
+ require.Equal(t, EncodeReflect(&c), EncodeReflect(&e))
}
type InlineChild struct {
@@ -118,5 +118,26 @@ func TestEncodeInline(t *testing.T) {
a := InlineChild{X: 5}
b := InlineParent{InlineChild: a}
- require.Equal(t, Encode(a), Encode(b))
+ require.Equal(t, EncodeReflect(a), EncodeReflect(b))
+}
+
+type embeddedMsgp struct {
+ TxType
+ A uint64
+}
+
+func TestEncodeEmbedded(t *testing.T) {
+ var x embeddedMsgp
+
+ x.TxType = PaymentTx
+ x.A = 5
+
+ require.Equal(t, Encode(x), Encode(&x))
+ require.Equal(t, Encode(x.TxType), Encode(&x.TxType))
+ require.NotEqual(t, Encode(&x), Encode(&x.TxType))
+
+ var y embeddedMsgp
+
+ require.NoError(t, Decode(Encode(&x), &y))
+ require.Equal(t, x, y)
}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index 38d9a3da2d..8b8d26467a 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -113,6 +113,11 @@ const ConsensusV20 = ConsensusVersion(
"https://github.com/algorandfoundation/specs/tree/4a9db6a25595c6fd097cf9cc137cc83027787eaa",
)
+// ConsensusV21 fixes a bug in credential.lowestOutput
+const ConsensusV21 = ConsensusVersion(
+ "https://github.com/algorandfoundation/specs/tree/8096e2df2da75c3339986317f9abe69d4fa86b4b",
+)
+
// ConsensusFuture is a protocol that should not appear in any production
// network, but is used to test features before they are released.
const ConsensusFuture = ConsensusVersion(
@@ -125,46 +130,7 @@ const ConsensusFuture = ConsensusVersion(
// ConsensusCurrentVersion is the latest version and should be used
// when a specific version is not provided.
-const ConsensusCurrentVersion = ConsensusV20
-
-// ConsensusTest0 is a version of ConsensusV0 used for testing
-// (it has different approved upgrade paths).
-const ConsensusTest0 = ConsensusVersion("test0")
-
-// ConsensusTest1 is an extension of ConsensusTest0 that
-// supports a sorted-list balance commitment.
-const ConsensusTest1 = ConsensusVersion("test1")
-
-// ConsensusTestBigBlocks is a version of ConsensusV0 used for testing
-// with big block size (large MaxTxnBytesPerBlock).
-// at the time versioning was introduced.
-const ConsensusTestBigBlocks = ConsensusVersion("test-big-blocks")
-
-// ConsensusTestRapidRewardRecalculation is a version of ConsensusCurrentVersion
-// that decreases the RewardRecalculationInterval greatly.
-const ConsensusTestRapidRewardRecalculation = ConsensusVersion("test-fast-reward-recalculation")
-
-// ConsensusTestShorterLookback is a version of ConsensusCurrentVersion
-// that decreases the MaxBalLookback greatly.
-const ConsensusTestShorterLookback = ConsensusVersion("test-shorter-lookback")
-
-// ConsensusTestUnupgradedProtocol is a version of ConsensusCurrentVersion
-// that allows the control of the upgrade from ConsensusTestUnupgradedProtocol to
-// ConsensusTestUnupgradedProtocol
-const ConsensusTestUnupgradedProtocol = ConsensusVersion("test-unupgraded-protocol")
-
-// ConsensusTestUnupgradedToProtocol is a version of ConsensusCurrentVersion
-// It is used as an upgrade from ConsensusTestUnupgradedProtocol
-const ConsensusTestUnupgradedToProtocol = ConsensusVersion("test-unupgradedto-protocol")
-
-
-// ConsensusTestFastUpgrade is meant for testing of protocol upgrades:
-// during testing, it is equivalent to another protocol with the exception
-// of the upgrade parameters, which allow for upgrades to take place after
-// only a few rounds.
-func ConsensusTestFastUpgrade(proto ConsensusVersion) ConsensusVersion {
- return "test-fast-upgrade-" + proto
-}
+const ConsensusCurrentVersion = ConsensusV21
// Error is used to indicate that an unsupported protocol has been detected.
type Error ConsensusVersion
diff --git a/protocol/encodebench_test.go b/protocol/encodebench_test.go
index 06b542b86b..5eff5235b5 100644
--- a/protocol/encodebench_test.go
+++ b/protocol/encodebench_test.go
@@ -40,12 +40,6 @@ func BenchmarkCodecEncoder(b *testing.B) {
}
})
- b.Run("NilLen", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- EncodeLen(nil)
- }
- })
-
b.Run("NilReset", func(b *testing.B) {
enc := codec.NewEncoderBytes(nil, CodecHandle)
for i := 0; i < b.N; i++ {
@@ -64,41 +58,9 @@ func BenchmarkCodecEncoder(b *testing.B) {
}
})
- b.Run("NilCount", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var n CountingWriter
- enc := codec.NewEncoder(&n, CodecHandle)
- enc.MustEncode(nil)
- }
- })
-
- b.Run("NilCountReset", func(b *testing.B) {
- enc := codec.NewEncoder(nil, CodecHandle)
- var n CountingWriter
- for i := 0; i < b.N; i++ {
- enc.Reset(&n)
- enc.MustEncode(nil)
- }
- })
-
b.Run("Encode", func(b *testing.B) {
for i := 0; i < b.N; i++ {
- Encode(s)
- }
- })
-
- b.Run("EncodeStream", func(b *testing.B) {
- var n CountingWriter
- for i := 0; i < b.N; i++ {
- EncodeStream(&n, s)
- }
- })
-
- b.Run("EncodeStreamReuse", func(b *testing.B) {
- var n CountingWriter
- enc := codec.NewEncoder(&n, CodecHandle)
- for i := 0; i < b.N; i++ {
- enc.MustEncode(s)
+ EncodeReflect(s)
}
})
}
diff --git a/protocol/msgp_gen.go b/protocol/msgp_gen.go
index 55f3a4aede..2961c4fc72 100644
--- a/protocol/msgp_gen.go
+++ b/protocol/msgp_gen.go
@@ -15,6 +15,9 @@ func (z ConsensusVersion) MarshalMsg(b []byte) (o []byte, err error) {
func (_ ConsensusVersion) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(ConsensusVersion)
+ if !ok {
+ _, ok = (z).(*ConsensusVersion)
+ }
return ok
}
@@ -58,6 +61,9 @@ func (z Error) MarshalMsg(b []byte) (o []byte, err error) {
func (_ Error) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(Error)
+ if !ok {
+ _, ok = (z).(*Error)
+ }
return ok
}
@@ -101,6 +107,9 @@ func (z HashID) MarshalMsg(b []byte) (o []byte, err error) {
func (_ HashID) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(HashID)
+ if !ok {
+ _, ok = (z).(*HashID)
+ }
return ok
}
@@ -144,6 +153,9 @@ func (z NetworkID) MarshalMsg(b []byte) (o []byte, err error) {
func (_ NetworkID) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(NetworkID)
+ if !ok {
+ _, ok = (z).(*NetworkID)
+ }
return ok
}
@@ -187,6 +199,9 @@ func (z Tag) MarshalMsg(b []byte) (o []byte, err error) {
func (_ Tag) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(Tag)
+ if !ok {
+ _, ok = (z).(*Tag)
+ }
return ok
}
@@ -230,6 +245,9 @@ func (z TxType) MarshalMsg(b []byte) (o []byte, err error) {
func (_ TxType) CanMarshalMsg(z interface{}) bool {
_, ok := (z).(TxType)
+ if !ok {
+ _, ok = (z).(*TxType)
+ }
return ok
}
diff --git a/protocol/tags.go b/protocol/tags.go
index 04ceee02aa..9fff2316a5 100644
--- a/protocol/tags.go
+++ b/protocol/tags.go
@@ -24,11 +24,13 @@ type Tag string
const (
UnknownMsgTag Tag = "??"
AgreementVoteTag Tag = "AV"
- MsgSkipTag Tag = "MS"
+ MsgDigestSkipTag Tag = "MS"
NetPrioResponseTag Tag = "NP"
PingTag Tag = "pi"
PingReplyTag Tag = "pj"
ProposalPayloadTag Tag = "PP"
+ TopicMsgRespTag Tag = "TS"
+ MsgOfInterestTag Tag = "MI"
TxnTag Tag = "TX"
UniCatchupReqTag Tag = "UC"
UniEnsBlockReqTag Tag = "UE"
diff --git a/protocol/transcode/core_test.go b/protocol/transcode/core_test.go
index 17ef93b63b..31e177c397 100644
--- a/protocol/transcode/core_test.go
+++ b/protocol/transcode/core_test.go
@@ -171,7 +171,7 @@ func TestIdempotence(t *testing.T) {
for i := 0; i < niter; i++ {
o := randomMap(6, 3)
- testIdempotentRoundtrip(t, protocol.Encode(o))
+ testIdempotentRoundtrip(t, protocol.EncodeReflect(o))
}
}
@@ -185,7 +185,7 @@ func TestIdempotenceMultiobject(t *testing.T) {
nobj := crypto.RandUint64() % 8
buf := []byte{}
for j := 0; j < int(nobj); j++ {
- buf = append(buf, protocol.Encode(randomMap(6, 3))...)
+ buf = append(buf, protocol.EncodeReflect(randomMap(6, 3))...)
}
testIdempotentRoundtrip(t, buf)
}
@@ -240,6 +240,6 @@ func TestIdempotenceStruct(t *testing.T) {
p.M[fmt.Sprintf("K%dK", crypto.RandUint64())] = fmt.Sprintf("V%dV", crypto.RandUint64())
}
- testIdempotentRoundtrip(t, protocol.Encode(p))
+ testIdempotentRoundtrip(t, protocol.EncodeReflect(&p))
}
}
diff --git a/release/release-banner.jpg b/release/release-banner.jpg
new file mode 100644
index 0000000000..77c901d75a
Binary files /dev/null and b/release/release-banner.jpg differ
diff --git a/rpcs/fetcher_test.go b/rpcs/fetcher_test.go
deleted file mode 100644
index 67766eb646..0000000000
--- a/rpcs/fetcher_test.go
+++ /dev/null
@@ -1,413 +0,0 @@
-// Copyright (C) 2019-2020 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see .
-
-package rpcs
-
-import (
- "context"
- "errors"
- "net/http"
- "net/rpc"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
- "github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/bloom"
-)
-
-type MockRunner struct {
- ran bool
- done chan *rpc.Call
- failWithNil bool
- failWithError bool
- txgroups [][]transactions.SignedTxn
-}
-
-type MockRPCClient struct {
- client *MockRunner
- closed bool
- rootURL string
- log logging.Logger
-}
-
-func (client *MockRPCClient) Close() error {
- client.closed = true
- return nil
-}
-
-func (client *MockRPCClient) Address() string {
- return "mock.address."
-}
-func (client *MockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups [][]transactions.SignedTxn, err error) {
- client.log.Info("MockRPCClient.Sync")
- select {
- case <-ctx.Done():
- return nil, errors.New("cancelled")
- default:
- }
- if client.client.failWithNil {
- return nil, errors.New("old failWithNil")
- }
- if client.client.failWithError {
- return nil, errors.New("failing call")
- }
- return client.client.txgroups, nil
-}
-func (client *MockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- return nil, nil
-}
-
-// network.HTTPPeer interface
-func (client *MockRPCClient) GetAddress() string {
- return client.rootURL
-}
-func (client *MockRPCClient) GetHTTPClient() *http.Client {
- return nil
-}
-func (client *MockRPCClient) PrepareURL(x string) string {
- return strings.Replace(x, "{genesisID}", "test genesisID", -1)
-}
-
-type MockClientAggregator struct {
- peers []network.Peer
- Registrar
-}
-
-func (mca *MockClientAggregator) GetPeers(options ...network.PeerOption) []network.Peer {
- return mca.peers
-}
-
-const numberOfPeers = 10
-
-func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool) *MockClientAggregator {
- clients := make([]network.Peer, 0)
- for i := 0; i < numberOfPeers; i++ {
- runner := MockRunner{failWithNil: failWithNil, failWithError: failWithError, done: make(chan *rpc.Call)}
- clients = append(clients, &MockRPCClient{client: &runner, log: logging.TestingLog(t)})
- }
- t.Logf("len(mca.clients) = %d", len(clients))
- return &MockClientAggregator{peers: clients}
-}
-
-func getAllClientsSelectedForRound(t *testing.T, fetcher *NetworkFetcher, round basics.Round) map[FetcherClient]basics.Round {
- selected := make(map[FetcherClient]basics.Round, 0)
- for i := 0; i < 1000; i++ {
- c, err := fetcher.selectClient(round)
- if err != nil {
- return selected
- }
- selected[c.(FetcherClient)] = fetcher.roundUpperBound[c]
- }
- return selected
-}
-
-func TestSelectValidRemote(t *testing.T) {
- network := makeMockClientAggregator(t, false, false)
- factory := MakeNetworkFetcherFactory(network, numberOfPeers, nil)
- factory.log = logging.TestingLog(t)
- fetcher := factory.New()
- require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).peers))
-
- var oldClient FetcherClient
- var newClient FetcherClient
- i := 0
- for _, client := range fetcher.(*NetworkFetcher).peers {
- if i == 0 {
- oldClient = client
- r := basics.Round(2)
- fetcher.(*NetworkFetcher).roundUpperBound[client] = r
- } else if i == 1 {
- newClient = client
- r := basics.Round(4)
- fetcher.(*NetworkFetcher).roundUpperBound[client] = r
- } else if i > 2 {
- r := basics.Round(3)
- fetcher.(*NetworkFetcher).roundUpperBound[client] = r
- } // skip i == 2
- i++
- }
-
- require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).availablePeers(1)))
- selected := getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 1)
- require.Equal(t, numberOfPeers, len(selected))
- _, hasOld := selected[oldClient]
- require.True(t, hasOld)
-
- _, hasNew := selected[newClient]
- require.True(t, hasNew)
-
- require.Equal(t, numberOfPeers-1, len(fetcher.(*NetworkFetcher).availablePeers(2)))
- selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 2)
- require.Equal(t, numberOfPeers-1, len(selected))
- _, hasOld = selected[oldClient]
- require.False(t, hasOld)
- _, hasNew = selected[newClient]
- require.True(t, hasNew)
-
- require.Equal(t, 2, len(fetcher.(*NetworkFetcher).availablePeers(3)))
- selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 3)
- require.Equal(t, 2, len(selected))
- _, hasOld = selected[oldClient]
- require.False(t, hasOld)
- _, hasNew = selected[newClient]
- require.True(t, hasNew)
-
- require.Equal(t, 1, len(fetcher.(*NetworkFetcher).availablePeers(4)))
- selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 4)
- require.Equal(t, 1, len(selected))
- _, hasOld = selected[oldClient]
- require.False(t, hasOld)
- _, hasNew = selected[newClient]
- require.False(t, hasNew)
-}
-
-type dummyFetcher struct {
- failWithNil bool
- failWithError bool
- fetchTimeout time.Duration
-}
-
-// FetcherClient interface
-func (df *dummyFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
- if df.failWithNil {
- return nil, nil
- }
- if df.failWithError {
- return nil, errors.New("failing call")
- }
-
- timer := time.NewTimer(df.fetchTimeout)
- defer timer.Stop()
-
- // Fill in the dummy response with the correct round
- dummyBlock := EncodedBlockCert{
- Block: bookkeeping.Block{
- BlockHeader: bookkeeping.BlockHeader{
- Round: r,
- },
- },
- Certificate: agreement.Certificate{
- Round: r,
- },
- }
-
- encodedData := protocol.Encode(dummyBlock)
-
- select {
- case <-timer.C:
- case <-ctx.Done():
- return nil, ctx.Err()
- }
-
- return encodedData, nil
-}
-
-// FetcherClient interface
-func (df *dummyFetcher) Address() string {
- //logging.Base().Debug("dummyFetcher Address")
- return "dummyFetcher address"
-}
-
-// FetcherClient interface
-func (df *dummyFetcher) Close() error {
- //logging.Base().Debug("dummyFetcher Close")
- return nil
-}
-
-func makeDummyFetchers(failWithNil bool, failWithError bool, timeout time.Duration) []FetcherClient {
- out := make([]FetcherClient, numberOfPeers)
- for i := range out {
- out[i] = &dummyFetcher{failWithNil, failWithError, timeout}
- }
- return out
-}
-
-func TestFetchBlock(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 100*time.Millisecond),
- log: logging.TestingLog(t),
- }
-
- var err error
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- fetched := false
- for i := 0; i < numberOfPeers; i++ {
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NoError(t, err)
- require.NotNil(t, client)
- end := time.Now()
- require.True(t, end.Sub(start) > 100*time.Millisecond)
- require.True(t, end.Sub(start) < 100*time.Millisecond+5*time.Second) // we want to have a higher margin here, as the machine we're running on might be slow.
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NotNil(t, client)
- require.NoError(t, err)
- fetched = true
- }
- }
- require.True(t, fetched)
-}
-
-func TestFetchBlockFail(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(true, false, 100*time.Millisecond),
- log: logging.TestingLog(t),
- }
-
- for i := 0; i < numberOfPeers; i++ {
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.Error(t, err)
- }
- require.True(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
-}
-
-func TestFetchBlockAborted(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 2*time.Second),
- log: logging.TestingLog(t),
- }
-
- ctx, cf := context.WithCancel(context.Background())
- defer cf()
- go func() {
- cf()
- }()
- start := time.Now()
- _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1))
- end := time.Now()
- require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
- require.Nil(t, client)
- require.True(t, end.Sub(start) < 10*time.Second)
-}
-
-func TestFetchBlockTimeout(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 10*time.Second),
- log: logging.TestingLog(t),
- }
- start := time.Now()
- ctx, cf := context.WithTimeout(context.Background(), 500*time.Millisecond)
- defer cf()
- _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1))
- end := time.Now()
- require.True(t, strings.Contains(err.Error(), context.DeadlineExceeded.Error()))
- require.Nil(t, client)
- require.True(t, end.Sub(start) >= 500*time.Millisecond)
- require.True(t, end.Sub(start) < 10*time.Second)
-}
-
-func TestFetchBlockErrorCall(t *testing.T) {
- fetcher := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, true, 10*time.Millisecond),
- log: logging.TestingLog(t),
- }
-
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.Error(t, err)
- require.Nil(t, client)
-}
-
-func TestFetchBlockComposedNoOp(t *testing.T) {
- f := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 1*time.Millisecond),
- log: logging.TestingLog(t),
- }
- fetcher := &ComposedFetcher{fetchers: []Fetcher{f, nil}}
-
- var err error
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- fetched := false
- for i := 0; i < numberOfPeers; i++ {
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NoError(t, err)
- require.NotNil(t, client)
- end := time.Now()
- require.True(t, end.Sub(start) >= 1*time.Millisecond)
- require.True(t, end.Sub(start) < 1*time.Millisecond+10*time.Second) // we take a very high margin here for the fetcher to complete.
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NotNil(t, client)
- require.NoError(t, err)
- fetched = true
- }
- }
- require.True(t, fetched)
-}
-
-// Make sure composed fetchers are hit in priority order
-func TestFetchBlockComposedFail(t *testing.T) {
- f := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(true, false, 1*time.Millisecond),
- log: logging.TestingLog(t),
- }
- f2 := &NetworkFetcher{
- roundUpperBound: make(map[FetcherClient]basics.Round),
- activeFetches: make(map[FetcherClient]int),
- peers: makeDummyFetchers(false, false, 1*time.Millisecond),
- log: logging.TestingLog(t),
- }
- fetcher := &ComposedFetcher{fetchers: []Fetcher{f, f2}}
-
- for i := 0; i < numberOfPeers; i++ {
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.Error(t, err)
- }
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- for i := 0; i < numberOfPeers; i++ {
- require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers)))
- _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers))
- require.NotNil(t, client)
- require.NoError(t, err)
- }
-}
diff --git a/rpcs/httpTxSync.go b/rpcs/httpTxSync.go
index 75a6cc0389..ded2d0dd7c 100644
--- a/rpcs/httpTxSync.go
+++ b/rpcs/httpTxSync.go
@@ -39,7 +39,7 @@ import (
type HTTPTxSync struct {
rootURL string
- peers PeerSource
+ peers network.GossipNode
log logging.Logger
@@ -48,7 +48,9 @@ type HTTPTxSync struct {
const requestContentType = "application/x-www-form-urlencoded"
-func responseBytes(response *http.Response, log logging.Logger, limit uint64) (data []byte, err error) {
+// ResponseBytes reads the content of the response object and returns the body content
+// while obeying the read size limit
+func ResponseBytes(response *http.Response, log logging.Logger, limit uint64) (data []byte, err error) {
// response.Body is always non-nil
defer response.Body.Close()
if response.ContentLength >= 0 {
@@ -72,7 +74,7 @@ func responseBytes(response *http.Response, log logging.Logger, limit uint64) (d
}
// create a new http sync object.
-func makeHTTPSync(peerSource PeerSource, log logging.Logger, serverResponseSize uint64) *HTTPTxSync {
+func makeHTTPSync(peerSource network.GossipNode, log logging.Logger, serverResponseSize uint64) *HTTPTxSync {
const transactionArrayEncodingOverhead = uint64(16) // manual tests shown that the actual extra packing cost is typically 3 bytes. We'll take 16 byte to ensure we're on the safe side.
return &HTTPTxSync{
peers: peerSource,
@@ -103,7 +105,8 @@ func (hts *HTTPTxSync) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups
hts.rootURL = hpeer.GetAddress()
client := hpeer.GetHTTPClient()
if client == nil {
- client = http.DefaultClient
+ client = &http.Client{}
+ client.Transport = hts.peers.GetRoundTripper()
}
parsedURL, err := network.ParseHostOrURL(hts.rootURL)
if err != nil {
@@ -158,7 +161,7 @@ func (hts *HTTPTxSync) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups
return nil, fmt.Errorf("txSync POST invalid content type '%s'", contentTypes[0])
}
- data, err := responseBytes(response, hts.log, hts.maxTxSyncResponseBytes)
+ data, err := ResponseBytes(response, hts.log, hts.maxTxSyncResponseBytes)
if err != nil {
hts.log.Warn("txSync body read failed: ", err)
return nil, err
@@ -166,7 +169,7 @@ func (hts *HTTPTxSync) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups
hts.log.Debugf("http sync got %d bytes", len(data))
var txns []transactions.SignedTxn
- err = protocol.Decode(data, &txns)
+ err = protocol.DecodeReflect(data, &txns)
if err != nil {
hts.log.Warn("txSync protocol decode: ", err)
}
diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go
index 6237b15eb1..62f6db95d3 100644
--- a/rpcs/ledgerService.go
+++ b/rpcs/ledgerService.go
@@ -18,6 +18,7 @@ package rpcs
import (
"context"
+ "encoding/binary"
"net/http"
"strconv"
@@ -53,12 +54,15 @@ type LedgerService struct {
// EncodedBlockCert defines how GetBlockBytes encodes a block and its certificate
type EncodedBlockCert struct {
+ _struct struct{} `codec:""`
+
Block bookkeeping.Block `codec:"block"`
Certificate agreement.Certificate `codec:"cert"`
}
// PreEncodedBlockCert defines how GetBlockBytes encodes a block and its certificate,
// using a pre-encoded Block and Certificate in msgpack format.
+//msgp:ignore PreEncodedBlockCert
type PreEncodedBlockCert struct {
Block codec.Raw `codec:"block"`
Certificate codec.Raw `codec:"cert"`
@@ -223,39 +227,114 @@ func (ls *LedgerService) ListenForCatchupReq(reqs <-chan network.IncomingMessage
}
}
+const noRoundNumberErrMsg = "can't find the round number"
+const noDataTypeErrMsg = "can't find the data-type"
+const roundNumberParseErrMsg = "unable to parse round number"
+const blockNotAvailabeErrMsg = "requested block is not available"
+const datatypeUnsupportedErrMsg = "requested data type is unsupported"
+
// a blocking function for handling a catchup request
func (ls *LedgerService) handleCatchupReq(ctx context.Context, reqMsg network.IncomingMessage) {
var res WsGetBlockOut
+ target := reqMsg.Sender.(network.UnicastPeer)
+ var respTopics network.Topics
+
+ if target.Version() == "1" {
+
+ defer func() {
+ ls.sendCatchupRes(ctx, target, reqMsg.Tag, res)
+ }()
+ var req WsGetBlockRequest
+ err := protocol.DecodeReflect(reqMsg.Data, &req)
+ if err != nil {
+ res.Error = err.Error()
+ return
+ }
+ res.Round = req.Round
+ encodedBlob, err := RawBlockBytes(ls.ledger, basics.Round(req.Round))
+
+ if err != nil {
+ res.Error = err.Error()
+ return
+ }
+ res.BlockBytes = encodedBlob
+ return
+ }
+ // Otherwise the peer speaks version 2.1: respond using the topics-based protocol
defer func() {
- ls.sendCatchupRes(ctx, reqMsg.Sender.(network.UnicastPeer), reqMsg.Tag, res)
+ target.Respond(ctx, reqMsg, respTopics)
}()
- var req WsGetBlockRequest
- err := protocol.Decode(reqMsg.Data, &req)
+ topics, err := network.UnmarshallTopics(reqMsg.Data)
if err != nil {
- res.Error = err.Error()
+ logging.Base().Infof("LedgerService handleCatchupReq: %s", err.Error())
+ respTopics = network.Topics{
+ network.MakeTopic(network.ErrorKey, []byte(err.Error()))}
+ return
+ }
+ roundBytes, found := topics.GetValue(roundKey)
+ if !found {
+ logging.Base().Infof("LedgerService handleCatchupReq: %s", noRoundNumberErrMsg)
+ respTopics = network.Topics{
+ network.MakeTopic(network.ErrorKey,
+ []byte(noRoundNumberErrMsg))}
+ return
+ }
+ requestType, found := topics.GetValue(requestDataTypeKey)
+ if !found {
+ logging.Base().Infof("LedgerService handleCatchupReq: %s", noDataTypeErrMsg)
+ respTopics = network.Topics{
+ network.MakeTopic(network.ErrorKey,
+ []byte(noDataTypeErrMsg))}
return
}
- res.Round = req.Round
- encodedBlob, err := RawBlockBytes(ls.ledger, basics.Round(req.Round))
- if err != nil {
- res.Error = err.Error()
+ round, read := binary.Uvarint(roundBytes)
+ if read <= 0 {
+ logging.Base().Infof("LedgerService handleCatchupReq: %s", roundNumberParseErrMsg)
+ respTopics = network.Topics{
+ network.MakeTopic(network.ErrorKey,
+ []byte(roundNumberParseErrMsg))}
return
}
- res.BlockBytes = encodedBlob
+ respTopics = topicBlockBytes(ls.ledger, basics.Round(round), string(requestType))
return
}
func (ls *LedgerService) sendCatchupRes(ctx context.Context, target network.UnicastPeer, reqTag protocol.Tag, outMsg WsGetBlockOut) {
t := reqTag.Complement()
logging.Base().Infof("catching down peer: %v, round %v. outcome: %v. ledger: %v", target.GetAddress(), outMsg.Round, outMsg.Error, ls.ledger.LastRound())
- err := target.Unicast(ctx, protocol.Encode(outMsg), t)
+ err := target.Unicast(ctx, protocol.EncodeReflect(outMsg), t)
if err != nil {
logging.Base().Info("failed to respond to catchup req", err)
}
}
+func topicBlockBytes(dataLedger *data.Ledger, round basics.Round, requestType string) network.Topics {
+ blk, cert, err := dataLedger.EncodedBlockCert(round)
+ if err != nil {
+ switch err.(type) {
+ case ledger.ErrNoEntry:
+ default:
+ logging.Base().Infof("LedgerService topicBlockBytes: %s", err)
+ }
+ return network.Topics{
+ network.MakeTopic(network.ErrorKey, []byte(blockNotAvailabeErrMsg))}
+ }
+ switch requestType {
+ case blockAndCertValue:
+ return network.Topics{
+ network.MakeTopic(
+ blockDataKey, blk),
+ network.MakeTopic(
+ certDataKey, cert),
+ }
+ default:
+ return network.Topics{
+ network.MakeTopic(network.ErrorKey, []byte(datatypeUnsupportedErrMsg))}
+ }
+}
+
// RawBlockBytes return the msgpack bytes for a block
func RawBlockBytes(ledger *data.Ledger, round basics.Round) ([]byte, error) {
blk, cert, err := ledger.EncodedBlockCert(round)
@@ -263,7 +342,7 @@ func RawBlockBytes(ledger *data.Ledger, round basics.Round) ([]byte, error) {
return nil, err
}
- return protocol.Encode(PreEncodedBlockCert{
+ return protocol.EncodeReflect(PreEncodedBlockCert{
Block: blk,
Certificate: cert,
}), nil
diff --git a/rpcs/ledgerService_test.go b/rpcs/ledgerService_test.go
index c23d71863e..073691ad14 100644
--- a/rpcs/ledgerService_test.go
+++ b/rpcs/ledgerService_test.go
@@ -18,462 +18,84 @@ package rpcs
import (
"context"
- "net"
- "net/http"
- "net/url"
- "strings"
"testing"
- "time"
- "github.com/gorilla/mux"
"github.com/stretchr/testify/require"
- "github.com/algorand/go-algorand/agreement"
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
)
-const defaultRewardUnit = 1e6
-
-var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21}
-var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-
-type httpTestPeerSource struct {
- peers []network.Peer
- Registrar
-}
-
-func (s *httpTestPeerSource) GetPeers(options ...network.PeerOption) []network.Peer {
- return s.peers
-}
-
-// implement network.HTTPPeer
-type testHTTPPeer struct {
- rootURL string
- client http.Client
-}
-
-func (p *testHTTPPeer) GetAddress() string {
- return p.rootURL
-}
-func (p *testHTTPPeer) PrepareURL(x string) string {
- return strings.Replace(x, "{genesisID}", "test genesisID", -1)
-}
-func (p *testHTTPPeer) GetHTTPClient() *http.Client {
- return &p.client
-}
-func (p *testHTTPPeer) GetHTTPPeer() network.HTTPPeer {
- return p
+type mockUnicastPeer struct {
+ responseTopics network.Topics
}
-func buildTestHTTPPeerSource(rootURL string) PeerSource {
- peer := testHTTPPeer{rootURL: rootURL}
- var wat network.HTTPPeer
- wat = &peer
- logging.Base().Infof("wat %#v", wat)
- return &httpTestPeerSource{peers: []network.Peer{&peer}}
+func (mup *mockUnicastPeer) GetAddress() string {
+ return ""
}
-
-// Build a ledger with genesis and one block, start an HTTPServer around it, use NetworkFetcher to fetch the block.
-// For smaller test, nee ledgerService_test.go TestGetBlockHTTP
-func TestGetBlockHTTP(t *testing.T) {
- // start server
- ledger, next, b, err := buildTestLedger(t)
- if err != nil {
- t.Fatal(err)
- return
- }
- ls := LedgerService{ledger: ledger, genesisID: "test genesisID"}
- nodeA := BasicRPCNode{}
- nodeA.RegisterHTTPHandler(LedgerServiceBlockPath, &ls)
- nodeA.start()
- defer nodeA.stop()
- rootURL := nodeA.rootURL()
-
- // run fetcher
- net := buildTestHTTPPeerSource(rootURL)
- _, ok := net.GetPeers(network.PeersConnectedOut)[0].(network.HTTPPeer)
- require.True(t, ok)
- factory := MakeNetworkFetcherFactory(net, numberOfPeers, nil)
- factory.log = logging.TestingLog(t)
- fetcher := factory.New()
- // we have one peer, the HTTP block server
- require.Equal(t, len(fetcher.(*NetworkFetcher).peers), 1)
-
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
- end := time.Now()
- require.NotNil(t, client)
- require.NoError(t, err)
-
- require.True(t, end.Sub(start) < 10*time.Second)
- require.Equal(t, &b, block)
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- }
-}
-
-type testUnicastPeerSrc struct {
- peers []network.Peer
- handler network.MessageHandler
-}
-
-func (s *testUnicastPeerSrc) GetPeers(options ...network.PeerOption) []network.Peer {
- if options[0] == network.PeersConnectedIn {
- return s.peers
- }
+func (mup *mockUnicastPeer) Unicast(ctx context.Context, data []byte, tag protocol.Tag) error {
return nil
}
-
-func (s *testUnicastPeerSrc) RegisterHTTPHandler(path string, handler http.Handler) {}
-func (s *testUnicastPeerSrc) RegisterHandlers(dispatch []network.TaggedMessageHandler) {
- if dispatch[0].Tag == protocol.UniCatchupResTag {
- s.handler = dispatch[0].MessageHandler
- }
-}
-
-// implement network.UnicastPeer
-type testUnicastPeer struct {
- c chan network.IncomingMessage
- h *testUnicastPeerSrc
+func (mup *mockUnicastPeer) Version() string {
+ return "2.1"
}
-
-func (p *testUnicastPeer) GetAddress() string {
- return "test"
+func (mup *mockUnicastPeer) Request(ctx context.Context, tag network.Tag, topics network.Topics) (resp *network.Response, e error) {
+ return nil, nil
}
-
-func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) error {
- if tag == protocol.UniCatchupReqTag { // we reuse this peer for both inbound and outbound messages
- // deliver to ledger service
- p.c <- network.IncomingMessage{Sender: p, Data: msg, Tag: tag} // fine to block when testing
- } else if tag == protocol.UniCatchupResTag {
- // this is from the ledger service
- p.h.handler.Handle(network.IncomingMessage{Sender: p, Data: msg, Tag: tag})
- }
+func (mup *mockUnicastPeer) Respond(ctx context.Context, reqMsg network.IncomingMessage, topics network.Topics) (e error) {
+ mup.responseTopics = topics
return nil
}
-func makeTestUnicastPeer(target chan network.IncomingMessage, delegate *testUnicastPeerSrc) network.UnicastPeer {
- wsp := testUnicastPeer{}
- wsp.c = target
- wsp.h = delegate
- return &wsp
-}
-
-func buildTestUnicastPeerSrc(t *testing.T, target chan network.IncomingMessage) *testUnicastPeerSrc {
- ps := new(testUnicastPeerSrc)
- up := makeTestUnicastPeer(target, ps)
- ps.peers = []network.Peer{up}
- return ps
-}
-
-// A quick GetBlock over websockets test hitting a mocked websocket server (no actual connection)
-func TestGetBlockWS(t *testing.T) {
- // start server
- ledger, next, b, err := buildTestLedger(t)
- if err != nil {
- t.Fatal(err)
- return
- }
- c := make(chan network.IncomingMessage, 50)
- ls := LedgerService{ledger: ledger, genesisID: "test genesisID", catchupReqs: c}
- ls.Start()
-
- // get ws fetcher
- net := buildTestUnicastPeerSrc(t, c)
- fs := RegisterWsFetcherService(logging.TestingLog(t), net)
-
- _, ok := net.GetPeers(network.PeersConnectedIn)[0].(network.UnicastPeer)
- require.True(t, ok)
- factory := MakeNetworkFetcherFactory(net, numberOfPeers, fs)
- factory.log = logging.TestingLog(t)
- fetcher := factory.NewOverGossip(protocol.UniCatchupReqTag)
- // we have one peer, the Ws block server
- require.Equal(t, fetcher.NumPeers(), 1)
-
- var block *bookkeeping.Block
- var cert *agreement.Certificate
- var client FetcherClient
-
- start := time.Now()
- block, cert, client, err = fetcher.FetchBlock(context.Background(), next)
- require.NotNil(t, client)
- require.NoError(t, err)
- end := time.Now()
- require.True(t, end.Sub(start) < 10*time.Second)
- require.Equal(t, &b, block)
- if err == nil {
- require.NotEqual(t, nil, block)
- require.NotEqual(t, nil, cert)
- }
- fetcher.Close()
-}
-
-type BasicRPCNode struct {
- listener net.Listener
- server http.Server
- rmux *mux.Router
- peers []network.Peer
-}
-
-func (b *BasicRPCNode) RegisterHTTPHandler(path string, handler http.Handler) {
- if b.rmux == nil {
- b.rmux = mux.NewRouter()
- }
- b.rmux.Handle(path, handler)
-}
-
-func (b *BasicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler) {
-}
-
-func (b *BasicRPCNode) start() bool {
- var err error
- b.listener, err = net.Listen("tcp", "")
- if err != nil {
- logging.Base().Error("tcp listen", err)
- return false
- }
- if b.rmux == nil {
- b.rmux = mux.NewRouter()
- }
- b.server.Handler = b.rmux
- go b.server.Serve(b.listener)
- return true
-}
-func (b *BasicRPCNode) rootURL() string {
- addr := b.listener.Addr().String()
- rootURL := url.URL{Scheme: "http", Host: addr, Path: ""}
- return rootURL.String()
-}
-
-func (b *BasicRPCNode) stop() {
- b.server.Close()
-}
-
-func (b *BasicRPCNode) GetPeers(options ...network.PeerOption) []network.Peer {
- return b.peers
-}
-
-func nodePair() (*BasicRPCNode, *BasicRPCNode) {
- nodeA := &BasicRPCNode{}
- nodeA.start()
- nodeB := &BasicRPCNode{}
- nodeB.start()
- nodeB.peers = []network.Peer{&testHTTPPeer{rootURL: nodeA.rootURL()}}
- nodeA.peers = []network.Peer{&testHTTPPeer{rootURL: nodeB.rootURL()}}
- return nodeA, nodeB
-}
-
-func TestGetBlockMocked(t *testing.T) {
- var user basics.Address
- user[0] = 123
+// TestHandleCatchupReqNegative covers the error reporting in handleCatchupReq
+func TestHandleCatchupReqNegative(t *testing.T) {
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesis := make(map[basics.Address]basics.AccountData)
- genesis[user] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ reqMsg := network.IncomingMessage{
+ Sender: &mockUnicastPeer{},
+ Data: nil, // topics
}
- genesis[sinkAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[poolAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
+ ls := LedgerService{
+ ledger: nil,
}
- log := logging.TestingLog(t)
- // A network with two nodes, A and B
- nodeA, nodeB := nodePair()
- defer nodeA.stop()
- defer nodeB.stop()
+ // case where topics is nil
+ ls.handleCatchupReq(context.Background(), reqMsg)
+ respTopics := reqMsg.Sender.(*mockUnicastPeer).responseTopics
+ val, found := respTopics.GetValue(network.ErrorKey)
+ require.Equal(t, true, found)
+ require.Equal(t, "UnmarshallTopics: could not read the number of topics", string(val))
- // A is running the ledger service and will respond to fetch requests
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
- const inMem = true
- const archival = true
- ledgerA, err := data.LoadLedger(
- log.With("name", "A"), t.Name(), inMem,
- protocol.ConsensusCurrentVersion, genBal, "", crypto.Digest{},
- nil, archival,
- )
- if err != nil {
- t.Errorf("Couldn't make ledger: %v", err)
- }
- RegisterLedgerService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID")
+ // case where round number is missing
+ reqTopics := network.Topics{}
+ reqMsg.Data = reqTopics.MarshallTopics()
+ ls.handleCatchupReq(context.Background(), reqMsg)
+ respTopics = reqMsg.Sender.(*mockUnicastPeer).responseTopics
- next := ledgerA.NextRound()
- genHash := crypto.Digest{0x42}
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: user,
- Fee: basics.MicroAlgos{Raw: proto.MinTxnFee},
- FirstValid: next,
- LastValid: next,
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: user,
- Amount: basics.MicroAlgos{Raw: 2},
- },
- }
- signedtx := transactions.SignedTxn{
- Txn: tx,
- }
+ val, found = respTopics.GetValue(network.ErrorKey)
+ require.Equal(t, true, found)
+ require.Equal(t, noRoundNumberErrMsg, string(val))
- var b bookkeeping.Block
- prev, err := ledgerA.Block(ledgerA.LastRound())
- require.NoError(t, err)
- b.RewardsLevel = prev.RewardsLevel
- b.BlockHeader.Round = next
- b.BlockHeader.GenesisHash = genHash
- b.CurrentProtocol = protocol.ConsensusCurrentVersion
- txib, err := b.EncodeSignedTxn(signedtx, transactions.ApplyData{})
- require.NoError(t, err)
- b.Payset = []transactions.SignedTxnInBlock{
- txib,
- }
- require.NoError(t, ledgerA.AddBlock(b, agreement.Certificate{Round: next}))
+ // case where data type is missing
+ roundNumberData := make([]byte, 0)
+ reqTopics = network.Topics{network.MakeTopic(roundKey, roundNumberData)}
+ reqMsg.Data = reqTopics.MarshallTopics()
+ ls.handleCatchupReq(context.Background(), reqMsg)
+ respTopics = reqMsg.Sender.(*mockUnicastPeer).responseTopics
- // B tries to fetch block
- factory := MakeNetworkFetcherFactory(nodeB, 10, nil)
- factory.log = logging.TestingLog(t)
- nodeBRPC := factory.New()
- ctx, cf := context.WithTimeout(context.Background(), time.Second)
- defer cf()
- eblock, _, _, err := nodeBRPC.FetchBlock(ctx, next)
- if err != nil {
- t.Errorf("Error fetching block: %v", err)
- }
- block, err := ledgerA.Block(next)
- if err != nil {
- panic(err)
- }
- if eblock.Hash() != block.Hash() {
- t.Errorf("FetchBlock returned wrong block: expected %v; got %v", block.Hash(), eblock)
- }
-}
-
-func TestGetFutureBlock(t *testing.T) {
- log := logging.TestingLog(t)
- // A network with two nodes, A and B
- nodeA, nodeB := nodePair()
- defer nodeA.stop()
- defer nodeB.stop()
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesis := make(map[basics.Address]basics.AccountData)
- genesis[sinkAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[poolAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
-
- gen := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
- // A is running the ledger service and will respond to fetch requests
- const inMem = true
- const archival = true
- ledgerA, err := data.LoadLedger(
- log.With("name", "A"), t.Name(), inMem,
- protocol.ConsensusCurrentVersion, gen, "", crypto.Digest{},
- nil, archival,
- )
- if err != nil {
- t.Errorf("Couldn't make ledger: %v", err)
- }
- RegisterLedgerService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID")
-
- // B tries to fetch block 4
- factory := MakeNetworkFetcherFactory(nodeB, 10, nil)
- factory.log = logging.TestingLog(t)
- nodeBRPC := factory.New()
- ctx, cf := context.WithTimeout(context.Background(), time.Second)
- defer cf()
- _, _, client, err := nodeBRPC.FetchBlock(ctx, ledgerA.NextRound())
- require.Error(t, err)
- require.Nil(t, client)
-}
-
-func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) {
- var user basics.Address
- user[0] = 123
-
- proto := config.Consensus[protocol.ConsensusCurrentVersion]
- genesis := make(map[basics.Address]basics.AccountData)
- genesis[user] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[sinkAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
- genesis[poolAddr] = basics.AccountData{
- Status: basics.Online,
- MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2},
- }
-
- log := logging.TestingLog(t)
- genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr)
- genHash := crypto.Digest{0x42}
- const inMem = true
- const archival = true
- ledger, err = data.LoadLedger(
- log, t.Name(), inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash,
- nil, archival,
- )
- if err != nil {
- t.Fatal("couldn't build ledger", err)
- return
- }
- next = ledger.NextRound()
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: user,
- Fee: basics.MicroAlgos{Raw: proto.MinTxnFee},
- FirstValid: next,
- LastValid: next,
- GenesisHash: genHash,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: user,
- Amount: basics.MicroAlgos{Raw: 2},
- },
- }
- signedtx := transactions.SignedTxn{
- Txn: tx,
- }
+ val, found = respTopics.GetValue(network.ErrorKey)
+ require.Equal(t, true, found)
+ require.Equal(t, noDataTypeErrMsg, string(val))
- prev, err := ledger.Block(ledger.LastRound())
- require.NoError(t, err)
- b.RewardsLevel = prev.RewardsLevel
- b.BlockHeader.Round = next
- b.BlockHeader.GenesisHash = genHash
- b.CurrentProtocol = protocol.ConsensusCurrentVersion
- txib, err := b.EncodeSignedTxn(signedtx, transactions.ApplyData{})
- require.NoError(t, err)
- b.Payset = []transactions.SignedTxnInBlock{
- txib,
+ // case where round number is corrupted
+ roundNumberData = make([]byte, 0)
+ reqTopics = network.Topics{network.MakeTopic(roundKey, roundNumberData),
+ network.MakeTopic(requestDataTypeKey, []byte(blockAndCertValue)),
}
+ reqMsg.Data = reqTopics.MarshallTopics()
+ ls.handleCatchupReq(context.Background(), reqMsg)
+ respTopics = reqMsg.Sender.(*mockUnicastPeer).responseTopics
- require.NoError(t, ledger.AddBlock(b, agreement.Certificate{Round: next}))
- return
+ val, found = respTopics.GetValue(network.ErrorKey)
+ require.Equal(t, true, found)
+ require.Equal(t, roundNumberParseErrMsg, string(val))
}
diff --git a/rpcs/msgp_gen.go b/rpcs/msgp_gen.go
new file mode 100644
index 0000000000..84ae94b9e3
--- /dev/null
+++ b/rpcs/msgp_gen.go
@@ -0,0 +1,126 @@
+package rpcs
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "github.com/algorand/msgp/msgp"
+)
+
+// MarshalMsg implements msgp.Marshaler
+func (z *EncodedBlockCert) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "block"
+ o = append(o, 0x82, 0xa5, 0x62, 0x6c, 0x6f, 0x63, 0x6b)
+ o, err = (*z).Block.MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "Block")
+ return
+ }
+ // string "cert"
+ o = append(o, 0xa4, 0x63, 0x65, 0x72, 0x74)
+ o, err = (*z).Certificate.MarshalMsg(o)
+ if err != nil {
+ err = msgp.WrapError(err, "Certificate")
+ return
+ }
+ return
+}
+
+func (_ *EncodedBlockCert) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*EncodedBlockCert)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *EncodedBlockCert) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Block.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Block")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).Certificate.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Certificate")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = EncodedBlockCert{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "block":
+ bts, err = (*z).Block.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Block")
+ return
+ }
+ case "cert":
+ bts, err = (*z).Certificate.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Certificate")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *EncodedBlockCert) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*EncodedBlockCert)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *EncodedBlockCert) Msgsize() (s int) {
+ s = 1 + 6 + (*z).Block.Msgsize() + 5 + (*z).Certificate.Msgsize()
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *EncodedBlockCert) MsgIsZero() bool {
+ return ((*z).Block.MsgIsZero()) && ((*z).Certificate.MsgIsZero())
+}
diff --git a/rpcs/msgp_gen_test.go b/rpcs/msgp_gen_test.go
new file mode 100644
index 0000000000..7559695a81
--- /dev/null
+++ b/rpcs/msgp_gen_test.go
@@ -0,0 +1,72 @@
+package rpcs
+
+// Code generated by github.com/algorand/msgp DO NOT EDIT.
+
+import (
+ "testing"
+
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/msgp/msgp"
+)
+
+func TestMarshalUnmarshalEncodedBlockCert(t *testing.T) {
+ v := EncodedBlockCert{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingEncodedBlockCert(t *testing.T) {
+ protocol.RunEncodingTest(t, &EncodedBlockCert{})
+}
+
+func BenchmarkMarshalMsgEncodedBlockCert(b *testing.B) {
+ v := EncodedBlockCert{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgEncodedBlockCert(b *testing.B) {
+ v := EncodedBlockCert{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts, _ = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts, _ = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalEncodedBlockCert(b *testing.B) {
+ v := EncodedBlockCert{}
+ bts, _ := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/rpcs/txService.go b/rpcs/txService.go
index 9e1fa23388..76eae00295 100644
--- a/rpcs/txService.go
+++ b/rpcs/txService.go
@@ -130,7 +130,7 @@ func (txs *TxService) ServeHTTP(response http.ResponseWriter, request *http.Requ
return
}
txns := txs.getFilteredTxns(filter)
- txblob := protocol.Encode(txns)
+ txblob := protocol.EncodeReflect(txns)
txs.log.Debugf("sending %d txns in %d bytes", len(txns), len(txblob))
response.Header().Set("Content-Length", strconv.Itoa(len(txblob)))
response.Header().Set("Content-Type", responseContentType)
diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go
index 9ae78ab5e7..4346a7619f 100644
--- a/rpcs/txService_test.go
+++ b/rpcs/txService_test.go
@@ -17,16 +17,23 @@
package rpcs
import (
+ "net"
+ "net/http"
+ "net/url"
"os"
+ "strings"
"sync"
"sync/atomic"
"testing"
"time"
+ "github.com/gorilla/mux"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/util/bloom"
)
@@ -35,6 +42,89 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
+type httpTestPeerSource struct {
+ peers []network.Peer
+ mocks.MockNetwork
+}
+
+func (s *httpTestPeerSource) GetPeers(options ...network.PeerOption) []network.Peer {
+ return s.peers
+}
+
+// implement network.HTTPPeer
+type testHTTPPeer string
+
+func (p testHTTPPeer) GetAddress() string {
+ return string(p)
+}
+func (p *testHTTPPeer) PrepareURL(x string) string {
+ return strings.Replace(x, "{genesisID}", "test genesisID", -1)
+}
+func (p *testHTTPPeer) GetHTTPClient() *http.Client {
+ return &http.Client{}
+}
+func (p *testHTTPPeer) GetHTTPPeer() network.HTTPPeer {
+ return p
+}
+
+type basicRPCNode struct {
+ listener net.Listener
+ server http.Server
+ rmux *mux.Router
+ peers []network.Peer
+ mocks.MockNetwork
+}
+
+func (b *basicRPCNode) RegisterHTTPHandler(path string, handler http.Handler) {
+ if b.rmux == nil {
+ b.rmux = mux.NewRouter()
+ }
+ b.rmux.Handle(path, handler)
+}
+
+func (b *basicRPCNode) RegisterHandlers(dispatch []network.TaggedMessageHandler) {
+}
+
+func (b *basicRPCNode) start() bool {
+ var err error
+ b.listener, err = net.Listen("tcp", "")
+ if err != nil {
+ logging.Base().Error("tcp listen", err)
+ return false
+ }
+ if b.rmux == nil {
+ b.rmux = mux.NewRouter()
+ }
+ b.server.Handler = b.rmux
+ go b.server.Serve(b.listener)
+ return true
+}
+func (b *basicRPCNode) rootURL() string {
+ addr := b.listener.Addr().String()
+ rootURL := url.URL{Scheme: "http", Host: addr, Path: ""}
+ return rootURL.String()
+}
+
+func (b *basicRPCNode) stop() {
+ b.server.Close()
+}
+
+func (b *basicRPCNode) GetPeers(options ...network.PeerOption) []network.Peer {
+ return b.peers
+}
+
+func nodePair() (*basicRPCNode, *basicRPCNode) {
+ nodeA := &basicRPCNode{}
+ nodeA.start()
+ nodeB := &basicRPCNode{}
+ nodeB.start()
+ httpPeerA := testHTTPPeer(nodeA.rootURL())
+ httpPeerB := testHTTPPeer(nodeB.rootURL())
+ nodeB.peers = []network.Peer{&httpPeerA}
+ nodeA.peers = []network.Peer{&httpPeerB}
+ return nodeA, nodeB
+}
+
func TestTxSync(t *testing.T) {
// A network with two nodes, A and B
nodeA, nodeB := nodePair()
diff --git a/rpcs/txSyncer.go b/rpcs/txSyncer.go
index db0acef0f0..81560fcf99 100644
--- a/rpcs/txSyncer.go
+++ b/rpcs/txSyncer.go
@@ -42,15 +42,10 @@ type TxSyncClient interface {
Close() error
}
-// PeerSource is a subset of network.GossipNode
-type PeerSource interface {
- GetPeers(options ...network.PeerOption) []network.Peer
-}
-
// TxSyncer fetches pending transactions that are missing from its pool, and feeds them to the handler
type TxSyncer struct {
pool PendingTxAggregate
- clientSource PeerSource
+ clientSource network.GossipNode
handler data.SolicitedTxHandler
ctx context.Context
cancel context.CancelFunc
@@ -63,7 +58,7 @@ type TxSyncer struct {
}
// MakeTxSyncer returns a TxSyncer
-func MakeTxSyncer(pool PendingTxAggregate, clientSource PeerSource, txHandler data.SolicitedTxHandler, syncInterval time.Duration, syncTimeout time.Duration, serverResponseSize int) *TxSyncer {
+func MakeTxSyncer(pool PendingTxAggregate, clientSource network.GossipNode, txHandler data.SolicitedTxHandler, syncInterval time.Duration, syncTimeout time.Duration, serverResponseSize int) *TxSyncer {
ctx, cancel := context.WithCancel(context.Background())
return &TxSyncer{
pool: pool,
@@ -129,7 +124,7 @@ func (syncer *TxSyncer) syncFromClient(client TxSyncClient) error {
defer cf()
txgroups, err := client.Sync(ctx, filter)
if err != nil {
- return fmt.Errorf("TxSyncer.Sync: peer %v error %v", client.Address(), err)
+ return fmt.Errorf("TxSyncer.Sync: peer '%v' error '%v'", client.Address(), err)
}
// test to see if all the transaction that we've received honor the bloom filter constraints
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index 9498d51030..049e44ddf6 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -17,20 +17,27 @@
package rpcs
import (
+ "context"
+ "errors"
+ "net/http"
"net/rpc"
+ "strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/components/mocks"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/util/bloom"
)
type mockPendingTxAggregate struct {
@@ -85,12 +92,86 @@ func (handler *mockHandler) Handle(txgroup []transactions.SignedTxn) error {
const testSyncInterval = 5 * time.Second
const testSyncTimeout = 4 * time.Second
+type mockRunner struct {
+ ran bool
+ done chan *rpc.Call
+ failWithNil bool
+ failWithError bool
+ txgroups [][]transactions.SignedTxn
+}
+
+type mockRPCClient struct {
+ client *mockRunner
+ closed bool
+ rootURL string
+ log logging.Logger
+}
+
+func (client *mockRPCClient) Close() error {
+ client.closed = true
+ return nil
+}
+
+func (client *mockRPCClient) Address() string {
+ return "mock.address."
+}
+func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups [][]transactions.SignedTxn, err error) {
+ client.log.Info("MockRPCClient.Sync")
+ select {
+ case <-ctx.Done():
+ return nil, errors.New("cancelled")
+ default:
+ }
+ if client.client.failWithNil {
+ return nil, errors.New("old failWithNil")
+ }
+ if client.client.failWithError {
+ return nil, errors.New("failing call")
+ }
+ return client.client.txgroups, nil
+}
+func (client *mockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) {
+ return nil, nil
+}
+
+// network.HTTPPeer interface
+func (client *mockRPCClient) GetAddress() string {
+ return client.rootURL
+}
+func (client *mockRPCClient) GetHTTPClient() *http.Client {
+ return nil
+}
+func (client *mockRPCClient) PrepareURL(x string) string {
+ return strings.Replace(x, "{genesisID}", "test genesisID", -1)
+}
+
+type mockClientAggregator struct {
+ mocks.MockNetwork
+ peers []network.Peer
+}
+
+func (mca *mockClientAggregator) GetPeers(options ...network.PeerOption) []network.Peer {
+ return mca.peers
+}
+
+const numberOfPeers = 10
+
+func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool) *mockClientAggregator {
+ clients := make([]network.Peer, 0)
+ for i := 0; i < numberOfPeers; i++ {
+ runner := mockRunner{failWithNil: failWithNil, failWithError: failWithError, done: make(chan *rpc.Call)}
+ clients = append(clients, &mockRPCClient{client: &runner, log: logging.TestingLog(t)})
+ }
+ t.Logf("len(mca.clients) = %d", len(clients))
+ return &mockClientAggregator{peers: clients}
+}
+
func TestSyncFromClient(t *testing.T) {
clientPool := makeMockPendingTxAggregate(2)
serverPool := makeMockPendingTxAggregate(1)
- runner := MockRunner{failWithNil: false, failWithError: false, txgroups: serverPool.Pending()[len(serverPool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: serverPool.Pending()[len(serverPool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(clientPool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
syncer.log = logging.TestingLog(t)
@@ -101,9 +182,9 @@ func TestSyncFromClient(t *testing.T) {
func TestSyncFromUnsupportedClient(t *testing.T) {
pool := makeMockPendingTxAggregate(3)
- runner := MockRunner{failWithNil: true, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: true, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
syncer.log = logging.TestingLog(t)
@@ -114,9 +195,9 @@ func TestSyncFromUnsupportedClient(t *testing.T) {
func TestSyncFromClientAndQuit(t *testing.T) {
pool := makeMockPendingTxAggregate(3)
- runner := MockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
syncer.log = logging.TestingLog(t)
@@ -128,9 +209,9 @@ func TestSyncFromClientAndQuit(t *testing.T) {
func TestSyncFromClientAndError(t *testing.T) {
pool := makeMockPendingTxAggregate(3)
- runner := MockRunner{failWithNil: false, failWithError: true, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: true, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
syncer.log = logging.TestingLog(t)
@@ -140,9 +221,9 @@ func TestSyncFromClientAndError(t *testing.T) {
func TestSyncFromClientAndTimeout(t *testing.T) {
pool := makeMockPendingTxAggregate(3)
- runner := MockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncTimeout := time.Duration(0)
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, syncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
@@ -153,15 +234,15 @@ func TestSyncFromClientAndTimeout(t *testing.T) {
func TestSync(t *testing.T) {
pool := makeMockPendingTxAggregate(1)
- nodeA := BasicRPCNode{}
+ nodeA := basicRPCNode{}
txservice := makeTxService(pool, "test genesisID", config.GetDefaultLocal().TxPoolSize, config.GetDefaultLocal().TxSyncServeResponseSize)
nodeA.RegisterHTTPHandler(TxServiceHTTPPath, txservice)
nodeA.start()
nodeAURL := nodeA.rootURL()
- runner := MockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, rootURL: nodeAURL, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncerPool := makeMockPendingTxAggregate(3)
syncer := MakeTxSyncer(syncerPool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
@@ -173,7 +254,7 @@ func TestSync(t *testing.T) {
func TestNoClientsSync(t *testing.T) {
pool := makeMockPendingTxAggregate(3)
- clientAgg := MockClientAggregator{peers: []network.Peer{}}
+ clientAgg := mockClientAggregator{peers: []network.Peer{}}
handler := mockHandler{}
syncer := MakeTxSyncer(pool, &clientAgg, &handler, testSyncInterval, testSyncTimeout, config.GetDefaultLocal().TxSyncServeResponseSize)
syncer.log = logging.TestingLog(t)
@@ -185,9 +266,9 @@ func TestNoClientsSync(t *testing.T) {
func TestStartAndStop(t *testing.T) {
t.Skip("TODO: replace this test in new client paradigm")
pool := makeMockPendingTxAggregate(3)
- runner := MockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncInterval := time.Second
syncTimeout := time.Second
@@ -213,9 +294,9 @@ func TestStartAndStop(t *testing.T) {
func TestStartAndQuit(t *testing.T) {
pool := makeMockPendingTxAggregate(3)
- runner := MockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
- client := MockRPCClient{client: &runner, log: logging.TestingLog(t)}
- clientAgg := MockClientAggregator{peers: []network.Peer{&client}}
+ runner := mockRunner{failWithNil: false, failWithError: false, txgroups: pool.Pending()[len(pool.Pending())-1:], done: make(chan *rpc.Call)}
+ client := mockRPCClient{client: &runner, log: logging.TestingLog(t)}
+ clientAgg := mockClientAggregator{peers: []network.Peer{&client}}
handler := mockHandler{}
syncInterval := time.Second
syncTimeout := time.Second
diff --git a/rpcs/wsFetcherService.go b/rpcs/wsFetcherService.go
index 140e1d9528..9850e1b7ca 100644
--- a/rpcs/wsFetcherService.go
+++ b/rpcs/wsFetcherService.go
@@ -18,6 +18,7 @@ package rpcs
import (
"context"
+ "encoding/binary"
"fmt"
"github.com/algorand/go-deadlock"
@@ -36,6 +37,15 @@ type WsFetcherService struct {
pendingRequests map[string]chan WsGetBlockOut
}
+// Constant strings used as keys for topics
+const (
+ roundKey = "roundKey" // Block round-number topic-key in the request
+ requestDataTypeKey = "requestDataType" // Data-type topic-key in the request (e.g. block, cert, block+cert)
+ blockDataKey = "blockData" // Block-data topic-key in the response
+ certDataKey = "certData" // Cert-data topic-key in the response
+ blockAndCertValue = "blockAndCert" // block+cert request data (as the value of requestDataTypeKey)
+)
+
func makePendingRequestKey(target network.UnicastPeer, round basics.Round, tag protocol.Tag) string {
return fmt.Sprintf("<%s>:%d:%s", target.GetAddress(), round, tag)
@@ -60,7 +70,7 @@ func (fs *WsFetcherService) handleNetworkMsg(msg network.IncomingMessage) (out n
return
}
- if decodeErr := protocol.Decode(msg.Data, &resp); decodeErr != nil {
+ if decodeErr := protocol.DecodeReflect(msg.Data, &resp); decodeErr != nil {
fs.log.Warnf("WsFetcherService(%s): request failed: unable to decode message : %v", uniPeer.GetAddress(), decodeErr)
out.Action = network.Disconnect
return
@@ -104,25 +114,66 @@ func (fs *WsFetcherService) RequestBlock(ctx context.Context, target network.Uni
delete(fs.pendingRequests, waitKey)
fs.mu.Unlock()
}()
+ if target.Version() == "1" {
+ req := WsGetBlockRequest{Round: uint64(round)}
+ err := target.Unicast(ctx, protocol.EncodeReflect(req), tag)
+ if err != nil {
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): unicast failed, %v", round, err)
+ }
+ select {
+ case resp := <-waitCh:
+ return resp, nil
+ case <-ctx.Done():
+ switch ctx.Err() {
+ case context.DeadlineExceeded:
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): request to %s was timed out", round, target.GetAddress())
+ case context.Canceled:
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): request to %s was cancelled by context", round, target.GetAddress())
+ default:
+ return WsGetBlockOut{}, ctx.Err()
+ }
+ }
+ }
- req := WsGetBlockRequest{Round: uint64(round)}
- err := target.Unicast(ctx, protocol.Encode(req), tag)
+ // Else, if version == 2.1
+ roundBin := make([]byte, binary.MaxVarintLen64)
+ binary.PutUvarint(roundBin, uint64(round))
+ topics := network.Topics{
+ network.MakeTopic(requestDataTypeKey,
+ []byte(blockAndCertValue)),
+ network.MakeTopic(
+ roundKey,
+ roundBin),
+ }
+ resp, err := target.Request(ctx, tag, topics)
if err != nil {
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): unicast failed, %v", round, err)
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s).RequestBlock(%d): Request failed, %v", target.GetAddress(), round, err)
}
- select {
- case resp := <-waitCh:
- return resp, nil
- case <-ctx.Done():
- switch ctx.Err() {
- case context.DeadlineExceeded:
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): request to %s was timed out", round, target.GetAddress())
- case context.Canceled:
- return WsGetBlockOut{}, fmt.Errorf("WsFetcherService.RequestBlock(%d): request to %s was cancelled by context", round, target.GetAddress())
- default:
- return WsGetBlockOut{}, ctx.Err()
- }
+
+ if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found {
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s).RequestBlock(%d): Request failed, %s", target.GetAddress(), round, string(errMsg))
+ }
+
+ blk, found := resp.Topics.GetValue(blockDataKey)
+ if !found {
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s): request failed: block data not found", target.GetAddress())
+ }
+ cert, found := resp.Topics.GetValue(certDataKey)
+ if !found {
+ return WsGetBlockOut{}, fmt.Errorf("WsFetcherService(%s): request failed: cert data not found", target.GetAddress())
+ }
+
+ // For backward compatibility, the block and cert are repackaged here.
+ // This can be dropped once the v1 is dropped.
+ blockCertBytes := protocol.EncodeReflect(PreEncodedBlockCert{
+ Block: blk,
+ Certificate: cert})
+
+ wsBlockOut := WsGetBlockOut{
+ Round: uint64(round),
+ BlockBytes: blockCertBytes,
}
+ return wsBlockOut, nil
}
// RegisterWsFetcherService creates and returns a WsFetcherService that services gossip fetcher responses
diff --git a/scripts/build/beta.sh b/scripts/build/beta.sh
index 7bf500e259..8615d240c1 100755
--- a/scripts/build/beta.sh
+++ b/scripts/build/beta.sh
@@ -26,11 +26,3 @@ make
git add -A
git commit -m "Build ${BUILD_NUMBER}"
git push
-
-TAG=rel/beta-$(scripts/compute_build_number.sh -f)
-if [ ! -z "${SIGNING_KEY_ADDR}" ]; then
- git tag -s -u "${SIGNING_KEY_ADDR}" ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
-else
- git tag -a ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
-fi
-git push origin ${TAG}
diff --git a/scripts/build/nightly.sh b/scripts/build/nightly.sh
index 58bb7c9666..1887b1798f 100755
--- a/scripts/build/nightly.sh
+++ b/scripts/build/nightly.sh
@@ -29,9 +29,5 @@ git add ./genesistimestamp.dat ./buildnumber.dat
git commit -m "Build ${BUILD_NUMBER} Data"
git push
-TAG=rel/nightly-$(scripts/compute_build_number.sh -f)
-git tag -a ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
-git push origin ${TAG}
-
popd
rm -rf ${REPO_DIR}
diff --git a/scripts/build/stable.sh b/scripts/build/stable.sh
index 675b69eee6..b76f890b6c 100755
--- a/scripts/build/stable.sh
+++ b/scripts/build/stable.sh
@@ -25,11 +25,3 @@ make
git add -A
git commit -m "Build ${BUILD_NUMBER}"
git push
-
-TAG=rel/stable-$(scripts/compute_build_number.sh -f)
-if [ ! -z "${SIGNING_KEY_ADDR}" ]; then
- git tag -s -u "${SIGNING_KEY_ADDR}" ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
-else
- git tag -a ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
-fi
-git push origin ${TAG}
diff --git a/scripts/build_deb.sh b/scripts/build_deb.sh
index 7899b58008..4ff908c758 100755
--- a/scripts/build_deb.sh
+++ b/scripts/build_deb.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+# shellcheck disable=2038,2064
# build_deb.sh - Build a .deb package for one platform.
#
@@ -15,20 +16,21 @@ fi
## Need to run inside fakeroot to make sure the files in
## the Debian package are owned by root.
if [ "$EUID" != "0" ]; then
- exec fakeroot $0 "$@"
+ exec fakeroot "$0" "$@"
fi
OS=linux
ARCH=$1
OUTDIR="$2"
-export GOPATH=$(go env GOPATH)
+GOPATH=$(go env GOPATH)
+export GOPATH
REPO_DIR=$(pwd)
echo "Building debian package for '${OS} - ${ARCH}'"
if [ -z "${NO_BUILD}" ]; then
- env GOOS=${OS} GOARCH=${ARCH} scripts/build_prod.sh
+ env GOOS="${OS}" GOARCH="${ARCH}" scripts/build_prod.sh
else
echo "already built"
true
@@ -44,7 +46,7 @@ DEFAULT_RELEASE_NETWORK=$(./scripts/compute_branch_release_network.sh "${DEFAULT
PKG_ROOT=$(mktemp -d)
trap "rm -rf $PKG_ROOT" 0
-mkdir -p ${PKG_ROOT}/usr/bin
+mkdir -p "${PKG_ROOT}/usr/bin"
if [ "${VARIATION}" = "" ]; then
# NOTE: keep in sync with installer/rpm/algorand.spec
@@ -52,37 +54,37 @@ if [ "${VARIATION}" = "" ]; then
fi
for bin in "${bin_files[@]}"; do
- cp ${GOPATH}/bin/${bin} ${PKG_ROOT}/usr/bin
- chmod 755 ${PKG_ROOT}/usr/bin/${bin}
+ cp "${GOPATH}/bin/${bin}" "${PKG_ROOT}"/usr/bin
+ chmod 755 "${PKG_ROOT}/usr/bin/${bin}"
done
-mkdir -p ${PKG_ROOT}/usr/lib/algorand
+mkdir -p "${PKG_ROOT}/usr/lib/algorand"
lib_files=("updater" "find-nodes.sh")
for lib in "${lib_files[@]}"; do
- cp ${GOPATH}/bin/${lib} ${PKG_ROOT}/usr/lib/algorand
- chmod g-w ${PKG_ROOT}/usr/lib/algorand/${lib}
+ cp "${GOPATH}/bin/${lib}" "${PKG_ROOT}/usr/lib/algorand"
+ chmod g-w "${PKG_ROOT}/usr/lib/algorand/${lib}"
done
data_files=("config.json.example" "system.json")
-mkdir -p ${PKG_ROOT}/var/lib/algorand
+mkdir -p "${PKG_ROOT}/var/lib/algorand"
for data in "${data_files[@]}"; do
- cp installer/${data} ${PKG_ROOT}/var/lib/algorand
+ cp "installer/${data}" "${PKG_ROOT}/var/lib/algorand"
done
if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
for dir in "${genesis_dirs[@]}"; do
- mkdir -p ${PKG_ROOT}/var/lib/algorand/genesis/${dir}
- cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json ${PKG_ROOT}/var/lib/algorand/genesis/${dir}/genesis.json
+ mkdir -p "${PKG_ROOT}/var/lib/algorand/genesis/${dir}"
+ cp "${REPO_DIR}/installer/genesis/${dir}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis/${dir}/genesis.json"
#${GOPATH}/bin/buildtools genesis ensure -n ${dir} --source ${REPO_DIR}/gen/${dir}/genesis.json --target ${PKG_ROOT}/var/lib/algorand/genesis/${dir}/genesis.json --releasedir ${REPO_DIR}/installer/genesis
done
# Copy the appropriate network genesis.json for our default (in root ./genesis folder)
- cp ${PKG_ROOT}/var/lib/algorand/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json ${PKG_ROOT}/var/lib/algorand
+ cp "${PKG_ROOT}/var/lib/algorand/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand"
elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta" ]]; then
- cp ${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/var/lib/algorand/genesis.json
+ cp "${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis.json"
#${GOPATH}/bin/buildtools genesis ensure -n ${DEFAULTNETWORK} --source ${REPO_DIR}/gen/${DEFAULTNETWORK}/genesis.json --target ${PKG_ROOT}/var/lib/algorand/genesis.json --releasedir ${REPO_DIR}/installer/genesis
else
- cp ${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/var/lib/algorand
+ cp "${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand"
# Disabled because we have static genesis files now
#cp gen/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/var/lib/algorand
#if [ -z "${TIMESTAMP}" ]; then
@@ -92,34 +94,34 @@ else
fi
systemd_files=("algorand.service" "algorand@.service")
-mkdir -p ${PKG_ROOT}/lib/systemd/system
+mkdir -p "${PKG_ROOT}/lib/systemd/system"
for svc in "${systemd_files[@]}"; do
- cp installer/${svc} ${PKG_ROOT}/lib/systemd/system
- chmod 644 ${PKG_ROOT}/lib/systemd/system/${svc}
+ cp "installer/${svc}" "${PKG_ROOT}/lib/systemd/system"
+ chmod 644 "${PKG_ROOT}/lib/systemd/system/${svc}"
done
unattended_upgrades_files=("51algorand-upgrades")
-mkdir -p ${PKG_ROOT}/etc/apt/apt.conf.d
+mkdir -p "${PKG_ROOT}/etc/apt/apt.conf.d"
for f in "${unattended_upgrades_files[@]}"; do
- cp installer/${f} ${PKG_ROOT}/etc/apt/apt.conf.d
+ cp "installer/${f}" "${PKG_ROOT}/etc/apt/apt.conf.d"
done
# files should not be group writable but directories should be
-chmod -R g-w ${PKG_ROOT}/var/lib/algorand
-find ${PKG_ROOT}/var/lib/algorand -type d | xargs chmod g+w
+chmod -R g-w "${PKG_ROOT}/var/lib/algorand"
+find "${PKG_ROOT}/var/lib/algorand" -type d | xargs chmod g+w
-mkdir -p ${PKG_ROOT}/DEBIAN
+mkdir -p "${PKG_ROOT}/DEBIAN"
debian_files=("control" "postinst" "prerm" "postrm" "conffiles")
for ctl in "${debian_files[@]}"; do
# Copy first, to preserve permissions, then overwrite to fill in template.
- cp -a installer/debian/${ctl} ${PKG_ROOT}/DEBIAN/${ctl}
- cat installer/debian/${ctl} \
- | sed -e s,@ARCH@,${ARCH}, \
- -e s,@VER@,${VER}, \
- > ${PKG_ROOT}/DEBIAN/${ctl}
+ cp -a "installer/debian/${ctl}" "${PKG_ROOT}/DEBIAN/${ctl}"
+ < installer/debian/"${ctl}" \
+ sed -e s,@ARCH@,"${ARCH}", \
+ -e s,@VER@,"${VER}", \
+ > "${PKG_ROOT}/DEBIAN/${ctl}"
done
# TODO: make `Files:` segments for vendor/... and crypto/libsodium-fork, but reasonably this should be understood to cover all _our_ files and copied in packages continue to be licenced under their own terms
-cat <${PKG_ROOT}/DEBIAN/copyright
+cat < "${PKG_ROOT}/DEBIAN/copyright"
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: Algorand
Upstream-Contact: Algorand developers
@@ -129,9 +131,10 @@ Files: *
Copyright: Algorand developers
License: AGPL-3+
EOF
-sed 's/^$/./g' < COPYING | sed 's/^/ /g' >> ${PKG_ROOT}/DEBIAN/copyright
-mkdir -p ${PKG_ROOT}/usr/share/doc/algorand
-cp -p ${PKG_ROOT}/DEBIAN/copyright ${PKG_ROOT}/usr/share/doc/algorand/copyright
+sed 's/^$/./g' < COPYING | sed 's/^/ /g' >> "${PKG_ROOT}/DEBIAN/copyright"
+mkdir -p "${PKG_ROOT}/usr/share/doc/algorand"
+cp -p "${PKG_ROOT}/DEBIAN/copyright" "${PKG_ROOT}/usr/share/doc/algorand/copyright"
+
+OUTPUT="$OUTDIR/algorand_${VER}_${ARCH}.deb"
+dpkg-deb --build "${PKG_ROOT}" "${OUTPUT}"
-OUTPUT="$OUTDIR"/algorand_${VER}_${ARCH}.deb
-dpkg-deb --build ${PKG_ROOT} ${OUTPUT}
diff --git a/scripts/build_package.sh b/scripts/build_package.sh
index 6f3e41a309..4f1a1b09e7 100755
--- a/scripts/build_package.sh
+++ b/scripts/build_package.sh
@@ -33,6 +33,7 @@ if [ ! -d "${PKG_ROOT}" ]; then
fi
export GOPATH=$(go env GOPATH)
+export GOPATHBIN=${GOPATH%%:*}/bin
REPO_DIR=$(pwd)
echo "Building package for '${OS} - ${ARCH}'"
@@ -59,7 +60,7 @@ mkdir ${PKG_ROOT}/bin
# If you modify this list, also update this list in ./cmd/updater/update.sh backup_binaries()
bin_files=("algocfg" "algod" "algoh" "algokey" "carpenter" "catchupsrv" "ddconfig.sh" "diagcfg" "find-nodes.sh" "goal" "kmd" "msgpacktool" "node_exporter" "update.sh" "updater" "COPYING")
for bin in "${bin_files[@]}"; do
- cp ${GOPATH}/bin/${bin} ${PKG_ROOT}/bin
+ cp ${GOPATHBIN}/${bin} ${PKG_ROOT}/bin
if [ $? -ne 0 ]; then exit 1; fi
done
@@ -87,7 +88,7 @@ if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
for dir in "${genesis_dirs[@]}"; do
mkdir -p ${PKG_ROOT}/genesis/${dir}
cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json ${PKG_ROOT}/genesis/${dir}/
- #${GOPATH}/bin/buildtools genesis ensure -n ${dir} --source ${REPO_DIR}/gen/${dir}/genesis.json --target ${PKG_ROOT}/genesis/${dir}/genesis.json --releasedir ${REPO_DIR}/installer/genesis
+ #${GOPATHBIN}/buildtools genesis ensure -n ${dir} --source ${REPO_DIR}/gen/${dir}/genesis.json --target ${PKG_ROOT}/genesis/${dir}/genesis.json --releasedir ${REPO_DIR}/installer/genesis
if [ $? -ne 0 ]; then exit 1; fi
done
# Copy the appropriate network genesis.json for our default (in root ./genesis folder)
@@ -95,7 +96,7 @@ if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
if [ $? -ne 0 ]; then exit 1; fi
elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta" ]]; then
cp ${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/genesis/
- #${GOPATH}/bin/buildtools genesis ensure -n ${DEFAULTNETWORK} --source ${REPO_DIR}/gen/${DEFAULTNETWORK}/genesis.json --target ${PKG_ROOT}/genesis/genesis.json --releasedir ${REPO_DIR}/installer/genesis
+ #${GOPATHBIN}/buildtools genesis ensure -n ${DEFAULTNETWORK} --source ${REPO_DIR}/gen/${DEFAULTNETWORK}/genesis.json --target ${PKG_ROOT}/genesis/genesis.json --releasedir ${REPO_DIR}/installer/genesis
if [ $? -ne 0 ]; then exit 1; fi
else
cp installer/genesis/${DEFAULTNETWORK}/genesis.json ${PKG_ROOT}/genesis
@@ -103,7 +104,7 @@ else
#if [ -z "${TIMESTAMP}" ]; then
# TIMESTAMP=$(date +%s)
#fi
- #${GOPATH}/bin/buildtools genesis timestamp -f ${PKG_ROOT}/genesis/genesis.json -t ${TIMESTAMP}
+ #${GOPATHBIN}/buildtools genesis timestamp -f ${PKG_ROOT}/genesis/genesis.json -t ${TIMESTAMP}
fi
TOOLS_ROOT=${PKG_ROOT}/tools
@@ -113,7 +114,7 @@ echo "Staging tools package files"
bin_files=("algons" "auctionconsole" "auctionmaster" "auctionminion" "coroner" "dispenser" "netgoal" "nodecfg" "pingpong" "cc_service" "cc_agent" "cc_client" "COPYING")
mkdir -p ${TOOLS_ROOT}
for bin in "${bin_files[@]}"; do
- cp ${GOPATH}/bin/${bin} ${TOOLS_ROOT}
+ cp ${GOPATHBIN}/${bin} ${TOOLS_ROOT}
if [ $? -ne 0 ]; then exit 1; fi
done
diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh
index d61f49c93a..92fb4f49d7 100755
--- a/scripts/check_deps.sh
+++ b/scripts/check_deps.sh
@@ -25,7 +25,7 @@ GO_BIN="$(echo "$GOPATH" | cut -d: -f1)/bin"
MISSING=0
missing_dep() {
- echo "$YELLOW_FG[WARNING]$END_FG_COLOR Mising dependency \`$TEAL_FG${1}$END_FG_COLOR\`."
+ echo "$YELLOW_FG[WARNING]$END_FG_COLOR Missing dependency \`$TEAL_FG${1}$END_FG_COLOR\`."
MISSING=1
}
@@ -52,6 +52,12 @@ check_deps() {
then
missing_dep shellcheck
fi
+
+ # Don't print `sqlite3`'s location.
+ if ! which sqlite3 > /dev/null
+ then
+ missing_dep sqlite3
+ fi
}
check_deps
@@ -60,7 +66,7 @@ if [ $MISSING -eq 0 ]
then
echo "$GREEN_FG[$0]$END_FG_COLOR Required dependencies installed."
else
- echo -e "$RED_FG[$0]$END_FG_COLOR Required dependencies missing. Run \`${TEAL_FG}./scripts/configure-dev.sh$END_FG_COLOR\` to install."
+ echo -e "$RED_FG[$0]$END_FG_COLOR Required dependencies missing. Run \`${TEAL_FG}./scripts/configure_dev.sh$END_FG_COLOR\` to install."
exit 1
fi
diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh
index 1d4721bc9a..11ce3ac421 100755
--- a/scripts/configure_dev.sh
+++ b/scripts/configure_dev.sh
@@ -21,7 +21,7 @@ if [ "${OS}" = "linux" ]; then
fi
sudo apt-get update
- sudo apt-get install -y libboost-all-dev expect jq autoconf shellcheck
+ sudo apt-get install -y libboost-all-dev expect jq autoconf shellcheck sqlite3
elif [ "${OS}" = "darwin" ]; then
brew update
brew tap homebrew/cask
diff --git a/scripts/deploy_linux_version.sh b/scripts/deploy_linux_version.sh
index 2bda582501..c1be6b2304 100755
--- a/scripts/deploy_linux_version.sh
+++ b/scripts/deploy_linux_version.sh
@@ -16,7 +16,7 @@ export GOPATH=$(go env GOPATH)
# Anchor our repo root reference location
REPO_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"/..
-cd ${REPO_ROOT}/..
+cd ${REPO_ROOT}
SRCPATH=${REPO_ROOT}
TMPDIR="${SRCPATH}/tmp"
diff --git a/scripts/get_latest_go.py b/scripts/release/archive/get_latest_go.py
similarity index 100%
rename from scripts/get_latest_go.py
rename to scripts/release/archive/get_latest_go.py
diff --git a/scripts/release/build/Jenkinsfile b/scripts/release/build/Jenkinsfile
index d5bdde05cb..788e02dfb3 100644
--- a/scripts/release/build/Jenkinsfile
+++ b/scripts/release/build/Jenkinsfile
@@ -11,9 +11,7 @@ pipeline {
agent {
dockerfile {
filename 'scripts/release/common/docker/setup.Dockerfile'
- /*
- args '-v /etc/passwd:/etc/passwd'
- */
+ args '-u root'
}
}
@@ -55,6 +53,12 @@ pipeline {
}
}
+ stage("verify signatures") {
+ steps {
+ sh script: "scripts/release/build/stage/verify/run.sh"
+ }
+ }
+
stage("upload") {
steps {
script {
diff --git a/scripts/release/build/rpm/build.sh b/scripts/release/build/rpm/build.sh
index 45810ca98b..c84efbee0b 100755
--- a/scripts/release/build/rpm/build.sh
+++ b/scripts/release/build/rpm/build.sh
@@ -11,13 +11,21 @@ export HOME=/root
. "${HOME}"/subhome/build_env
-GIT_REPO_PATH=https://github.com/algorand/go-algorand
mkdir -p "${HOME}/go/src/github.com/algorand"
-cd "${HOME}/go/src/github.com/algorand" && git clone --single-branch --branch "${BRANCH}" "${GIT_REPO_PATH}" go-algorand
-
-# Get golang 1.12 and build its own copy of go-algorand.
+cd "${HOME}/go/src/github.com/algorand"
+if ! git clone --single-branch --branch "${BRANCH}" https://github.com/algorand/go-algorand go-algorand
+then
+ echo There has been a problem cloning the "$BRANCH" branch.
+ exit 1
+fi
+
+# Get golang 1.12.9 and build its own copy of go-algorand.
cd "${HOME}"
-python3 "${HOME}/go/src/github.com/algorand/go-algorand/scripts/get_latest_go.py" --version-prefix=1.12
+if ! curl -O https://dl.google.com/go/go1.12.9.linux-amd64.tar.gz
+then
+ echo Golang could not be installed!
+ exit 1
+fi
bash -c "cd /usr/local && tar zxf ${HOME}/go*.tar.gz"
GOPATH=$(/usr/local/go/bin/go env GOPATH)
@@ -29,7 +37,6 @@ REPO_DIR=/root/go/src/github.com/algorand/go-algorand
# Build!
"${REPO_DIR}"/scripts/configure_dev-deps.sh
cd "${REPO_DIR}"
-make crypto/lib/libsodium.a
make build
# Copy binaries to the host for use in the packaging stage.
diff --git a/scripts/release/build/stage/build/run.sh b/scripts/release/build/stage/build/run.sh
index 87245ee664..62acf252c8 100755
--- a/scripts/release/build/stage/build/run.sh
+++ b/scripts/release/build/stage/build/run.sh
@@ -2,6 +2,8 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
ssh -i ReleaseBuildInstanceKey.pem -A ubuntu@"$(cat scripts/release/common/ec2/tmp/instance)" bash go/src/github.com/algorand/go-algorand/scripts/release/build/stage/build/task.sh
diff --git a/scripts/release/build/stage/build/task.sh b/scripts/release/build/stage/build/task.sh
index a10955979f..ca8df5ce17 100755
--- a/scripts/release/build/stage/build/task.sh
+++ b/scripts/release/build/stage/build/task.sh
@@ -33,7 +33,7 @@ fi
# Run RPM build in Centos7 Docker container
sg docker "docker build -t algocentosbuild - < $HOME/go/src/github.com/algorand/go-algorand/scripts/release/common/docker/centos.Dockerfile"
-sg docker "docker run --rm --mount type=bind,src=${HOME},dst=/root/subhome algocentosbuild /root/subhome/go/src/github.com/algorand/go-algorand/scripts/release/build/rpm/build.sh"
+sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=${HOME},dst=/root/subhome algocentosbuild /root/subhome/go/src/github.com/algorand/go-algorand/scripts/release/build/rpm/build.sh"
echo
date "+build_release end BUILD stage %Y%m%d_%H%M%S"
diff --git a/scripts/release/build/stage/package/run.sh b/scripts/release/build/stage/package/run.sh
index 08e7ed86e1..8bcd9b32cd 100755
--- a/scripts/release/build/stage/package/run.sh
+++ b/scripts/release/build/stage/package/run.sh
@@ -2,6 +2,8 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
ssh -i ReleaseBuildInstanceKey.pem -A ubuntu@"$(cat scripts/release/common/ec2/tmp/instance)" bash go/src/github.com/algorand/go-algorand/scripts/release/build/stage/package/task.sh
diff --git a/scripts/release/build/stage/setup/run.sh b/scripts/release/build/stage/setup/run.sh
index 2cb79accb9..80916a9eee 100755
--- a/scripts/release/build/stage/setup/run.sh
+++ b/scripts/release/build/stage/setup/run.sh
@@ -3,9 +3,11 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
-INSTANCE=$(cat scripts/release/common/ec2/tmp/instance)
-BRANCH="$1"
+BRANCH=$(./scripts/release/util/check_remote.sh "$1")
+INSTANCE=$(cat ./scripts/release/common/ec2/tmp/instance)
aws s3 cp s3://algorand-devops-misc/tools/gnupg2.2.9_centos7_amd64.tar.bz2 .
scp -i ReleaseBuildInstanceKey.pem -o StrictHostKeyChecking=no -r scripts/release/common/setup.sh gnupg2.2.9_centos7_amd64.tar.bz2 ubuntu@"$INSTANCE":
diff --git a/scripts/release/build/stage/sign/run.sh b/scripts/release/build/stage/sign/run.sh
index 7fd86557e8..4c40ddf67b 100755
--- a/scripts/release/build/stage/sign/run.sh
+++ b/scripts/release/build/stage/sign/run.sh
@@ -2,6 +2,8 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
ssh -i ReleaseBuildInstanceKey.pem -A ubuntu@"$(cat scripts/release/common/ec2/tmp/instance)" bash go/src/github.com/algorand/go-algorand/scripts/release/build/stage/sign/task.sh
diff --git a/scripts/release/build/stage/upload/run.sh b/scripts/release/build/stage/upload/run.sh
index c96d2d9158..83a7b5d731 100755
--- a/scripts/release/build/stage/upload/run.sh
+++ b/scripts/release/build/stage/upload/run.sh
@@ -2,6 +2,8 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
#BUCKET_LOCATION="$2"
@@ -15,8 +17,6 @@ rm -rf pkg && mkdir -p pkg/"$FULLVERSION"
ssh -i ReleaseBuildInstanceKey.pem -A ubuntu@"$INSTANCE" bash go/src/github.com/algorand/go-algorand/scripts/release/build/stage/upload/task.sh
scp -i ReleaseBuildInstanceKey.pem -o StrictHostKeyChecking=no -r ubuntu@"$INSTANCE":~/node_pkg/* pkg/"$FULLVERSION"/
-# Create the buildlog file.
-scp -i ReleaseBuildInstanceKey.pem -o StrictHostKeyChecking=no ubuntu@"$INSTANCE":~/build_status_"$CHANNEL"_*.asc.gz pkg/"$FULLVERSION"
#aws s3 sync --exclude dev* --exclude master* --exclude nightly* --exclude stable* --acl public-read pkg/"$FULLVERSION" s3://"$BUCKET_LOCATION"/"$CHANNEL"/"$FULLVERSION"/
aws s3 sync --exclude dev* --exclude master* --exclude nightly* --exclude stable* --acl public-read pkg/"$FULLVERSION" s3://ben-test-2.0.3/"$CHANNEL"/"$FULLVERSION"/
diff --git a/scripts/release/build/stage/upload/task.sh b/scripts/release/build/stage/upload/task.sh
index bfb53dbf0d..23d39a4171 100755
--- a/scripts/release/build/stage/upload/task.sh
+++ b/scripts/release/build/stage/upload/task.sh
@@ -58,7 +58,7 @@ EOF
# Note this file is scp'd in stage/upload.sh
dpkg -l >> "${STATUSFILE}"
gpg --clearsign "${STATUSFILE}"
-gzip "${STATUSFILE}".asc > "${HOME}"/node_pkg/"${STATUSFILE}".asc.gz
+gzip -c "${STATUSFILE}".asc > "${HOME}"/node_pkg/"${STATUSFILE}".asc.gz
echo
date "+build_release end UPLOAD stage %Y%m%d_%H%M%S"
diff --git a/scripts/release/build/stage/verify/run.sh b/scripts/release/build/stage/verify/run.sh
new file mode 100755
index 0000000000..0c984d4d18
--- /dev/null
+++ b/scripts/release/build/stage/verify/run.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+set -ex
+
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
+# Path(s) are relative to the root of the Jenkins workspace.
+ssh -i ReleaseBuildInstanceKey.pem -A ubuntu@"$(cat scripts/release/common/ec2/tmp/instance)" bash go/src/github.com/algorand/go-algorand/scripts/release/build/stage/verify/task.sh
+
diff --git a/scripts/release/build/stage/verify/task.sh b/scripts/release/build/stage/verify/task.sh
new file mode 100755
index 0000000000..038e6e36f4
--- /dev/null
+++ b/scripts/release/build/stage/verify/task.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+set -ex
+
+echo
+date "+build_release begin VERIFY %Y%m%d_%H%M%S"
+echo
+
+RETVAL=0
+
+cd "$HOME/node_pkg"
+
+for file in *.{gz,deb,rpm}
+do
+ key_id=dev@algorand.com
+
+ # Check the filename extension.
+ if [ "${file##*.}" == "rpm" ]
+ then
+ key_id=rpm@algorand.com
+ fi
+
+ if ! gpg -u "$key_id" --verify "$file".sig "$file"
+ then
+ echo -e "[$0] Could not verify signature for $file"
+ RETVAL=1
+ fi
+done
+
+if [ $RETVAL -eq 0 ]
+then
+ echo -e "[$0] All signatures have been verified as good."
+fi
+
+echo
+date "+build_release end VERIFY stage %Y%m%d_%H%M%S"
+echo
+
+exit $RETVAL
+
diff --git a/scripts/release/common/docker/setup.Dockerfile b/scripts/release/common/docker/setup.Dockerfile
index d68331b62c..b2c15664ba 100644
--- a/scripts/release/common/docker/setup.Dockerfile
+++ b/scripts/release/common/docker/setup.Dockerfile
@@ -1,5 +1,16 @@
+# Note that we're installing `awscli` from pip rather than from the apt repository because of
+# the following error message:
+#
+# upload failed: pkg/2.0.63803/build_status_dev_2.0.63803.asc.gz to s3://{upload_location}/build_status_dev_2.0.63803.asc.gz seek() takes 2 positional arguments but 3 were given
+#
+# Note that the error only seems to occur when there is a file to upload with zero bytes,
+# but just to be safe we'll still use pip to download and install.
+#
+# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=869194
+# https://github.com/boto/s3transfer/pull/102
+
FROM ubuntu:18.04
-RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y awscli jq ssh
-#RUN adduser --uid $(grep jenkins /etc/passwd | awk -F: '{ print $3 }') ubuntu
-RUN adduser --uid 111 ubuntu
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && apt-get install -y jq git python python-pip python3-boto3 ssh && \
+ pip install awscli
diff --git a/scripts/release/common/ec2/shutdown.sh b/scripts/release/common/ec2/shutdown.sh
index 8cc312eeb4..67b5fc5aa5 100755
--- a/scripts/release/common/ec2/shutdown.sh
+++ b/scripts/release/common/ec2/shutdown.sh
@@ -1,18 +1,12 @@
#!/usr/bin/env bash
# shellcheck disable=2164
-AWS_REGION="$1"
+AWS_REGION="${1:-us-west-1}"
GREEN_FG=$(echo -en "\e[32m")
YELLOW_FG=$(echo -en "\e[33m")
END_FG_COLOR=$(echo -en "\e[39m")
REPO_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"
-if [ "$AWS_REGION" = "" ]
-then
- echo "Missing AWS_REGION argument"
- exit 1
-fi
-
pushd "$REPO_ROOT"/tmp > /dev/null
SGID=$(cat sgid)
INSTANCE_ID=$(cat instance-id)
diff --git a/scripts/release/common/setup.sh b/scripts/release/common/setup.sh
index 069258b279..1901a947c6 100755
--- a/scripts/release/common/setup.sh
+++ b/scripts/release/common/setup.sh
@@ -7,17 +7,25 @@ if [ -z "${BUILDTIMESTAMP}" ]; then
BUILDTIMESTAMP=$(cat "${HOME}/buildtimestamp")
export BUILDTIMESTAMP
echo run "${0}" with output to "${HOME}/buildlog_${BUILDTIMESTAMP}"
- (bash "${0}" "${1}" 2>&1) | tee "${HOME}/buildlog_${BUILDTIMESTAMP}"
- exit 0
+ bash "${0}" "${1}" 2>&1 | tee "${HOME}/buildlog_${BUILDTIMESTAMP}"
+ # http://tldp.org/LDP/abs/html/internalvariables.html#PIPESTATUSREF
+ exit "${PIPESTATUS[0]}"
fi
echo
date "+build_release begin SETUP stage %Y%m%d_%H%M%S"
echo
+# `apt-get` fails randomly when downloading package, this is a hack that "works" reasonably well.
+echo -e "deb http://us.archive.ubuntu.com/ubuntu/ bionic main universe multiverse\ndeb http://archive.ubuntu.com/ubuntu/ bionic main universe multiverse" | sudo tee /etc/apt/sources.list.d/ubuntu
+
sudo apt-get update
sudo apt-get upgrade -y
-sudo apt-get install -y build-essential automake autoconf awscli docker.io git gpg nfs-common python3 rpm sqlite3 python3-boto3 g++ libtool rng-tools
+
+# `apt-get` fails randomly when downloading package, this is a hack that "works" reasonably well.
+sudo apt-get update
+
+sudo apt-get install -y build-essential automake autoconf awscli docker.io git gpg nfs-common python python3 rpm sqlite3 python3-boto3 g++ libtool rng-tools
sudo rngd -r /dev/urandom
#umask 0077
@@ -29,15 +37,25 @@ export BRANCH
# Check out
mkdir -p "${HOME}/go/src/github.com/algorand"
-cd "${HOME}/go/src/github.com/algorand" && git clone --single-branch --branch "${BRANCH}" https://github.com/algorand/go-algorand go-algorand
-# TODO: if we are checking out a release tag, `git tag --verify` it
+cd "${HOME}/go/src/github.com/algorand"
+if ! git clone --single-branch --branch "${BRANCH}" https://github.com/algorand/go-algorand go-algorand
+then
+ echo There has been a problem cloning the "$BRANCH" branch.
+ exit 1
+fi
+
+cd go-algorand
+COMMIT_HASH=$(git rev-parse "${BRANCH}")
export DEBIAN_FRONTEND=noninteractive
-# Install latest Go
+# Install Go 1.12.9 (pinned).
cd "${HOME}"
-python3 "${HOME}/go/src/github.com/algorand/go-algorand/scripts/get_latest_go.py" --version-prefix=1.12
-# $HOME will be interpreted by the outer shell to create the string passed to sudo bash
+if ! curl -O https://dl.google.com/go/go1.12.9.linux-amd64.tar.gz
+then
+ echo Golang could not be installed!
+ exit 1
+fi
sudo bash -c "cd /usr/local && tar zxf ${HOME}/go*.tar.gz"
GOPATH=$(/usr/local/go/bin/go env GOPATH)
@@ -95,7 +113,11 @@ EOF
# Install aptly for building debian repo
mkdir -p "$GOPATH/src/github.com/aptly-dev"
cd "$GOPATH/src/github.com/aptly-dev"
-git clone https://github.com/aptly-dev/aptly
+if ! git clone https://github.com/aptly-dev/aptly
+then
+ echo There has been a problem cloning the aptly project.
+ exit 1
+fi
cd aptly && git fetch
# As of 2019-06-06 release tag v1.3.0 is 2018-May, GnuPG 2 support was added in October but they haven't tagged a new release yet. Hash below seems to work so far.
@@ -111,6 +133,7 @@ PLATFORM_SPLIT=(${PLATFORM//\// })
cat << EOF > "${HOME}"/build_env
export BRANCH=${BRANCH}
export CHANNEL=$("${GOPATH}"/src/github.com/algorand/go-algorand/scripts/compute_branch_channel.sh "${BRANCH}")
+export COMMIT_HASH=${COMMIT_HASH}
export DEFAULTNETWORK=$(PATH=${PATH} "${REPO_ROOT}"/scripts/compute_branch_network.sh)
export DC_IP=$(curl --silent http://169.254.169.254/latest/meta-data/local-ipv4)
export FULLVERSION=$("${GOPATH}"/src/github.com/algorand/go-algorand/scripts/compute_build_number.sh -f)
diff --git a/scripts/release/prod/stage/setup/run.sh b/scripts/release/prod/stage/setup/run.sh
index dcfe9f8067..901c460eb6 100755
--- a/scripts/release/prod/stage/setup/run.sh
+++ b/scripts/release/prod/stage/setup/run.sh
@@ -1,10 +1,14 @@
#!/usr/bin/env bash
# shellcheck disable=2029
+set -ex
+
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
+BRANCH=$(./scripts/release/util/check_remote.sh "$1")
INSTANCE=$(cat scripts/release/common/ec2/tmp/instance)
#BUCKET="$1"
-BRANCH="$1"
BUILD_ENV=$(ssh -i ReleaseBuildInstanceKey.pem -o -A ubuntu@"$INSTANCE" cat build_env)
CHANNEL=$(sed -n 's/.*CHANNEL=\(.*\)/\1/p' <<< "$BUILD_ENV")
diff --git a/scripts/release/prod/stage/sync/run.sh b/scripts/release/prod/stage/sync/run.sh
index c492a509bb..b22a94b147 100755
--- a/scripts/release/prod/stage/sync/run.sh
+++ b/scripts/release/prod/stage/sync/run.sh
@@ -1,6 +1,10 @@
#!/usr/bin/env bash
# shellcheck disable=2029
+set -ex
+
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
STAGING="$1"
PROD="$2"
diff --git a/scripts/release/test/Jenkinsfile b/scripts/release/test/Jenkinsfile
index 4b83d7f7e6..bcc8c825e9 100644
--- a/scripts/release/test/Jenkinsfile
+++ b/scripts/release/test/Jenkinsfile
@@ -15,6 +15,7 @@ pipeline {
agent {
dockerfile {
filename 'scripts/release/common/docker/setup.Dockerfile'
+ args '-u root'
}
}
diff --git a/scripts/release/test/deb/run_ubuntu.sh b/scripts/release/test/deb/run_ubuntu.sh
index 0ea99246eb..8d1cab29ba 100755
--- a/scripts/release/test/deb/run_ubuntu.sh
+++ b/scripts/release/test/deb/run_ubuntu.sh
@@ -1,6 +1,4 @@
#!/usr/bin/env bash
-#
-# This script exists to give a trap atexit context for killing the httpd so that we're not waiting on that
set -ex
diff --git a/scripts/release/test/stage/setup/run.sh b/scripts/release/test/stage/setup/run.sh
index c2f905cdb2..b4cd20c7b0 100755
--- a/scripts/release/test/stage/setup/run.sh
+++ b/scripts/release/test/stage/setup/run.sh
@@ -3,9 +3,11 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
+BRANCH=$(./scripts/release/util/check_remote.sh "$1")
INSTANCE=$(cat scripts/release/common/ec2/tmp/instance)
-BRANCH="$1"
aws s3 cp s3://algorand-devops-misc/tools/gnupg2.2.9_centos7_amd64.tar.bz2 .
scp -i ReleaseBuildInstanceKey.pem -o StrictHostKeyChecking=no -r gnupg2.2.9_centos7_amd64.tar.bz2 ubuntu@"$INSTANCE":
diff --git a/scripts/release/test/stage/test/run.sh b/scripts/release/test/stage/test/run.sh
index 24e6c41fd1..1107fe7cab 100755
--- a/scripts/release/test/stage/test/run.sh
+++ b/scripts/release/test/stage/test/run.sh
@@ -2,6 +2,8 @@
set -ex
+trap 'bash ./scripts/release/common/ec2/shutdown.sh' ERR
+
# Path(s) are relative to the root of the Jenkins workspace.
INSTANCE=$(cat scripts/release/common/ec2/tmp/instance)
diff --git a/scripts/release/test/stage/test/task.sh b/scripts/release/test/stage/test/task.sh
index 1e6ac52d1b..77805bb04c 100755
--- a/scripts/release/test/stage/test/task.sh
+++ b/scripts/release/test/stage/test/task.sh
@@ -17,6 +17,13 @@ date "+build_release done testing ubuntu %Y%m%d_%H%M%S"
"${HOME}"/go/src/github.com/algorand/go-algorand/scripts/release/test/rpm/run_centos.sh
date "+build_release done testing centos %Y%m%d_%H%M%S"
+echo Use Docker to perform a smoke test.
+pushd "${HOME}"/go/src/github.com/algorand/go-algorand/scripts/release/test/util
+# Copy all packages to the same directory where the Dockerfile will reside.
+cp "${HOME}"/node_pkg/* .
+./test_package.sh
+popd
+
echo
date "+build_release end TEST stage %Y%m%d_%H%M%S"
echo
diff --git a/scripts/release/test/util/check_sig.sh b/scripts/release/test/util/check_sig.sh
new file mode 100755
index 0000000000..fdbe45426c
--- /dev/null
+++ b/scripts/release/test/util/check_sig.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+# shellcheck disable=2045
+
+if [ $# -ne 1 ]
+then
+    echo "Usage: $0 <pkg_dir>"
+ exit 1
+fi
+
+GREEN_FG=$(tput setaf 2 2>/dev/null)
+RED_FG=$(tput setaf 1 2>/dev/null)
+END_FG_COLOR=$(tput sgr0 2>/dev/null)
+RETVAL=0
+
+pushd "$1" > /dev/null
+
+for file in $(ls ./*.{gz,deb,rpm})
+do
+ key_id=dev@algorand.com
+
+ # Check the filename extension.
+ if [ "${file##*.}" == "rpm" ]
+ then
+ key_id=rpm@algorand.com
+ fi
+
+ if ! gpg -u "$key_id" --verify "$file".sig "$file"
+ then
+ echo -e "$RED_FG[$0]$END_FG_COLOR Could not verify signature for $file"
+ RETVAL=1
+ fi
+done
+
+popd > /dev/null
+
+if [ $RETVAL -eq 0 ]
+then
+ echo -e "$GREEN_FG[$0]$END_FG_COLOR All signatures have been verified as good."
+fi
+
+exit $RETVAL
+
diff --git a/scripts/release/test/util/gpg-fake.sh b/scripts/release/test/util/gpg-fake.sh
new file mode 100755
index 0000000000..756f108fcf
--- /dev/null
+++ b/scripts/release/test/util/gpg-fake.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+# shellcheck disable=2012
+
+set -ex
+
+echo
+date "+build_release begin GPG SETUP stage %Y%m%d_%H%M%S"
+echo
+
+cat << EOF > "${HOME}/gpgbin/remote_gpg_socket"
+export GOPATH=\${HOME}/go
+export PATH=\${HOME}/gpgbin:${GOPATH}/bin:/usr/local/go/bin:${PATH}
+gpgconf --list-dirs | grep agent-socket | awk -F: '{ print \$2 }'
+EOF
+
+chmod +x "${HOME}/gpgbin/remote_gpg_socket"
+
+# This real name and email must precisely match GPG key
+git config --global user.name "Algorand developers"
+git config --global user.email dev@algorand.com
+
+# configure GnuPG to rely on forwarded remote gpg-agent
+umask 0077
+touch "${HOME}/.gnupg/gpg.conf"
+if grep -q no-autostart "${HOME}/.gnupg/gpg.conf"; then
+ echo ""
+else
+ echo "no-autostart" >> "${HOME}/.gnupg/gpg.conf"
+fi
+
+umask 0002
+
+gpgconf --launch gpg-agent
+
+GNUPGHOME="${HOME}"/.gnupg
+gpgconf --kill gpg-agent
+chmod 700 "${GNUPGHOME}"
+
+cat > "${GNUPGHOME}"/keygenscript << EOF
+Key-Type: default
+Subkey-Type: default
+Name-Real: Algorand developers
+Name-Email: dev@algorand.com
+Expire-Date: 0
+Passphrase: foogorand
+EOF
+
+cat > "${GNUPGHOME}"/rpmkeygenscript << EOF
+Key-Type: default
+Subkey-Type: default
+Name-Real: Algorand RPM
+Name-Email: rpm@algorand.com
+Expire-Date: 0
+Passphrase: foogorand
+EOF
+
+cat << EOF > "${GNUPGHOME}"/gpg-agent.conf
+# Only needed for gpg < 2.1.17 (https://wiki.gnupg.org/AgentForwarding)
+#extra-socket "${HOME}"/S.gpg-agent.extra
+# Enable unattended daemon mode.
+allow-preset-passphrase
+# Cache password 30 days.
+default-cache-ttl 2592000
+max-cache-ttl 2592000
+EOF
+
+# Added 2020-01-20
+gpgconf --launch gpg-agent
+
+gpg --gen-key --batch "${GNUPGHOME}"/keygenscript
+gpg --gen-key --batch "${GNUPGHOME}"/rpmkeygenscript
+gpg --export -a dev@algorand.com > "${HOME}/keys/dev.pub"
+gpg --export -a rpm@algorand.com > "${HOME}/keys/rpm.pub"
+
+gpgconf --kill gpg-agent
+gpgconf --launch gpg-agent
+
+gpgp=$(ls /usr/lib/gnupg{2,,1}/gpg-preset-passphrase | head -1)
+for name in {dev,rpm}
+do
+ KEYGRIP=$(gpg -K --with-keygrip --textmode "$name"@algorand.com | grep Keygrip | head -1 | awk '{ print $3 }')
+ echo foogorand | "${gpgp}" --verbose --preset "${KEYGRIP}"
+done
+
+echo
+date "+build_release end GPG SETUP stage %Y%m%d_%H%M%S"
+echo
+
diff --git a/scripts/release/test/util/smoke_test.sh b/scripts/release/test/util/smoke_test.sh
new file mode 100755
index 0000000000..996105075f
--- /dev/null
+++ b/scripts/release/test/util/smoke_test.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+set -ex
+
+# This is currently used by `test_package.sh`.
+# It is copied into a docker image at build time and then invoked at run time.
+
+BRANCH=
+CHANNEL=
+COMMIT_HASH=
+FULLVERSION=
+
+while [ "$1" != "" ]; do
+ case "$1" in
+ -b)
+ shift
+ BRANCH="$1"
+ ;;
+ -c)
+ shift
+ CHANNEL="$1"
+ ;;
+ -h)
+ shift
+ COMMIT_HASH="$1"
+ ;;
+ -r)
+ shift
+ FULLVERSION="$1"
+ ;;
+ *)
+ echo "Unknown option" "$1"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+if [ -z "$BRANCH" ] || [ -z "$CHANNEL" ] || [ -z "$COMMIT_HASH" ] || [ -z "$FULLVERSION" ]
+then
+ echo "[ERROR] $0 -b $BRANCH -c $CHANNEL -h $COMMIT_HASH -r $FULLVERSION"
+ exit 1
+fi
+
+echo "[$0] Testing: algod -v"
+if < /etc/os-release grep Ubuntu > /dev/null
+then
+ dpkg -i ./*.deb
+else
+ yum install ./*.rpm -y
+fi
+
+STR=$(algod -v)
+SHORT_HASH=${COMMIT_HASH:0:8}
+
+# We're looking for a line that looks like the following:
+#
+# 2.0.4.stable [rel/stable] (commit #729b125a)
+#
+# Since we're passing in the full hash, we won't be using the closing paren.
+# Use a regex over the multi-line string.
+if [[ "$STR" =~ .*"$FULLVERSION.$CHANNEL [$BRANCH] (commit #$SHORT_HASH)".* ]]
+then
+ echo -e "[$0] The result of \`algod -v\` is a correct match.\n$STR"
+ exit 0
+fi
+
+echo "[$0] The result of \`algod -v\` is an incorrect match."
+exit 1
+
diff --git a/scripts/release/test/util/test_package.sh b/scripts/release/test/util/test_package.sh
new file mode 100755
index 0000000000..1106db0bc6
--- /dev/null
+++ b/scripts/release/test/util/test_package.sh
@@ -0,0 +1,98 @@
+#!/usr/bin/env bash
+# shellcheck disable=1090
+
+# TODO: use `trap` instead of cleanup function?
+
+set -ex
+
+. "${HOME}"/build_env
+
+# TODO: The following error happens on centos:8
+#
+# Error:
+# Problem: conflicting requests
+# - nothing provides yum-cron needed by algorand-2.0.4-1.x86_64
+# (try to add '--skip-broken' to skip uninstallable packages or '--nobest' to use not only best candidate packages)
+# smoke_test.sh: line 47: algod: command not found
+
+OS_LIST=(
+ centos:7
+# centos:8
+ fedora:28
+ ubuntu:16.04
+ ubuntu:18.04
+)
+
+FAILED=()
+
+if [ -z "$BRANCH" ] || [ -z "$CHANNEL" ] || [ -z "$COMMIT_HASH" ] || [ -z "$FULLVERSION" ]
+then
+ echo "[ERROR] $0 was not provided with BRANCH, CHANNEL, COMMIT_HASH or FULLVERSION!"
+ exit 1
+fi
+
+build_images () {
+ # We'll use this simple tokenized Dockerfile.
+ # https://serverfault.com/a/72511
+ TOKENIZED=$(echo -e "\
+FROM {{OS}}\n\n\
+WORKDIR /root\n\
+COPY . .\n\
+CMD [\"/bin/bash\"]")
+
+ for item in ${OS_LIST[*]}
+ do
+ # Note: we eventually want to move to storing the Dockerfiles.
+ #
+ # Use pattern substitution here (like sed).
+ # ${parameter/pattern/substitution}
+ echo -e "${TOKENIZED/\{\{OS\}\}/$item}" > Dockerfile
+ if ! docker build -t "${item}-smoke-test" .
+ then
+ FAILED+=("$item")
+ fi
+ done
+}
+
+run_images () {
+ for item in ${OS_LIST[*]}
+ do
+ echo "[$0] Running ${item}-test..."
+ if ! docker run --rm --name algorand -t "${item}-smoke-test" bash smoke_test.sh -b "$BRANCH" -c "$CHANNEL" -h "$COMMIT_HASH" -r "$FULLVERSION"
+ then
+ FAILED+=("$item")
+ fi
+ done
+}
+
+cleanup() {
+ rm -f Dockerfile
+}
+
+check_failures() {
+ if [ "${#FAILED[@]}" -gt 0 ]
+ then
+ echo -e "\n[$0] The following images could not be $1:"
+
+ for failed in ${FAILED[*]}
+ do
+ echo " - $failed"
+ done
+
+ echo
+
+ cleanup
+ exit 1
+ fi
+}
+
+build_images
+check_failures built
+echo "[$0] All builds completed with no failures."
+
+run_images
+check_failures verified
+echo "[$0] All runs completed with no failures."
+
+cleanup
+
diff --git a/scripts/release/util/check_remote.sh b/scripts/release/util/check_remote.sh
new file mode 100755
index 0000000000..aba935c207
--- /dev/null
+++ b/scripts/release/util/check_remote.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+# When a Jenkins job watches multiple branches, the GIT_BRANCH env var can return "origin/rel/beta"
+# and the remote repo name must be stripped from the front of the string.
+
+BRANCH="$1"
+
+for repo in $(git remote)
+do
+ pattern="$repo"/
+
+ if [[ "$BRANCH" =~ ^$pattern ]]
+ then
+ # Remove matching prefix.
+ echo "${BRANCH#$pattern}"
+ exit 0
+ fi
+done
+
+echo "$BRANCH"
+
diff --git a/scripts/travis/before_build.sh b/scripts/travis/before_build.sh
index edee4e6525..49ad1a0108 100755
--- a/scripts/travis/before_build.sh
+++ b/scripts/travis/before_build.sh
@@ -43,13 +43,13 @@ GOPATH=$(go env GOPATH)
export GOPATH
export GO111MODULE=on
-echo "Building libsodium-fork..."
-make crypto/lib/libsodium.a
-
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
OS=$("${SCRIPTPATH}"/../ostype.sh)
ARCH=$("${SCRIPTPATH}"/../archtype.sh)
+echo "Building libsodium-fork..."
+make crypto/libs/${OS}/${ARCH}/lib/libsodium.a
+
if [ "${OS}-${ARCH}" = "linux-arm" ]; then
echo "Skipping running 'go vet'/gofmt/golint for arm builds"
exit 0
diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh
index ddf54989cb..6a9eb7de7d 100755
--- a/scripts/travis/build.sh
+++ b/scripts/travis/build.sh
@@ -58,6 +58,11 @@ scripts/travis/before_build.sh
# Force re-evaluation of genesis files to see if source files changed w/o running make
touch gen/generate.go
+# Force re-generation of msgpack encoders/decoders with msgp. If this re-generated code
+# does not match the checked-in code, some structs may have been added or updated without
+# refreshing the generated codecs. The enlistment check below will error out, if so.
+make msgp
+
if [ "${OS}-${ARCH}" = "linux-arm" ]; then
# for arm, build just the basic distro
MAKE_DEBUG_OPTION=""
diff --git a/scripts/travis/external_build_printlog.sh b/scripts/travis/external_build_printlog.sh
index f42eb8291c..54d7a8665e 100755
--- a/scripts/travis/external_build_printlog.sh
+++ b/scripts/travis/external_build_printlog.sh
@@ -25,7 +25,15 @@ while [ $SECONDS -lt $end ]; do
aws s3 ls "${BUILD_LOG_PATH}"-"${LOG_SEQ}" ${NO_SIGN_REQUEST} 2> /dev/null > /dev/null
if [ "$?" = "0" ]; then
while true ; do
- LOG_CHUNK=$(aws s3 cp "${BUILD_LOG_PATH}"-"${LOG_SEQ}" - ${NO_SIGN_REQUEST} 2> /dev/null)
+ if [ "${NO_SIGN_REQUEST}" == "--no-sign-request" ]; then
+ URL="${BUILD_LOG_PATH}-${LOG_SEQ}"
+ URL="${URL#s3://}"
+ URL="${URL/\//.s3.amazonaws.com/}"
+ URL="https://${URL}"
+ LOG_CHUNK=$(curl --fail "${URL}" 2> /dev/null)
+ else
+ LOG_CHUNK=$(aws s3 cp "${BUILD_LOG_PATH}"-"${LOG_SEQ}" - ${NO_SIGN_REQUEST} 2> /dev/null)
+ fi
if [ "$?" = "0" ]; then
echo "${LOG_CHUNK}"
((LOG_SEQ++))
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index 50be3903fa..ec19b0a033 100755
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -56,6 +56,84 @@ proc ::AlgorandGoal::Abort { ERROR } {
exit 1
}
+# Start the node
+proc ::AlgorandGoal::StartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
+ set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR
+ set timeout 15
+
+ if { [catch {
+ puts "node start with $TEST_ALGO_DIR"
+ if { $SYSTEMD_MANAGED eq "" } {
+ spawn goal node start -d $TEST_ALGO_DIR
+ expect {
+                timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node start" }
+ "^Algorand node successfully started!*" {puts "Node started successfully"; close}
+ }
+ } else {
+ spawn goal node start -d $TEST_ALGO_DIR
+ expect {
+                timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node start" }
+ "^This node is managed by systemd, you must run the following command to make your desired state change to your node:*" { puts "Goal showed correct error message for systemd" ; close}
+ }
+ }
+ } EXCEPTION] } {
+ puts "ERROR in StartNode: $EXCEPTION"
+ exit 1
+ }
+}
+
+# Stop the node
+proc ::AlgorandGoal::StopNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
+ set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR
+ set timeout 15
+
+ if { [catch {
+ puts "node stop with $TEST_ALGO_DIR"
+ if { $SYSTEMD_MANAGED eq "" } {
+ spawn goal node stop -d $TEST_ALGO_DIR
+ expect {
+                timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node stop" }
+ "*The node was successfully stopped.*" {puts "Node stopped successfully"; close}
+ }
+ } else {
+ spawn goal node stop -d $TEST_ALGO_DIR
+ expect {
+                timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node stop" }
+ "*This node is managed by systemd, you must run the following command to make your desired state change to your node:*" { puts "Goal showed correct error message for systemd" ; close}
+ }
+ }
+ } EXCEPTION] } {
+ puts "ERROR in StopNode: $EXCEPTION"
+ exit 1
+ }
+}
+
+# Restart the node
+proc ::AlgorandGoal::RestartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
+ set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR
+ set timeout 30
+
+ if { [catch {
+ puts "node restart with $TEST_ALGO_DIR"
+ if { $SYSTEMD_MANAGED eq "" } {
+ spawn goal node restart -d $TEST_ALGO_DIR
+ expect {
+                timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node restart" }
+ "^The node was successfully stopped.*Algorand node successfully started!*" {puts "Node restarted successfully"; close}
+ }
+ } else {
+ spawn goal node restart -d $TEST_ALGO_DIR
+ expect {
+                timeout { close; ::AlgorandGoal::Abort "Did not receive appropriate message during node restart" }
+ "^This node is managed by systemd, you must run the following command to make your desired state change to your node:*" { puts "Goal showed correct error message for systemd" ; close}
+ }
+ }
+ } EXCEPTION] } {
+ puts "ERROR in RestartNode: $EXCEPTION"
+ exit 1
+ }
+}
+
# Start the network
proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ALGO_DIR TEST_ROOT_DIR } {
set ::GLOBAL_TEST_ALGO_DIR $TEST_ALGO_DIR
diff --git a/test/e2e-go/cli/goal/expect/goalNodeSystemdTest.exp b/test/e2e-go/cli/goal/expect/goalNodeSystemdTest.exp
new file mode 100644
index 0000000000..6fe0e08527
--- /dev/null
+++ b/test/e2e-go/cli/goal/expect/goalNodeSystemdTest.exp
@@ -0,0 +1,31 @@
+#!/usr/bin/expect -f
+#exp_internal 1
+set err 0
+log_user 1
+
+if { [catch {
+
+ source goalExpectCommon.exp
+ set TEST_ALGO_DIR [lindex $argv 0]
+ set TEST_DATA_DIR [lindex $argv 1]
+
+ puts "TEST_ALGO_DIR: $TEST_ALGO_DIR"
+
+ spawn cp ../../../../testdata/configs/system-v0.json $TEST_ALGO_DIR/system.json
+
+ # Start node
+ ::AlgorandGoal::StartNode $TEST_ALGO_DIR True
+
+ # Restart node
+ ::AlgorandGoal::RestartNode $TEST_ALGO_DIR True
+
+ # Stop node
+ ::AlgorandGoal::StopNode $TEST_ALGO_DIR True
+
+ puts "Basic Goal Test Successful"
+
+ exit 0
+} EXCEPTION] } {
+    puts "ERROR in goalNodeSystemdTest: $EXCEPTION"
+ exit 1
+}
diff --git a/test/e2e-go/cli/goal/expect/goalNodeTest.exp b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
new file mode 100644
index 0000000000..d06375f2cc
--- /dev/null
+++ b/test/e2e-go/cli/goal/expect/goalNodeTest.exp
@@ -0,0 +1,45 @@
+#!/usr/bin/expect -f
+#exp_internal 1
+set err 0
+log_user 1
+
+if { [catch {
+
+ source goalExpectCommon.exp
+ set TEST_ALGO_DIR [lindex $argv 0]
+ set TEST_DATA_DIR [lindex $argv 1]
+
+ puts "TEST_ALGO_DIR: $TEST_ALGO_DIR"
+ puts "TEST_DATA_DIR: $TEST_DATA_DIR"
+
+ set TIME_STAMP [clock seconds]
+
+ set TEST_ROOT_DIR $TEST_ALGO_DIR/root
+ set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/
+ set NETWORK_NAME test_net_expect_$TIME_STAMP
+ set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50Each.json"
+
+ # Create network
+ ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
+
+ exec sleep 5
+
+ # Start node
+ ::AlgorandGoal::StartNode $TEST_PRIMARY_NODE_DIR
+
+ # Restart node
+ ::AlgorandGoal::RestartNode $TEST_PRIMARY_NODE_DIR
+
+ # Stop node
+ ::AlgorandGoal::StopNode $TEST_PRIMARY_NODE_DIR
+
+ # Shutdown the network
+ ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ALGO_DIR $TEST_ROOT_DIR
+
+ puts "Basic Goal Test Successful"
+
+ exit 0
+} EXCEPTION] } {
+    puts "ERROR in goalNodeTest: $EXCEPTION"
+ exit 1
+}
diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go
index bc81235cd3..c2ad4df654 100644
--- a/test/e2e-go/features/catchup/basicCatchup_test.go
+++ b/test/e2e-go/features/catchup/basicCatchup_test.go
@@ -19,12 +19,13 @@ package rewards
import (
"os"
"path/filepath"
- "runtime"
"testing"
"time"
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
@@ -71,14 +72,27 @@ func TestBasicCatchup(t *testing.T) {
a.NoError(err)
}
+// TestCatchupOverGossip tests catchup across network versions
+// The current versions are the original v1 and the upgraded to v2.1
func TestCatchupOverGossip(t *testing.T) {
- if runtime.GOOS == "darwin" {
- t.Skip()
- }
+ t.Parallel()
+ // ledger node upgraded version, fetcher node upgraded version
+ runCatchupOverGossip(t, false, false)
+ // ledger node older version, fetcher node upgraded version
+ runCatchupOverGossip(t, true, false)
+	// ledger node upgraded version, fetcher node older version
+ runCatchupOverGossip(t, false, true)
+ // ledger node older version, fetcher node older version
+ runCatchupOverGossip(t, true, true)
+}
+
+func runCatchupOverGossip(t *testing.T,
+ ledgerNodeDowngrade,
+ fetcherNodeDowngrade bool) {
+
if testing.Short() {
t.Skip()
}
- t.Parallel()
a := require.New(t)
// Overview of this test:
// Start a two-node network (Primary with 0% stake, Secondary with 100% stake)
@@ -89,7 +103,28 @@ func TestCatchupOverGossip(t *testing.T) {
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
// and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
// distribution for catchup so this is fine.
- fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
+ fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100Second.json"))
+
+ if ledgerNodeDowngrade {
+ // Force the node to only support v1
+ dir, err := fixture.GetNodeDir("Node")
+ a.NoError(err)
+ cfg, err := config.LoadConfigFromDisk(dir)
+ a.NoError(err)
+ cfg.NetworkProtocolVersion = "1"
+ cfg.SaveToDisk(dir)
+ }
+
+ if fetcherNodeDowngrade {
+ // Force the node to only support v1
+ dir := fixture.PrimaryDataDir()
+ cfg, err := config.LoadConfigFromDisk(dir)
+ a.NoError(err)
+ cfg.NetworkProtocolVersion = "1"
+ cfg.SaveToDisk(dir)
+ }
+
+ fixture.Start()
defer fixture.Shutdown()
ncPrim, err := fixture.GetNodeController("Primary")
a.NoError(err)
@@ -141,6 +176,15 @@ func TestCatchupOverGossip(t *testing.T) {
}
}
+// consensusTestUnupgradedProtocol is a version of ConsensusCurrentVersion
+// that allows the control of the upgrade from consensusTestUnupgradedProtocol to
+// consensusTestUnupgradedToProtocol
+const consensusTestUnupgradedProtocol = protocol.ConsensusVersion("test-unupgraded-protocol")
+
+// consensusTestUnupgradedToProtocol is a version of ConsensusCurrentVersion
+// It is used as an upgrade from consensusTestUnupgradedProtocol
+const consensusTestUnupgradedToProtocol = protocol.ConsensusVersion("test-unupgradedto-protocol")
+
func TestStoppedCatchupOnUnsupported(t *testing.T) {
if testing.Short() {
t.Skip()
@@ -148,8 +192,30 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) {
t.Parallel()
a := require.New(t)
- defer os.Unsetenv("ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE")
- os.Setenv("ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE", "0")
+ consensus := make(config.ConsensusProtocols)
+ // The following two protocols: testUnupgradedProtocol and testUnupgradedToProtocol
+ // are used to test the case when some nodes in the network do not make progress.
+
+ // testUnupgradedToProtocol is derived from ConsensusCurrentVersion and upgraded
+ // from testUnupgradedProtocol.
+ testUnupgradedToProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
+ testUnupgradedToProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ consensus[consensusTestUnupgradedToProtocol] = testUnupgradedToProtocol
+
+ // testUnupgradedProtocol is used to control the upgrade of a node. This is used
+ // to construct and run a network where some node is upgraded, and some other
+ // node is not upgraded.
+ // testUnupgradedProtocol is derived from ConsensusCurrentVersion and upgrades to
+ // testUnupgradedToProtocol.
+ testUnupgradedProtocol := config.Consensus[protocol.ConsensusCurrentVersion]
+ testUnupgradedProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ testUnupgradedProtocol.UpgradeVoteRounds = 3
+ testUnupgradedProtocol.UpgradeThreshold = 2
+ testUnupgradedProtocol.DefaultUpgradeWaitRounds = 3
+
+ testUnupgradedProtocol.ApprovedUpgrades[consensusTestUnupgradedToProtocol] = 0
+ consensus[consensusTestUnupgradedProtocol] = testUnupgradedProtocol
// Overview of this test:
// Start a two-node network (primary has 0%, secondary has 100%)
@@ -157,6 +223,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) {
// Spin up a third node and see if it catches up
var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
// Give the second node (which starts up last) all the stake so that its proposal always has better credentials,
// and so that its proposal isn't dropped. Otherwise the test burns 17s to recover. We don't care about stake
// distribution for catchup so this is fine.
@@ -173,13 +240,14 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) {
err = fixture.ClientWaitForRoundWithTimeout(fixture.GetAlgodClientForController(nc), waitForRound)
a.NoError(err)
- os.Setenv("ALGORAND_TEST_UNUPGRADEDPROTOCOL_DELETE_UPGRADE", "1")
-
// Now spin up third node
cloneDataDir := filepath.Join(fixture.PrimaryDataDir(), "../clone")
cloneLedger := false
err = fixture.NC.Clone(cloneDataDir, cloneLedger)
a.NoError(err)
+
+ delete(consensus, consensusTestUnupgradedToProtocol)
+ fixture.GetNodeControllerForDataDir(cloneDataDir).SetConsensus(consensus)
cloneClient, err := fixture.StartNode(cloneDataDir)
a.NoError(err)
defer shutdownClonedNode(cloneDataDir, &fixture, t)
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index e2c48cb631..e22649aec5 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -301,9 +301,25 @@ func TestRewardRateRecalculation(t *testing.T) {
t.Parallel()
r := require.New(t)
+ // consensusTestRapidRewardRecalculation is a version of ConsensusCurrentVersion
+ // that decreases the RewardsRateRefreshInterval greatly.
+ const consensusTestRapidRewardRecalculation = protocol.ConsensusVersion("test-fast-reward-recalculation")
+
+ rapidRecalcParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ rapidRecalcParams.RewardsRateRefreshInterval = 10
+ //because rapidRecalcParams is based on ConsensusCurrentVersion,
+ //it *shouldn't* have any ApprovedUpgrades
+ //but explicitly mark "no approved upgrades" just in case
+ rapidRecalcParams.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(config.ConsensusProtocols{
+ consensusTestRapidRewardRecalculation: rapidRecalcParams,
+ })
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each_RapidRewardRecalculation.json"))
defer fixture.Shutdown()
+ consensus, err := fixture.NC.GetConsensus()
+ r.NoError(err)
client := fixture.LibGoalClient
r.NoError(fixture.WaitForRoundWithTimeout(uint64(5)))
@@ -321,8 +337,9 @@ func TestRewardRateRecalculation(t *testing.T) {
blk, err := client.Block(curStatus.LastRound)
r.NoError(err)
- consensus := config.Consensus[protocol.ConsensusVersion(blk.CurrentProtocol)]
- rewardRecalcRound := consensus.RewardsRateRefreshInterval
+ r.Equal(protocol.ConsensusVersion(blk.CurrentProtocol), consensusTestRapidRewardRecalculation)
+ consensusParams := consensus[protocol.ConsensusVersion(blk.CurrentProtocol)]
+ rewardRecalcRound := consensusParams.RewardsRateRefreshInterval
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1))
balanceOfRewardsPool, roundQueried := fixture.GetBalanceAndRound(rewardsAccount)
if roundQueried != rewardRecalcRound-1 {
@@ -333,18 +350,18 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound))
blk, err = client.Block(rewardRecalcRound)
r.NoError(err)
- if !consensus.PendingResidueRewards {
+ if !consensusParams.PendingResidueRewards {
lastRoundBeforeRewardRecals.RewardsResidue = 0
}
- r.Equalf((balanceOfRewardsPool-minBal-lastRoundBeforeRewardRecals.RewardsResidue)/consensus.RewardsRateRefreshInterval, blk.RewardsRate, "Mismatching (%d-%d-%d)/%d != %d @ round %d", balanceOfRewardsPool, minBal, lastRoundBeforeRewardRecals.RewardsResidue, consensus.RewardsRateRefreshInterval, blk.RewardsRate, lastRoundBeforeRewardRecals.Round)
+ r.Equalf((balanceOfRewardsPool-minBal-lastRoundBeforeRewardRecals.RewardsResidue)/consensusParams.RewardsRateRefreshInterval, blk.RewardsRate, "Mismatching (%d-%d-%d)/%d != %d @ round %d", balanceOfRewardsPool, minBal, lastRoundBeforeRewardRecals.RewardsResidue, consensusParams.RewardsRateRefreshInterval, blk.RewardsRate, lastRoundBeforeRewardRecals.Round)
curStatus, err = client.Status()
r.NoError(err)
deadline = curStatus.LastRound + uint64(5)
fixture.SendMoneyAndWait(deadline, amountToSend, minFee, richAccount.Address, rewardsAccount)
- rewardRecalcRound = rewardRecalcRound + consensus.RewardsRateRefreshInterval
+ rewardRecalcRound = rewardRecalcRound + consensusParams.RewardsRateRefreshInterval
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound - 1))
balanceOfRewardsPool, roundQueried = fixture.GetBalanceAndRound(rewardsAccount)
@@ -353,14 +370,14 @@ func TestRewardRateRecalculation(t *testing.T) {
}
lastRoundBeforeRewardRecals, err = client.Block(rewardRecalcRound - 1)
r.NoError(err)
- consensus = config.Consensus[protocol.ConsensusVersion(lastRoundBeforeRewardRecals.CurrentProtocol)]
+ consensusParams = consensus[protocol.ConsensusVersion(lastRoundBeforeRewardRecals.CurrentProtocol)]
r.NoError(fixture.WaitForRoundWithTimeout(rewardRecalcRound))
blk, err = client.Block(rewardRecalcRound)
r.NoError(err)
- if !consensus.PendingResidueRewards {
+ if !consensusParams.PendingResidueRewards {
lastRoundBeforeRewardRecals.RewardsResidue = 0
}
- r.Equal((balanceOfRewardsPool-minBal-lastRoundBeforeRewardRecals.RewardsResidue)/consensus.RewardsRateRefreshInterval, blk.RewardsRate)
+ r.Equal((balanceOfRewardsPool-minBal-lastRoundBeforeRewardRecals.RewardsResidue)/consensusParams.RewardsRateRefreshInterval, blk.RewardsRate)
// if the network keeps progressing without error,
// this shows the network is healthy and that we didn't panic
finalRound := rewardRecalcRound + uint64(5)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index 2ff90965e0..715fa73062 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -19,7 +19,6 @@ package transactions
import (
"fmt"
"path/filepath"
- "runtime"
"strings"
"testing"
@@ -420,9 +419,6 @@ func TestAssetConfig(t *testing.T) {
}
func TestAssetInformation(t *testing.T) {
- if runtime.GOOS == "darwin" {
- t.Skip()
- }
t.Parallel()
a := require.New(t)
@@ -640,9 +636,6 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) {
}
func TestAssetSend(t *testing.T) {
- if runtime.GOOS == "darwin" {
- t.Skip()
- }
t.Parallel()
a := require.New(t)
@@ -890,7 +883,7 @@ func TestAssetSend(t *testing.T) {
}
func TestAssetCreateWaitRestartDelete(t *testing.T) {
- a, fixture, client, account0 := setupTestAndNetwork(t, "")
+ a, fixture, client, account0 := setupTestAndNetwork(t, "", nil)
defer fixture.Shutdown()
// There should be no assets to start with
@@ -954,7 +947,25 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
if testing.Short() {
t.Skip()
}
- a, fixture, client, account0 := setupTestAndNetwork(t, "TwoNodes50EachTestShorterLookback.json")
+ configurableConsensus := make(config.ConsensusProtocols)
+
+ consensusVersion := protocol.ConsensusVersion("test-shorter-lookback")
+
+ // Setting the testShorterLookback parameters derived from ConsensusCurrentVersion
+ // Will result in MaxBalLookback = 32
+ // Used to run tests faster where past MaxBalLookback values are checked
+ consensusParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ consensusParams.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback
+ // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md
+ consensusParams.SeedLookback = 2
+ consensusParams.SeedRefreshInterval = 8
+ consensusParams.MaxBalLookback = 2 * consensusParams.SeedLookback * consensusParams.SeedRefreshInterval // 32
+
+ configurableConsensus[consensusVersion] = consensusParams
+
+ a, fixture, client, account0 := setupTestAndNetwork(t, "TwoNodes50EachTestShorterLookback.json", configurableConsensus)
defer fixture.Shutdown()
// There should be no assets to start with
@@ -1018,7 +1029,7 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
/** Helper functions **/
// Setup the test and the network
-func setupTestAndNetwork(t *testing.T, networkTemplate string) (
+func setupTestAndNetwork(t *testing.T, networkTemplate string, consensus config.ConsensusProtocols) (
Assertions *require.Assertions, Fixture *fixtures.RestClientFixture, Client *libgoal.Client, Account0 string) {
t.Parallel()
@@ -1028,6 +1039,9 @@ func setupTestAndNetwork(t *testing.T, networkTemplate string) (
networkTemplate = "TwoNodes50Each.json"
}
var fixture fixtures.RestClientFixture
+ if consensus != nil {
+ fixture.SetConsensus(consensus)
+ }
fixture.Setup(t, filepath.Join("nettemplates", networkTemplate))
accountList, err := fixture.GetWalletsSortedByBalance()
asser.NoError(err)
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index 33c313caea..2f7e59d5bf 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -19,7 +19,6 @@ package transactions
import (
"math/rand"
"path/filepath"
- "runtime"
"testing"
"github.com/stretchr/testify/require"
@@ -41,7 +40,7 @@ func GenerateRandomBytes(n int) []byte {
// this test checks that two accounts' balances stay up to date
// as they send each other money many times
func TestAccountsCanSendMoney(t *testing.T) {
- if runtime.GOOS == "darwin" {
+ if testing.Short() {
t.Skip()
}
if testing.Short() {
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
index 248bd37717..76d8688bd9 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go
@@ -295,7 +295,7 @@ func TestSignTransaction(t *testing.T) {
// Request a signature
req1 := kmdapi.APIV1POSTTransactionSignRequest{
WalletHandleToken: walletHandleToken,
- Transaction: protocol.Encode(tx),
+ Transaction: protocol.Encode(&tx),
WalletPassword: f.WalletPassword,
}
resp1 := kmdapi.APIV1POSTTransactionSignResponse{}
@@ -399,7 +399,7 @@ func BenchmarkSignTransaction(b *testing.B) {
// Request a signature
req1 := kmdapi.APIV1POSTTransactionSignRequest{
WalletHandleToken: walletHandleToken,
- Transaction: protocol.Encode(tx),
+ Transaction: protocol.Encode(&tx),
WalletPassword: f.WalletPassword,
}
resp1 := kmdapi.APIV1POSTTransactionSignResponse{}
diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
index 299e61f9b4..4c2e70aa13 100644
--- a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
+++ b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go
@@ -190,7 +190,7 @@ func TestMultisigSign(t *testing.T) {
// Try to sign
req2 := kmdapi.APIV1POSTMultisigTransactionSignRequest{
WalletHandleToken: walletHandleToken,
- Transaction: protocol.Encode(tx),
+ Transaction: protocol.Encode(&tx),
PublicKey: pk1,
PartialMsig: crypto.MultisigSig{},
WalletPassword: f.WalletPassword,
@@ -206,7 +206,7 @@ func TestMultisigSign(t *testing.T) {
// Try to add another signature
req3 := kmdapi.APIV1POSTMultisigTransactionSignRequest{
WalletHandleToken: walletHandleToken,
- Transaction: protocol.Encode(tx),
+ Transaction: protocol.Encode(&tx),
PublicKey: pk2,
PartialMsig: msig,
WalletPassword: f.WalletPassword,
diff --git a/test/e2e-go/perf/basic_test.go b/test/e2e-go/perf/basic_test.go
index d027329c10..f511ddae92 100644
--- a/test/e2e-go/perf/basic_test.go
+++ b/test/e2e-go/perf/basic_test.go
@@ -85,7 +85,20 @@ func BenchmarkPaymentsThroughput(b *testing.B) {
func doBenchTemplate(b *testing.B, template string, moneynode string) {
fmt.Printf("Starting to benchmark template %s\n", template)
+ // consensusTestBigBlocks is a version of ConsensusCurrentVersion
+ // used for testing with a big block size
+ // (a large MaxTxnBytesPerBlock).
+ const consensusTestBigBlocks = protocol.ConsensusVersion("test-big-blocks")
+
var fixture fixtures.RestClientFixture
+
+ testBigBlocks := config.Consensus[protocol.ConsensusCurrentVersion]
+ testBigBlocks.MaxTxnBytesPerBlock = 100000000
+ testBigBlocks.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+
+ fixture.SetConsensus(config.ConsensusProtocols{
+ consensusTestBigBlocks: testBigBlocks,
+ })
fixture.Setup(b, filepath.Join("nettemplates", template))
defer fixture.Shutdown()
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index dfd875e6a2..9938b31900 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -24,7 +24,6 @@ import (
"math/rand"
"os"
"path/filepath"
- "runtime"
"strings"
"testing"
"time"
@@ -196,9 +195,6 @@ func TestClientCanGetStatusAfterBlock(t *testing.T) {
}
func TestTransactionsByAddr(t *testing.T) {
- if runtime.GOOS == "darwin" {
- t.Skip()
- }
var localFixture fixtures.RestClientFixture
localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer localFixture.Shutdown()
diff --git a/test/e2e-go/upgrades/send_receive_upgrade_test.go b/test/e2e-go/upgrades/send_receive_upgrade_test.go
index 5367ffaf42..e748a55189 100644
--- a/test/e2e-go/upgrades/send_receive_upgrade_test.go
+++ b/test/e2e-go/upgrades/send_receive_upgrade_test.go
@@ -26,6 +26,8 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
@@ -102,11 +104,43 @@ func TestAccountsCanSendMoneyAcrossUpgradeV15toV16(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV15Upgrade.json"))
}
+// consensusTestFastUpgrade is meant for testing of protocol upgrades:
+// during testing, it is equivalent to another protocol with the exception
+// of the upgrade parameters, which allow for upgrades to take place after
+// only a few rounds.
+func consensusTestFastUpgrade(proto protocol.ConsensusVersion) protocol.ConsensusVersion {
+ return "test-fast-upgrade-" + proto
+}
+
+func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtocols) {
+ fastUpgradeProtocols = make(config.ConsensusProtocols)
+
+ for proto, params := range config.Consensus {
+ fastParams := params
+ fastParams.UpgradeVoteRounds = 5
+ fastParams.UpgradeThreshold = 3
+ fastParams.DefaultUpgradeWaitRounds = 5
+ fastParams.MaxVersionStringLen += len(consensusTestFastUpgrade(""))
+ fastParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64)
+
+ for ver := range params.ApprovedUpgrades {
+ fastParams.ApprovedUpgrades[consensusTestFastUpgrade(ver)] = 0
+ }
+
+ fastUpgradeProtocols[consensusTestFastUpgrade(proto)] = fastParams
+ }
+ return
+}
+
func testAccountsCanSendMoneyAcrossUpgrade(t *testing.T, templatePath string) {
t.Parallel()
a := require.New(t)
os.Setenv("ALGOSMALLLAMBDAMSEC", "500")
+
+ consensus := generateFastUpgradeConsensus()
+
var fixture fixtures.RestClientFixture
+ fixture.SetConsensus(consensus)
fixture.Setup(t, templatePath)
defer fixture.Shutdown()
c := fixture.LibGoalClient
diff --git a/test/framework/fixtures/auctionFixture.go b/test/framework/fixtures/auctionFixture.go
index 67665f0729..f4ebe9296a 100644
--- a/test/framework/fixtures/auctionFixture.go
+++ b/test/framework/fixtures/auctionFixture.go
@@ -537,7 +537,7 @@ func (f *AuctionFixture) readAndDecode(filePath string, obj interface{}) {
return
}
- err = protocol.Decode(data, obj)
+ err = protocol.DecodeReflect(data, obj)
if err != nil {
f.t.Errorf("Decoding from %s: %v", filePath, err)
return
@@ -842,7 +842,7 @@ func (f *AuctionFixture) signBid(walletHandle []byte, password string, account s
return
}
- signedBidNote = client.BytesBase64(protocol.Encode(auction.NoteField{
+ signedBidNote = client.BytesBase64(protocol.Encode(&auction.NoteField{
Type: auction.NoteBid,
SignedBid: auction.SignedBid{
Bid: bid,
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index ffc11f12a7..b8ea76404d 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -53,6 +53,13 @@ type LibGoalFixture struct {
t TestingT
tMu deadlock.RWMutex
clientPartKeys map[string][]account.Participation
+ consensus config.ConsensusProtocols
+}
+
+// SetConsensus applies a new consensus settings which would get deployed before
+// any of the nodes starts
+func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) {
+ f.consensus = consensus
}
// Setup is called to initialize the test fixture for the test(s)
@@ -86,7 +93,7 @@ func (f *LibGoalFixture) setup(test TestingT, testName string, templateFile stri
os.RemoveAll(f.rootDir)
templateFile = filepath.Join(f.testDataDir, templateFile)
importKeys := false // Don't automatically import root keys when creating folders, we'll import on-demand
- network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, templateFile, f.binDir, importKeys, f.nodeExitWithError)
+ network, err := netdeploy.CreateNetworkFromTemplate("test", f.rootDir, templateFile, f.binDir, importKeys, f.nodeExitWithError, f.consensus)
f.failOnError(err, "CreateNetworkFromTemplate failed: %v")
f.network = network
@@ -448,8 +455,15 @@ func (f *LibGoalFixture) ConsensusParams(round uint64) (consensus config.Consens
if err != nil {
return
}
-
- return config.Consensus[protocol.ConsensusVersion(block.CurrentProtocol)], nil
+ version := protocol.ConsensusVersion(block.CurrentProtocol)
+ if f.consensus != nil {
+ consensus, has := f.consensus[version]
+ if has {
+ return consensus, nil
+ }
+ }
+ consensus = config.Consensus[version]
+ return
}
// CurrentMinFeeAndBalance returns the MinTxnFee and MinBalance for the currently active protocol
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index 535275d086..b9b384bfde 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -48,7 +48,6 @@ func (f *RestClientFixture) Setup(t TestingT, templateFile string) {
// but does not start the network before returning. Call NC.Start() to start later.
func (f *RestClientFixture) SetupNoStart(t TestingT, templateFile string) {
f.LibGoalFixture.SetupNoStart(t, templateFile)
- f.AlgodClient = f.GetAlgodClientForController(f.NC)
}
// SetupShared is called to initialize the test fixture that will be used for multiple tests
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index 72da5ba8c8..aef12cfe39 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -316,25 +316,25 @@ def xrun(cmd, *args, **kwargs):
cmdr = repr(cmd)
logger.error('subprocess timed out {}'.format(cmdr), exc_info=True)
if p.stdout:
- sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, p.stdout))
+ sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, p.stdout.read()))
if p.stderr:
- sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, p.stderr))
+ sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, p.stderr.read()))
raise
except Exception as e:
cmdr = repr(cmd)
logger.error('subprocess exception {}'.format(cmdr), exc_info=True)
if p.stdout:
- sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, p.stdout))
+ sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, p.stdout.read()))
if p.stderr:
- sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, p.stderr))
+ sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, p.stderr.read()))
raise
if p.returncode != 0:
cmdr = repr(cmd)
logger.error('cmd failed {}'.format(cmdr))
if p.stdout:
- sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, p.stdout))
+ sys.stderr.write('output from {}:\n{}\n\n'.format(cmdr, p.stdout.read()))
if p.stderr:
- sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, p.stderr))
+ sys.stderr.write('stderr from {}:\n{}\n\n'.format(cmdr, p.stderr.read()))
raise Exception('error: cmd failed: {}'.format(cmdr))
_logging_format = '%(asctime)s :%(lineno)d %(message)s'
diff --git a/test/testdata/configs/config-v5.json b/test/testdata/configs/config-v5.json
index 984e9aa3c5..fee988caf3 100644
--- a/test/testdata/configs/config-v5.json
+++ b/test/testdata/configs/config-v5.json
@@ -10,6 +10,7 @@
"ConnectionsRateLimitingWindowSeconds": 1,
"ConnectionsRateLimitingCount": 60,
"DeadlockDetection": 0,
+ "DisableOutgoingConnectionThrottling": false,
"DNSBootstrapID": ".algorand.network",
"EnableAgreementReporting": false,
"EnableIncomingMessageFilter": false,
@@ -31,6 +32,7 @@
"NodeExporterPath": "./node_exporter",
"OutgoingMessageFilterBucketCount": 3,
"OutgoingMessageFilterBucketSize": 128,
+ "PeerConnectionsUpdateInterval": 3600,
"PriorityPeers": {},
"ReconnectTime": 60000000000,
"ReservedFDs": 256,
@@ -38,12 +40,11 @@
"RestWriteTimeoutSeconds": 120,
"RunHosted": false,
"SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
"TelemetryToLog": true,
"TxPoolExponentialIncreaseFactor": 2,
"TxPoolSize": 15000,
"TxSyncIntervalSeconds": 60,
"TxSyncTimeoutSeconds": 30,
- "TxSyncServeResponseSize": 1000000,
- "SuggestedFeeSlidingWindowSize": 50,
- "PeerConnectionsUpdateInterval": 3600
+ "TxSyncServeResponseSize": 1000000
}
diff --git a/test/testdata/configs/config-v6.json b/test/testdata/configs/config-v6.json
new file mode 100644
index 0000000000..e56e6f1388
--- /dev/null
+++ b/test/testdata/configs/config-v6.json
@@ -0,0 +1,51 @@
+{
+ "Version": 6,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "ConnectionsRateLimitingCount": 60,
+ "DeadlockDetection": 0,
+ "DNSBootstrapID": ".algorand.network",
+ "EnableAgreementReporting": false,
+ "EnableIncomingMessageFilter": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 10000,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxConnectionsPerIP": 30,
+ "NetAddress": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "PriorityPeers": {},
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "TelemetryToLog": true,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncTimeoutSeconds": 30,
+ "TxSyncServeResponseSize": 1000000,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "PeerConnectionsUpdateInterval": 3600,
+ "DNSSecurityFlags": 1,
+ "EnablePingHandler": true
+}
diff --git a/test/testdata/configs/system-v0.json b/test/testdata/configs/system-v0.json
new file mode 100644
index 0000000000..7828bb4ee5
--- /dev/null
+++ b/test/testdata/configs/system-v0.json
@@ -0,0 +1,4 @@
+{
+ "systemd_managed": true,
+ "shared_server": true
+}
diff --git a/test/testdata/deployednettemplates/recipes/scenario3/genesis.json b/test/testdata/deployednettemplates/recipes/scenario3/genesis.json
index fe07d441d7..1c489157d2 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3/genesis.json
@@ -1,7 +1,7 @@
{
"NetworkName": "",
"VersionModifier": "",
- "ConsensusProtocol": "future",
+ "ConsensusProtocol": "",
"FirstPartKeyRound": 0,
"LastPartKeyRound": 3000000,
"PartKeyDilution": 0,
diff --git a/test/testdata/deployednettemplates/recipes/scenario3/net.json b/test/testdata/deployednettemplates/recipes/scenario3/net.json
index 09ac1a84fc..c2bc2b2dad 100644
--- a/test/testdata/deployednettemplates/recipes/scenario3/net.json
+++ b/test/testdata/deployednettemplates/recipes/scenario3/net.json
@@ -15,7 +15,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -34,7 +34,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -53,7 +53,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -72,7 +72,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -91,7 +91,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -110,7 +110,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -129,7 +129,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -148,7 +148,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -167,7 +167,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -186,7 +186,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -205,7 +205,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -224,7 +224,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -243,7 +243,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -262,7 +262,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -281,7 +281,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -300,7 +300,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -319,7 +319,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -338,7 +338,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -357,7 +357,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -376,7 +376,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -434,7 +434,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node101",
@@ -487,7 +487,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node201",
@@ -540,7 +540,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node301",
@@ -593,7 +593,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node401",
@@ -646,7 +646,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node501",
@@ -699,7 +699,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node601",
@@ -752,7 +752,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node701",
@@ -805,7 +805,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node801",
@@ -858,7 +858,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node901",
@@ -911,7 +911,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -969,7 +969,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node102",
@@ -1022,7 +1022,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node202",
@@ -1075,7 +1075,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node302",
@@ -1128,7 +1128,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node402",
@@ -1181,7 +1181,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node502",
@@ -1234,7 +1234,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node602",
@@ -1287,7 +1287,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node702",
@@ -1340,7 +1340,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node802",
@@ -1393,7 +1393,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node902",
@@ -1446,7 +1446,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -1504,7 +1504,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node103",
@@ -1557,7 +1557,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node203",
@@ -1610,7 +1610,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node303",
@@ -1663,7 +1663,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node403",
@@ -1716,7 +1716,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node503",
@@ -1769,7 +1769,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node603",
@@ -1822,7 +1822,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node703",
@@ -1875,7 +1875,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node803",
@@ -1928,7 +1928,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node903",
@@ -1981,7 +1981,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -2039,7 +2039,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node104",
@@ -2092,7 +2092,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node204",
@@ -2145,7 +2145,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node304",
@@ -2198,7 +2198,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node404",
@@ -2251,7 +2251,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node504",
@@ -2304,7 +2304,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node604",
@@ -2357,7 +2357,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node704",
@@ -2410,7 +2410,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node804",
@@ -2463,7 +2463,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node904",
@@ -2516,7 +2516,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -2574,7 +2574,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node105",
@@ -2627,7 +2627,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node205",
@@ -2680,7 +2680,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node305",
@@ -2733,7 +2733,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node405",
@@ -2786,7 +2786,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node505",
@@ -2839,7 +2839,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node605",
@@ -2892,7 +2892,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node705",
@@ -2945,7 +2945,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node805",
@@ -2998,7 +2998,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node905",
@@ -3051,7 +3051,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -3109,7 +3109,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node106",
@@ -3162,7 +3162,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node206",
@@ -3215,7 +3215,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node306",
@@ -3268,7 +3268,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node406",
@@ -3321,7 +3321,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node506",
@@ -3374,7 +3374,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node606",
@@ -3427,7 +3427,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node706",
@@ -3480,7 +3480,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node806",
@@ -3533,7 +3533,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node906",
@@ -3586,7 +3586,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -3644,7 +3644,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node107",
@@ -3697,7 +3697,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": true,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node207",
@@ -3750,7 +3750,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node307",
@@ -3803,7 +3803,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node407",
@@ -3856,7 +3856,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node507",
@@ -3909,7 +3909,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node607",
@@ -3962,7 +3962,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node707",
@@ -4015,7 +4015,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node807",
@@ -4068,7 +4068,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node907",
@@ -4121,7 +4121,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -4179,7 +4179,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node108",
@@ -4232,7 +4232,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node208",
@@ -4285,7 +4285,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node308",
@@ -4338,7 +4338,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node408",
@@ -4391,7 +4391,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node508",
@@ -4444,7 +4444,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node608",
@@ -4497,7 +4497,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node708",
@@ -4550,7 +4550,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node808",
@@ -4603,7 +4603,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node908",
@@ -4656,7 +4656,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -4714,7 +4714,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node109",
@@ -4767,7 +4767,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node209",
@@ -4820,7 +4820,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node309",
@@ -4873,7 +4873,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node409",
@@ -4926,7 +4926,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node509",
@@ -4979,7 +4979,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node609",
@@ -5032,7 +5032,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node709",
@@ -5085,7 +5085,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node809",
@@ -5138,7 +5138,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node909",
@@ -5191,7 +5191,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
}
]
},
@@ -5249,7 +5249,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node110",
@@ -5302,7 +5302,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node210",
@@ -5355,7 +5355,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node310",
@@ -5408,7 +5408,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node410",
@@ -5461,7 +5461,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node510",
@@ -5514,7 +5514,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node610",
@@ -5567,7 +5567,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node710",
@@ -5620,7 +5620,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node810",
@@ -5673,7 +5673,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }"
},
{
"Name": "node910",
@@ -5726,7 +5726,7 @@
"MetricsURI": "{{MetricsURI}}",
"EnableService": false,
"EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4 }"
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \"