diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
index bab0d9df73..220da69148 100644
--- a/.github/ISSUE_TEMPLATE/question.md
+++ b/.github/ISSUE_TEMPLATE/question.md
@@ -4,14 +4,14 @@ about: 'General questions related to the algorand platform.'
title: ''
labels: 'question'
---
-
+Additional Developer information is available here: https://developer.algorand.org/
+
+NOTE: If this issue relates to security, please use the vulnerability disclosure form here:
+https://www.algorand.com/resources/blog/security
diff --git a/.travis.yml b/.travis.yml
index af5f20621b..66d1cbc09b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,4 @@
-dist: xenial
+dist: bionic
go:
- "1.12"
go_import_path: github.com/algorand/go-algorand
diff --git a/Makefile b/Makefile
index 141a378eb6..d8fbe60842 100644
--- a/Makefile
+++ b/Makefile
@@ -157,12 +157,17 @@ buildsrc: $(SRCPATH)/crypto/lib/libsodium.a node_exporter NONGO_BIN deps $(ALGOD
cd $(SRCPATH) && \
go vet $(UNIT_TEST_SOURCES) $(E2E_TEST_SOURCES)
+SOURCES_RACE := github.com/algorand/go-algorand/cmd/kmd
+
## Build binaries with the race detector enabled in them.
## This allows us to run e2e tests with race detection.
+## We overwrite bin-race/kmd with a non -race version due to
+## the incredible performance impact of -race on Scrypt.
build-race: build
@mkdir -p $(GOPATH)/bin-race
cd $(SRCPATH) && \
- GOBIN=$(GOPATH)/bin-race go install $(GOTAGS) -ldflags="$(GOLDFLAGS)" $(SOURCES)
+ GOBIN=$(GOPATH)/bin-race go install $(GOTAGS) -race -ldflags="$(GOLDFLAGS)" $(SOURCES) && \
+ GOBIN=$(GOPATH)/bin-race go install $(GOTAGS) -ldflags="$(GOLDFLAGS)" $(SOURCES_RACE)
NONGO_BIN_FILES=$(GOPATH)/bin/find-nodes.sh $(GOPATH)/bin/update.sh $(GOPATH)/bin/updatekey.json $(GOPATH)/bin/COPYING
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index ee86510a28..9e71159e61 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -70,15 +70,7 @@ func makeUnauthenticatedVote(l Ledger, sender basics.Address, selection *crypto.
m, _ := membership(l, rv.Sender, rv.Round, rv.Period, rv.Step)
cred := committee.MakeCredential(&selection.SK, m.Selector)
ephID := basics.OneTimeIDForRound(rv.Round, voting.KeyDilution(config.Consensus[protocol.ConsensusCurrentVersion]))
- var sig crypto.OneTimeSignature
-
- proto, err := l.ConsensusParams(ParamsRound(rv.Round))
- // If we can't figure out the protocol params, the ledger has moved forward
- // well ahead of rv.Round, so our vote is irrelevant. Generate an empty
- // signature in that case.
- if err == nil {
- sig = voting.Sign(ephID, proto.FineGrainedEphemeralKeys, rv)
- }
+ sig := voting.Sign(ephID, rv)
return unauthenticatedVote{
R: rv,
diff --git a/agreement/fuzzer/ledger_test.go b/agreement/fuzzer/ledger_test.go
index e7af218c89..c7d6b2de1a 100644
--- a/agreement/fuzzer/ledger_test.go
+++ b/agreement/fuzzer/ledger_test.go
@@ -334,11 +334,11 @@ func (l *testLedger) EnsureDigest(c agreement.Certificate, quit chan struct{}, v
}
func (l *testLedger) ConsensusParams(r basics.Round) (config.ConsensusParams, error) {
- return config.Consensus[protocol.ConsensusV2], nil
+ return config.Consensus[protocol.ConsensusV7], nil
}
func (l *testLedger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, error) {
- return protocol.ConsensusV2, nil
+ return protocol.ConsensusV7, nil
}
func (l *testLedger) TryEnsuringDigest() bool {
diff --git a/agreement/message_test.go b/agreement/message_test.go
index eb5bd07683..f64791bab1 100644
--- a/agreement/message_test.go
+++ b/agreement/message_test.go
@@ -63,7 +63,7 @@ func BenchmarkVoteDecoding(b *testing.B) {
Cred: committee.UnauthenticatedCredential{
Proof: vrfProof,
},
- Sig: oneTimeSecrets.Sign(id, false, proposal),
+ Sig: oneTimeSecrets.Sign(id, proposal),
}
msgBytes := protocol.Encode(&uv)
diff --git a/agreement/proposal.go b/agreement/proposal.go
index 8781ad6510..c4c7d1c866 100644
--- a/agreement/proposal.go
+++ b/agreement/proposal.go
@@ -132,110 +132,42 @@ func deriveNewSeed(address basics.Address, vrf *crypto.VRFSecrets, rnd round, pe
err = fmt.Errorf("failed to obtain consensus parameters in round %v: %v", ParamsRound(rnd), err)
return
}
- if cparams.TwinSeeds {
- var alpha crypto.Digest
- prevSeed, err := ledger.Seed(seedRound(rnd, cparams))
- if err != nil {
- reterr = fmt.Errorf("failed read seed of round %v: %v", seedRound(rnd, cparams), err)
- return
- }
-
- if period == 0 {
- seedProof, ok = vrf.SK.Prove(prevSeed)
- if !ok {
- reterr = fmt.Errorf("could not make seed proof")
- return
- }
- vrfOut, ok = seedProof.Hash()
- if !ok {
- // If proof2hash fails on a proof we produced with VRF Prove, this indicates our VRF code has a dangerous bug.
- // Panicking is the only safe thing to do.
- logging.Base().Panicf("VrfProof.Hash() failed on a proof we ourselves generated; this indicates a bug in the VRF code: %v", seedProof)
- }
- alpha = crypto.HashObj(proposerSeed{Addr: address, VRF: vrfOut})
- } else {
- alpha = crypto.HashObj(prevSeed)
- }
-
- input := seedInput{Alpha: alpha}
- rerand := rnd % basics.Round(cparams.SeedLookback*cparams.SeedRefreshInterval)
- if rerand < basics.Round(cparams.SeedLookback) {
- digrnd := rnd.SubSaturate(basics.Round(cparams.SeedLookback * cparams.SeedRefreshInterval))
- oldDigest, err := ledger.LookupDigest(digrnd)
- if err != nil {
- reterr = fmt.Errorf("could not lookup old entry digest (for seed) from round %v: %v", digrnd, err)
- return
- }
- input.History = oldDigest
- }
- newSeed = committee.Seed(crypto.HashObj(input))
- return
- }
-
- // Compute the new seed
- prevSeed, err := ledger.Seed(rnd.SubSaturate(1))
+ var alpha crypto.Digest
+ prevSeed, err := ledger.Seed(seedRound(rnd, cparams))
if err != nil {
reterr = fmt.Errorf("failed read seed of round %v: %v", seedRound(rnd, cparams), err)
return
}
- if (rnd % basics.Round(cparams.SeedLookback)) != 0 {
- // In odd rounds, the seed is just the seed from the previous round, unchanged.
- // This simplifies the analysis now that our seed lookback parameter is 2. (In the original paper it was 1.)
- newSeed = prevSeed
- } else {
- // In even rounds, we evolve the seed
- var q1 crypto.Digest
- var ok bool
- var vrfOut crypto.VrfOutput
- if period == 0 {
- // For period 0, the proposer runs the previous seed through their VRF.
- // To an adversary trying to predict (or influence) future seeds, as soon as there's an honest proposer the seed becomes completely rerandomized.
- // This is because a VRF output is pseudorandom to anyone without the secret key or the corresponding proof.
- // The adversary's ability to influence the seed is also limited because of the uniqueness property of the VRF.
- seedProof, ok = vrf.SK.Prove(prevSeed)
- if !ok {
- reterr = fmt.Errorf("Could not make seed proof")
- return
- }
- vrfOut, ok = seedProof.Hash()
- if !ok {
- // If proof2hash fails on a proof we produced with VRF Prove, this indicates our VRF code has a dangerous bug.
- // Panicking is the only safe thing to do.
- logging.Base().Panicf("VrfProof.Hash() failed on a proof we ourselves generated; this indicates a bug in the VRF code: %v", seedProof)
- }
- // Hashing in the proposer's address is not strictly speaking necessary.
- // We do it here to be consistent with Credentials, where hashing the address in with the VRF output is necessary to prevent a specific attack.
- q1 = crypto.Hash(append(vrfOut[:], address[:]...))
- } else {
- // For periods > 0, we don't use the proposer's VRF or address.
- // This limits an adversary's ability to influence the seed.
- // In particular, some of the adversary's accounts may be likely to be selected in period 0, others in period 1, and so on.
- // If the adversary doesn't like any of the seeds from any of their period-0 possible proposers, they might try causing the network to move on to the next period until they reach a period where one of their likely proposers gives them a good seed.
- // By making periods > 0 give only one possible seed, this limits the number of new seeds the adversary can choose between.
- q1 = crypto.Hash(prevSeed[:])
+
+ if period == 0 {
+ seedProof, ok = vrf.SK.Prove(prevSeed)
+ if !ok {
+ reterr = fmt.Errorf("could not make seed proof")
+ return
+ }
+ vrfOut, ok = seedProof.Hash()
+ if !ok {
+ // If proof2hash fails on a proof we produced with VRF Prove, this indicates our VRF code has a dangerous bug.
+ // Panicking is the only safe thing to do.
+ logging.Base().Panicf("VrfProof.Hash() failed on a proof we ourselves generated; this indicates a bug in the VRF code: %v", seedProof)
}
+ alpha = crypto.HashObj(proposerSeed{Addr: address, VRF: vrfOut})
+ } else {
+ alpha = crypto.HashObj(prevSeed)
+ }
- // Periodically mix an older block hash (which either implicitly or explicitly commits to the balances) into the seed.
- // This prevents a specific attack wherein an attacker during a long partition can cause the network to permanently stall even after the partition has healed.
- // In particular, during a partition, the adversary can (by dropping other proposals) propose every block.
- // Thus they can predict (and to some extent influence) seed values for future rounds that are during the partition.
- // Say the partition is going to end just before round R. In round R, proposers are selected using the seed from round (R-SeedLookback)
- // and the balances / VRF keys from round (R-BalLookback), both of which are during the partition.
- // Say we're before (R-BalLookback). Because the adversary knows what the seed will be at round R-(SeedLookback), they can find
- // (by brute force) and register VRF public keys that give extremely good credentials (disproportionate to stake) for being round R proposer in period 0.
- // Likewise they can register VRF public keys that will make them be proposer in round R period 1, and period 2, and so on for all periods.
- // Then even after the partition has healed, the adversary can permanently stall the network because they will be selected in every period of round R and can keep proposing bad blocks.
- // Periodically mixing the block hash into the seed defeats this attack: any change to the balances / VRF keys registered in round (R-BalLookback) will cause the seed in round (R-SeedLookback) to change. So by brute force the adversary may be able to make themselves leader in a few periods of round R but certainly not all of them, and they won't be able to stall the network after the partition has healed.
- if rnd%basics.Round(cparams.SeedRefreshInterval) == 0 {
- oldDigest, err := ledger.LookupDigest(rnd.SubSaturate(basics.Round(cparams.SeedRefreshInterval)))
- if err != nil {
- reterr = fmt.Errorf("Could not lookup old entry digest (for seed): %v", err)
- return
- }
- q1 = crypto.Hash(append(q1[:], oldDigest[:]...))
+ input := seedInput{Alpha: alpha}
+ rerand := rnd % basics.Round(cparams.SeedLookback*cparams.SeedRefreshInterval)
+ if rerand < basics.Round(cparams.SeedLookback) {
+ digrnd := rnd.SubSaturate(basics.Round(cparams.SeedLookback * cparams.SeedRefreshInterval))
+ oldDigest, err := ledger.LookupDigest(digrnd)
+ if err != nil {
+ reterr = fmt.Errorf("could not lookup old entry digest (for seed) from round %v: %v", digrnd, err)
+ return
}
- newSeed = committee.Seed(q1)
+ input.History = oldDigest
}
+ newSeed = committee.Seed(crypto.HashObj(input))
return
}
@@ -253,79 +185,41 @@ func verifyNewSeed(p unauthenticatedProposal, ledger LedgerReader) error {
return fmt.Errorf("failed to obtain balance record for address %v in round %v: %v", value.OriginalProposer, balanceRound, err)
}
- if cparams.TwinSeeds {
- var alpha crypto.Digest
- prevSeed, err := ledger.Seed(seedRound(rnd, cparams))
- if err != nil {
- return fmt.Errorf("failed read seed of round %v: %v", seedRound(rnd, cparams), err)
- }
-
- if value.OriginalPeriod == 0 {
- verifier := proposerRecord.SelectionID
- ok, vrfOut := verifier.Verify(p.SeedProof, prevSeed)
- if !ok {
- return fmt.Errorf("payload seed proof malformed (%v, %v)", prevSeed, p.SeedProof)
- }
- vrfOut, ok = p.SeedProof.Hash()
- if !ok {
- // If proof2hash fails on a proof we produced with VRF Prove, this indicates our VRF code has a dangerous bug.
- // Panicking is the only safe thing to do.
- logging.Base().Panicf("VrfProof.Hash() failed on a proof we ourselves generated; this indicates a bug in the VRF code: %v", p.SeedProof)
- }
- alpha = crypto.HashObj(proposerSeed{Addr: proposerRecord.Addr, VRF: vrfOut})
- } else {
- alpha = crypto.HashObj(prevSeed)
- }
+ var alpha crypto.Digest
+ prevSeed, err := ledger.Seed(seedRound(rnd, cparams))
+ if err != nil {
+ return fmt.Errorf("failed read seed of round %v: %v", seedRound(rnd, cparams), err)
+ }
- input := seedInput{Alpha: alpha}
- rerand := rnd % basics.Round(cparams.SeedLookback*cparams.SeedRefreshInterval)
- if rerand < basics.Round(cparams.SeedLookback) {
- digrnd := rnd.SubSaturate(basics.Round(cparams.SeedLookback * cparams.SeedRefreshInterval))
- oldDigest, err := ledger.LookupDigest(digrnd)
- if err != nil {
- return fmt.Errorf("could not lookup old entry digest (for seed) from round %v: %v", digrnd, err)
- }
- input.History = oldDigest
+ if value.OriginalPeriod == 0 {
+ verifier := proposerRecord.SelectionID
+ ok, vrfOut := verifier.Verify(p.SeedProof, prevSeed)
+ if !ok {
+ return fmt.Errorf("payload seed proof malformed (%v, %v)", prevSeed, p.SeedProof)
}
- if p.Seed() != committee.Seed(crypto.HashObj(input)) {
- return fmt.Errorf("payload seed malformed (%v != %v)", committee.Seed(crypto.HashObj(input)), p.Seed())
+ vrfOut, ok = p.SeedProof.Hash()
+ if !ok {
+ // If proof2hash fails on a proof we produced with VRF Prove, this indicates our VRF code has a dangerous bug.
+ // Panicking is the only safe thing to do.
+ logging.Base().Panicf("VrfProof.Hash() failed on a proof we ourselves generated; this indicates a bug in the VRF code: %v", p.SeedProof)
}
+ alpha = crypto.HashObj(proposerSeed{Addr: proposerRecord.Addr, VRF: vrfOut})
} else {
- prevSeed, err := ledger.Seed(p.Round().SubSaturate(1))
- if err != nil {
- return fmt.Errorf("could not perform ledger read for prevSeed: %v", err)
- }
+ alpha = crypto.HashObj(prevSeed)
+ }
- // Check the seed is computed correctly. See comments in proposalForBlock() for details.
- if p.Round()%basics.Round(cparams.SeedLookback) != 0 {
- if p.Seed() != prevSeed {
- return fmt.Errorf("payload seed malformed")
- }
- } else {
- var q1 crypto.Digest
- if value.OriginalPeriod == 0 {
- verifier := proposerRecord.SelectionID
- ok, vrfOut := verifier.Verify(p.SeedProof, prevSeed)
- if !ok {
- return fmt.Errorf("payload seed proof malformed (%v, %v)", prevSeed, p.SeedProof)
- }
- q1 = crypto.Hash(append(vrfOut[:], proposerRecord.Addr[:]...))
- } else {
- q1 = crypto.Hash(prevSeed[:])
- }
-
- if p.Round()%basics.Round(cparams.SeedRefreshInterval) == 0 {
- oldDigest, err := ledger.LookupDigest(p.Round().SubSaturate(basics.Round(cparams.SeedRefreshInterval)))
- if err != nil {
- return fmt.Errorf("could not perform ledger read for oldDigest: %v", err)
- }
- q1 = crypto.Hash(append(q1[:], oldDigest[:]...))
- }
-
- if p.Seed() != committee.Seed(q1) {
- return fmt.Errorf("payload seed malformed (%v != %v)", committee.Seed(q1), p.Seed())
- }
+ input := seedInput{Alpha: alpha}
+ rerand := rnd % basics.Round(cparams.SeedLookback*cparams.SeedRefreshInterval)
+ if rerand < basics.Round(cparams.SeedLookback) {
+ digrnd := rnd.SubSaturate(basics.Round(cparams.SeedLookback * cparams.SeedRefreshInterval))
+ oldDigest, err := ledger.LookupDigest(digrnd)
+ if err != nil {
+ return fmt.Errorf("could not lookup old entry digest (for seed) from round %v: %v", digrnd, err)
}
+ input.History = oldDigest
+ }
+ if p.Seed() != committee.Seed(crypto.HashObj(input)) {
+ return fmt.Errorf("payload seed malformed (%v != %v)", committee.Seed(crypto.HashObj(input)), p.Seed())
}
return nil
}
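
The seed-derivation logic retained above (the former TwinSeeds branch, now the only path in both deriveNewSeed and verifyNewSeed) can be summarized with a minimal, self-contained Go sketch. The flat byte concatenations and stand-in types below replace the repository's HashObj/proposerSeed/seedInput encodings and ledger calls, so this illustrates the control flow only, not the real API.

package main

import (
	"crypto/sha512"
	"fmt"
)

type digest [32]byte

func hash(b []byte) digest { return digest(sha512.Sum512_256(b)) }

// deriveSeedSketch mirrors the retained flow: in period 0 the proposer's VRF
// output over the previous seed (plus their address) becomes alpha; in later
// periods alpha is simply a hash of the previous seed; and an old block digest
// is mixed in once every SeedLookback*SeedRefreshInterval rounds so an
// adversary who controlled proposals during a partition cannot fix all future
// seeds.
func deriveSeedSketch(rnd, seedLookback, seedRefreshInterval, period uint64,
	prevSeed, vrfOut, addr, oldDigest []byte) digest {
	var alpha digest
	if period == 0 {
		alpha = hash(append(vrfOut, addr...))
	} else {
		alpha = hash(prevSeed)
	}
	input := alpha[:]
	if rnd%(seedLookback*seedRefreshInterval) < seedLookback {
		input = append(input, oldDigest...)
	}
	return hash(input)
}

func main() {
	seed := deriveSeedSketch(160, 2, 80, 0,
		[]byte("prev-seed"), []byte("vrf-out"), []byte("addr"), []byte("old-digest"))
	fmt.Printf("derived seed: %x\n", seed)
}
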
diff --git a/agreement/selector.go b/agreement/selector.go
index 80c364d516..94dd299914 100644
--- a/agreement/selector.go
+++ b/agreement/selector.go
@@ -46,15 +46,7 @@ func (sel selector) CommitteeSize(proto config.ConsensusParams) uint64 {
}
func balanceRound(r basics.Round, cparams config.ConsensusParams) basics.Round {
- if cparams.TwinSeeds {
- return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback))
- }
-
- lookback := basics.Round(2*cparams.SeedRefreshInterval + cparams.SeedLookback + 1)
- if cparams.IncorrectBalLookback {
- return (r + 2).SubSaturate(lookback)
- }
- return r.SubSaturate(lookback)
+ return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback))
}
func seedRound(r basics.Round, cparams config.ConsensusParams) basics.Round {
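
As a sanity check on the unified lookback above: assuming the v8 parameters set later in this patch (SeedLookback = 2, SeedRefreshInterval = 80), balanceRound looks back 2 * 80 * 2 = 320 rounds, which matches the MaxBalLookback of 320 now set in the base consensus parameters, so the balances needed for committee selection stay within the retained ledger history.
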
diff --git a/agreement/vote.go b/agreement/vote.go
index 8607a4b91e..8136c8cfdc 100644
--- a/agreement/vote.go
+++ b/agreement/vote.go
@@ -126,7 +126,7 @@ func (uv unauthenticatedVote) verify(l LedgerReader) (vote, error) {
ephID := basics.OneTimeIDForRound(rv.Round, m.Record.KeyDilution(proto))
voteID := m.Record.VoteID
- if !voteID.Verify(ephID, proto.FineGrainedEphemeralKeys, rv, uv.Sig) {
+ if !voteID.Verify(ephID, rv, uv.Sig) {
return vote{}, fmt.Errorf("unauthenticatedVote.verify: could not verify FS signature on vote by %v given %v: %+v", rv.Sender, voteID, uv)
}
@@ -173,7 +173,7 @@ func makeVote(rv rawVote, voting crypto.OneTimeSigner, selection *crypto.VRFSecr
}
ephID := basics.OneTimeIDForRound(rv.Round, voting.KeyDilution(proto))
- sig := voting.Sign(ephID, proto.FineGrainedEphemeralKeys, rv)
+ sig := voting.Sign(ephID, rv)
if (sig == crypto.OneTimeSignature{}) {
return unauthenticatedVote{}, fmt.Errorf("makeVote: got back empty signature for vote")
}
diff --git a/cmd/algokey/export.go b/cmd/algokey/export.go
index 66312d56ba..507e5e0b7d 100644
--- a/cmd/algokey/export.go
+++ b/cmd/algokey/export.go
@@ -37,7 +37,8 @@ func init() {
var exportCmd = &cobra.Command{
Use: "export",
Short: "Export key file to mnemonic and public key",
- Run: func(cmd *cobra.Command, args []string) {
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
seed := loadKeyfile(exportKeyfile)
mnemonic := computeMnemonic(seed)
diff --git a/cmd/algokey/generate.go b/cmd/algokey/generate.go
index 2b993a4b54..3cd353fbc6 100644
--- a/cmd/algokey/generate.go
+++ b/cmd/algokey/generate.go
@@ -36,7 +36,8 @@ func init() {
var generateCmd = &cobra.Command{
Use: "generate",
Short: "Generate key",
- Run: func(cmd *cobra.Command, args []string) {
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
var seed crypto.Seed
crypto.RandBytes(seed[:])
diff --git a/cmd/algokey/import.go b/cmd/algokey/import.go
index ceb6524eff..ac157dbcc1 100644
--- a/cmd/algokey/import.go
+++ b/cmd/algokey/import.go
@@ -37,7 +37,8 @@ func init() {
var importCmd = &cobra.Command{
Use: "import",
Short: "Import key file from mnemonic",
- Run: func(cmd *cobra.Command, args []string) {
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
seed := loadMnemonic(mnemonic)
key := crypto.GenerateSignatureSecrets(seed)
diff --git a/cmd/algokey/main.go b/cmd/algokey/main.go
index c83221170f..6f1c40b03d 100644
--- a/cmd/algokey/main.go
+++ b/cmd/algokey/main.go
@@ -26,6 +26,7 @@ import (
var rootCmd = &cobra.Command{
Use: "algokey",
Short: "CLI for managing Algorand keys",
+ Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
// If no arguments passed, we should fallback to help
cmd.HelpFunc()(cmd, args)
@@ -38,6 +39,7 @@ func init() {
rootCmd.AddCommand(exportCmd)
rootCmd.AddCommand(signCmd)
rootCmd.AddCommand(multisigCmd)
+ rootCmd.AddCommand(partCmd)
}
func main() {
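
The recurring change across the algokey commands above is the addition of Args: cobra.NoArgs, which turns stray positional arguments into an error instead of silently ignoring them. A minimal, self-contained sketch of the pattern (the command name is illustrative, not part of this patch):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:   "example",
		Short: "Illustrative command that accepts no positional arguments",
		Args:  cobra.NoArgs, // "example foo" now fails instead of being ignored
		Run: func(cmd *cobra.Command, _ []string) {
			fmt.Println("ran with no positional arguments")
		},
	}
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
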
diff --git a/cmd/algokey/multisig.go b/cmd/algokey/multisig.go
index e91710fd0d..1075eea537 100644
--- a/cmd/algokey/multisig.go
+++ b/cmd/algokey/multisig.go
@@ -46,7 +46,8 @@ func init() {
var multisigCmd = &cobra.Command{
Use: "multisig",
Short: "Add a multisig signature to transactions from a file using a private key",
- Run: func(cmd *cobra.Command, args []string) {
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
seed := loadKeyfileOrMnemonic(multisigKeyfile, multisigMnemonic)
key := crypto.GenerateSignatureSecrets(seed)
diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go
new file mode 100644
index 0000000000..46d1f55447
--- /dev/null
+++ b/cmd/algokey/part.go
@@ -0,0 +1,175 @@
+// Copyright (C) 2019 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/base64"
+ "fmt"
+ "math"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/algorand/go-algorand/data/account"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/util/db"
+)
+
+var partKeyfile string
+var partFirstRound uint64
+var partLastRound uint64
+var partKeyDilution uint64
+var partParent string
+
+var partCmd = &cobra.Command{
+ Use: "part",
+ Short: "Manage participation keys",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, args []string) {
+ // If no arguments passed, we should fallback to help
+ cmd.HelpFunc()(cmd, args)
+ },
+}
+
+var partGenerateCmd = &cobra.Command{
+ Use: "generate",
+ Short: "Generate participation key",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
+ if partLastRound < partFirstRound {
+ fmt.Fprintf(os.Stderr, "Last round %d < first round %d\n", partLastRound, partFirstRound)
+ os.Exit(1)
+ }
+
+ if partKeyDilution == 0 {
+ partKeyDilution = 1 + uint64(math.Sqrt(float64(partLastRound-partFirstRound)))
+ }
+
+ var err error
+ var parent basics.Address
+ if partParent != "" {
+ parent, err = basics.UnmarshalChecksumAddress(partParent)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot parse parent address %s: %v\n", partParent, err)
+ os.Exit(1)
+ }
+ }
+
+ partdb, err := db.MakeErasableAccessor(partKeyfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot open partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ partkey, err := account.FillDBWithParticipationKeys(partdb, parent, basics.Round(partFirstRound), basics.Round(partLastRound), partKeyDilution)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot generate partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ printPartkey(partkey)
+ },
+}
+
+var partInfoCmd = &cobra.Command{
+ Use: "info",
+ Short: "Print participation key information",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
+ partdb, err := db.MakeErasableAccessor(partKeyfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot open partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ partkey, err := account.RestoreParticipation(partdb)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ printPartkey(partkey)
+ },
+}
+
+var partReparentCmd = &cobra.Command{
+ Use: "reparent",
+ Short: "Change parent address of participation key",
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
+ parent, err := basics.UnmarshalChecksumAddress(partParent)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot parse parent address %s: %v\n", partParent, err)
+ os.Exit(1)
+ }
+
+ partdb, err := db.MakeErasableAccessor(partKeyfile)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot open partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ partkey, err := account.RestoreParticipation(partdb)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ partkey.Parent = parent
+ err = partkey.PersistNewParent()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Cannot persist partkey database %s: %v\n", partKeyfile, err)
+ os.Exit(1)
+ }
+
+ printPartkey(partkey)
+ },
+}
+
+func printPartkey(partkey account.Participation) {
+ fmt.Printf("Parent address: %s\n", partkey.Parent.GetChecksumAddress().String())
+ fmt.Printf("VRF public key: %s\n", base64.StdEncoding.EncodeToString(partkey.VRF.PK[:]))
+ fmt.Printf("Voting public key: %s\n", base64.StdEncoding.EncodeToString(partkey.Voting.OneTimeSignatureVerifier[:]))
+ fmt.Printf("First round: %d\n", partkey.FirstValid)
+ fmt.Printf("Last round: %d\n", partkey.LastValid)
+ fmt.Printf("Key dilution: %d\n", partkey.KeyDilution)
+ fmt.Printf("First batch: %d\n", partkey.Voting.FirstBatch)
+ fmt.Printf("First offset: %d\n", partkey.Voting.FirstOffset)
+}
+
+func init() {
+ partCmd.AddCommand(partGenerateCmd)
+ partCmd.AddCommand(partInfoCmd)
+ partCmd.AddCommand(partReparentCmd)
+
+ partGenerateCmd.Flags().StringVar(&partKeyfile, "keyfile", "", "Participation key filename")
+ partGenerateCmd.Flags().Uint64Var(&partFirstRound, "first", 0, "First round for participation key")
+ partGenerateCmd.Flags().Uint64Var(&partLastRound, "last", 0, "Last round for participation key")
+ partGenerateCmd.Flags().Uint64Var(&partKeyDilution, "dilution", 0, "Key dilution (default to sqrt of validity window)")
+ partGenerateCmd.Flags().StringVar(&partParent, "parent", "", "Address of parent account")
+ partGenerateCmd.MarkFlagRequired("first")
+ partGenerateCmd.MarkFlagRequired("last")
+ partGenerateCmd.MarkFlagRequired("keyfile")
+
+ partInfoCmd.Flags().StringVar(&partKeyfile, "keyfile", "", "Participation key filename")
+ partInfoCmd.MarkFlagRequired("keyfile")
+
+ partReparentCmd.Flags().StringVar(&partKeyfile, "keyfile", "", "Participation key filename")
+ partReparentCmd.Flags().StringVar(&partParent, "parent", "", "Address of parent account")
+ partReparentCmd.MarkFlagRequired("keyfile")
+ partReparentCmd.MarkFlagRequired("parent")
+}
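
A worked example of the default dilution chosen by "algokey part generate" above when --dilution is omitted (1 + floor(sqrt(validity window))): for a 3,000,000-round window this gives 1 + 1732 = 1733 second-level keys per top-level batch key. A minimal sketch of the same computation (the round numbers are stand-ins for --first/--last):

package main

import (
	"fmt"
	"math"
)

func main() {
	first, last := uint64(0), uint64(3000000)
	dilution := 1 + uint64(math.Sqrt(float64(last-first)))
	fmt.Println(dilution) // 1733
}
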
diff --git a/cmd/algokey/sign.go b/cmd/algokey/sign.go
index e4981ab4a4..3d8d423302 100644
--- a/cmd/algokey/sign.go
+++ b/cmd/algokey/sign.go
@@ -46,7 +46,8 @@ func init() {
var signCmd = &cobra.Command{
Use: "sign",
Short: "Sign transactions from a file using a private key",
- Run: func(cmd *cobra.Command, args []string) {
+ Args: cobra.NoArgs,
+ Run: func(cmd *cobra.Command, _ []string) {
seed := loadKeyfileOrMnemonic(signKeyfile, signMnemonic)
key := crypto.GenerateSignatureSecrets(seed)
diff --git a/cmd/algorelay/relayCmd.go b/cmd/algorelay/relayCmd.go
index 6e6fa6eaf9..483cc85825 100644
--- a/cmd/algorelay/relayCmd.go
+++ b/cmd/algorelay/relayCmd.go
@@ -34,16 +34,14 @@ import (
var (
inputFileArg string
outputFileArg string
- srvDomainArg string
- nameDomainArg string
+ srvDomainArg string // e.g. algorand.network
+ nameDomainArg string // e.g. algorand-mainnet.network
defaultPortArg uint16
- dnsBootstrapArg string
+ dnsBootstrapArg string // e.g. mainnet or testnet
recordIDArg int64
- cfEmail string
- cfAuthKey string
- cfSrvZoneID string
- cfNameZoneID string
+ cfEmail string
+ cfAuthKey string
)
var nameRecordTypes = []string{"A", "CNAME", "SRV"}
@@ -52,11 +50,9 @@ var srvRecordTypes = []string{"SRV"}
const metricsPort = uint16(9100)
func init() {
- cfSrvZoneID = os.Getenv("CLOUDFLARE_SRV_ZONE_ID")
- cfNameZoneID = os.Getenv("CLOUDFLARE_NAME_ZONE_ID")
cfEmail = os.Getenv("CLOUDFLARE_EMAIL")
cfAuthKey = os.Getenv("CLOUDFLARE_AUTH_KEY")
- if cfSrvZoneID == "" || cfNameZoneID == "" || cfEmail == "" || cfAuthKey == "" {
+ if cfEmail == "" || cfAuthKey == "" {
panic("One or more credentials missing from ENV")
}
@@ -78,7 +74,6 @@ func init() {
checkCmd.Flags().StringVarP(&dnsBootstrapArg, "dnsbootstrap", "b", "", "Bootstrap name for SRV records (eg mainnet)")
checkCmd.MarkFlagRequired("dnsbootstrap")
-
rootCmd.AddCommand(updateCmd)
updateCmd.Flags().StringVarP(&inputFileArg, "inputfile", "i", "", "File containing Relay data")
@@ -108,52 +103,68 @@ func loadRelays(file string) []eb.Relay {
}
type checkResult struct {
- ID int64
- Success bool
- Error string `json:",omitempty"`
+ ID int64
+ Success bool
+ Error string `json:",omitempty"`
}
type dnsContext struct {
- nameEntries map[string]string
- bootstrap srvService
- metrics srvService
+ nameEntries map[string]string
+ bootstrap srvService
+ metrics srvService
+ srvZoneID string
+ nameZoneID string
}
type srvService struct {
- serviceName string
- entries map[string]uint16
- shortName string
- networkName string
+ serviceName string
+ entries map[string]uint16
+ shortName string
+ networkName string
}
func makeDNSContext() *dnsContext {
- nameEntries, err := getReverseMappedEntries(cfNameZoneID, nameRecordTypes)
+ cloudflareCred := cloudflare.NewCred(cfEmail, cfAuthKey)
+
+ nameZoneID, err := cloudflareCred.GetZoneID(context.Background(), nameDomainArg)
+ if err != nil {
+ panic(err)
+ }
+
+ nameEntries, err := getReverseMappedEntries(nameZoneID, nameRecordTypes)
if err != nil {
panic(err)
}
- bootstrap, err := getSrvRecords("_algobootstrap", dnsBootstrapArg + "." + srvDomainArg, cfSrvZoneID)
+ srvZoneID, err := cloudflareCred.GetZoneID(context.Background(), srvDomainArg)
if err != nil {
panic(err)
}
- metrics, err := getSrvRecords("_metrics", srvDomainArg, cfSrvZoneID)
+ bootstrap, err := getSrvRecords("_algobootstrap", dnsBootstrapArg+"."+srvDomainArg, srvZoneID)
+ if err != nil {
+ panic(err)
+ }
+
+ metrics, err := getSrvRecords("_metrics", srvDomainArg, srvZoneID)
if err != nil {
panic(err)
}
return &dnsContext{
nameEntries: nameEntries,
- bootstrap: bootstrap,
- metrics: metrics,
+ bootstrap: bootstrap,
+ metrics: metrics,
+ srvZoneID: srvZoneID,
+ nameZoneID: nameZoneID,
}
}
func makeService(shortName, networkName string) srvService {
return srvService{
serviceName: shortName + "._tcp." + networkName,
- entries: make(map[string]uint16),
- shortName: shortName,
+ entries: make(map[string]uint16),
+ shortName: shortName,
networkName: networkName,
}
}
@@ -167,7 +178,7 @@ var checkCmd = &cobra.Command{
context := makeDNSContext()
checkOne := recordIDArg != 0
- results := make([]checkResult,0)
+ results := make([]checkResult, 0)
anyCheckError := false
for _, relay := range relays {
@@ -184,15 +195,15 @@ var checkCmd = &cobra.Command{
if err != nil {
fmt.Fprintf(os.Stderr, "[%d] ERROR: %s: %s\n", relay.ID, relay.IPOrDNSName, err)
results = append(results, checkResult{
- ID: relay.ID,
+ ID: relay.ID,
Success: false,
- Error: err.Error(),
- })
+ Error: err.Error(),
+ })
anyCheckError = true
} else {
fmt.Printf("[%d] OK: %s -> %s:%d\n", relay.ID, relay.IPOrDNSName, name, port)
results = append(results, checkResult{
- ID: relay.ID,
+ ID: relay.ID,
Success: true,
})
}
@@ -222,7 +233,7 @@ var updateCmd = &cobra.Command{
context := makeDNSContext()
updateOne := recordIDArg != 0
- results := make([]checkResult,0)
+ results := make([]checkResult, 0)
anyUpdateError := false
for _, relay := range relays {
@@ -240,15 +251,15 @@ var updateCmd = &cobra.Command{
if err != nil {
fmt.Fprintf(os.Stderr, "[%d] ERROR: %s: %s\n", relay.ID, relay.IPOrDNSName, err)
results = append(results, checkResult{
- ID: relay.ID,
+ ID: relay.ID,
Success: false,
- Error: err.Error(),
+ Error: err.Error(),
})
anyUpdateError = true
} else {
fmt.Printf("[%d] OK: %s -> %s:%d\n", relay.ID, relay.IPOrDNSName, name, port)
results = append(results, checkResult{
- ID: relay.ID,
+ ID: relay.ID,
Success: true,
})
}
@@ -322,7 +333,7 @@ func ensureRelayStatus(checkOnly bool, relay eb.Relay, nameDomain string, srvDom
}
// Add A/CNAME for the DNSAlias assigned
- err = addDNSRecord(targetDomainAlias, topmost, cfNameZoneID)
+ err = addDNSRecord(targetDomainAlias, topmost, ctx.nameZoneID)
if err != nil {
return
}
@@ -370,7 +381,7 @@ func ensureRelayStatus(checkOnly bool, relay eb.Relay, nameDomain string, srvDom
}
// Add SRV entry to map to our DNSAlias
- err = addSRVRecord(ctx.bootstrap.networkName, topmost, port, ctx.bootstrap.shortName, cfSrvZoneID)
+ err = addSRVRecord(ctx.bootstrap.networkName, topmost, port, ctx.bootstrap.shortName, ctx.srvZoneID)
if err != nil {
return
}
@@ -385,7 +396,7 @@ func ensureRelayStatus(checkOnly bool, relay eb.Relay, nameDomain string, srvDom
}
// Add SRV entry for metrics
- err = addSRVRecord(ctx.metrics.networkName, topmost, metricsPort, ctx.metrics.shortName, cfSrvZoneID)
+ err = addSRVRecord(ctx.metrics.networkName, topmost, metricsPort, ctx.metrics.shortName, ctx.srvZoneID)
if err != nil {
return
}
@@ -422,10 +433,10 @@ func getTargetDNSChain(nameEntries map[string]string, target string) (names []st
}
}
-func getReverseMappedEntries(zoneID string, recordTypes []string) (reverseMap map[string]string, err error) {
+func getReverseMappedEntries(nameZoneID string, recordTypes []string) (reverseMap map[string]string, err error) {
reverseMap = make(map[string]string)
- cloudflareDNS := cloudflare.NewDNS(zoneID, cfEmail, cfAuthKey)
+ cloudflareDNS := cloudflare.NewDNS(nameZoneID, cfEmail, cfAuthKey)
for _, recType := range recordTypes {
var records []cloudflare.DNSRecordResponseEntry
@@ -448,7 +459,7 @@ func getReverseMappedEntries(zoneID string, recordTypes []string) (reverseMap ma
return
}
-func getSrvRecords(serviceName string, networkName, zoneID string) (service srvService, err error){
+func getSrvRecords(serviceName string, networkName, zoneID string) (service srvService, err error) {
service = makeService(serviceName, networkName)
cloudflareDNS := cloudflare.NewDNS(zoneID, cfEmail, cfAuthKey)
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 23aa655771..a6ff52a780 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -56,6 +56,8 @@ var (
keyDilution uint64
threshold uint8
partKeyOutDir string
+ partKeyFile string
+ partKeyDeleteInput bool
importDefault bool
mnemonic string
)
@@ -69,6 +71,7 @@ func init() {
accountCmd.AddCommand(rewardsCmd)
accountCmd.AddCommand(changeOnlineCmd)
accountCmd.AddCommand(addParticipationKeyCmd)
+ accountCmd.AddCommand(installParticipationKeyCmd)
accountCmd.AddCommand(listParticipationKeysCmd)
accountCmd.AddCommand(importCmd)
accountCmd.AddCommand(exportCmd)
@@ -118,8 +121,8 @@ func init() {
rewardsCmd.MarkFlagRequired("address")
// changeOnlineStatus flags
- changeOnlineCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account address to change (required)")
- changeOnlineCmd.MarkFlagRequired("address")
+ changeOnlineCmd.Flags().StringVarP(&accountAddress, "address", "a", "", "Account address to change (required if no -partkeyfile)")
+ changeOnlineCmd.Flags().StringVarP(&partKeyFile, "partkeyfile", "", "", "Participation key file (required if no -account)")
changeOnlineCmd.Flags().BoolVarP(&online, "online", "o", true, "Set this account to online or offline")
changeOnlineCmd.MarkFlagRequired("online")
changeOnlineCmd.Flags().Uint64VarP(&transactionFee, "fee", "f", 0, "The Fee to set on the status change transaction (defaults to suggested fee)")
@@ -138,6 +141,11 @@ func init() {
addParticipationKeyCmd.Flags().StringVarP(&partKeyOutDir, "outdir", "o", "", "Save participation key file to specified output directory to (for offline creation)")
addParticipationKeyCmd.Flags().Uint64VarP(&keyDilution, "keyDilution", "", 0, "Key dilution for two-level participation keys")
+ // installParticipationKey flags
+ installParticipationKeyCmd.Flags().StringVar(&partKeyFile, "partkey", "", "Participation key file to install")
+ installParticipationKeyCmd.MarkFlagRequired("partkey")
+ installParticipationKeyCmd.Flags().BoolVar(&partKeyDeleteInput, "delete-input", false, "Acknowledge that installpartkey will delete the input key file")
+
// import flags
importCmd.Flags().BoolVarP(&importDefault, "default", "f", false, "Set this account as the default one")
importCmd.Flags().StringVarP(&mnemonic, "mnemonic", "m", "", "Mnemonic to import (will prompt otherwise)")
@@ -468,14 +476,44 @@ var rewardsCmd = &cobra.Command{
var changeOnlineCmd = &cobra.Command{
Use: "changeonlinestatus",
Short: "Change online status for the specified account",
- Long: `Change online status for the specified account. Set online should be 1 to set online, 0 to set offline. The broadcast transaction will be valid for a limited number of rounds. goal will provide the TXID of the transaction if successful. Going online requires that the given account have a valid participation key.`,
+ Long: `Change online status for the specified account. Set online should be 1 to set online, 0 to set offline. The broadcast transaction will be valid for a limited number of rounds. goal will provide the TXID of the transaction if successful. Going online requires that the given account has a valid participation key. If the participation key is specified using --partkeyfile, you must separately install the participation key from that file using "goal account installpartkey".`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
+ if accountAddress == "" && partKeyFile == "" {
+ fmt.Printf("Must specify one of --address or --partkeyfile\n")
+ os.Exit(1)
+ }
+
+ if partKeyFile != "" && !online {
+ fmt.Printf("Going offline does not support --partkeyfile\n")
+ os.Exit(1)
+ }
+
// Pull the current round for use in our new transactions
dataDir := ensureSingleDataDir()
client := ensureFullClient(dataDir)
- err := changeAccountOnlineStatus(accountAddress, nil, online, onlineTxFile, walletName, onlineFirstRound, onlineValidRounds, transactionFee, dataDir, client)
+ var part *algodAcct.Participation
+ if partKeyFile != "" {
+ partdb, err := db.MakeErasableAccessor(partKeyFile)
+ if err != nil {
+ fmt.Printf("Cannot open partkey %s: %v\n", partKeyFile, err)
+ os.Exit(1)
+ }
+
+ partkey, err := algodAcct.RestoreParticipation(partdb)
+ if err != nil {
+ fmt.Printf("Cannot load partkey %s: %v\n", partKeyFile, err)
+ os.Exit(1)
+ }
+
+ part = &partkey
+ if accountAddress == "" {
+ accountAddress = part.Parent.GetChecksumAddress().String()
+ }
+ }
+
+ err := changeAccountOnlineStatus(accountAddress, part, online, onlineTxFile, walletName, onlineFirstRound, onlineValidRounds, transactionFee, dataDir, client)
if err != nil {
reportErrorf(err.Error())
}
@@ -579,6 +617,37 @@ var addParticipationKeyCmd = &cobra.Command{
},
}
+var installParticipationKeyCmd = &cobra.Command{
+ Use: "installpartkey",
+ Short: "Install a participation key",
+ Long: `Install a participation key from a partkey file. Intended for use with participation key files generated by "algokey part generate". Does not change the online status of an account or register the participation key; use "goal account changeonlinestatus" for doing so. Deletes input key file on successful install to ensure forward security.`,
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ if !partKeyDeleteInput {
+ fmt.Println(
+`The installpartkey command deletes the input participation file on
+successful installation. Please acknowledge this by passing the
+"--delete-input" flag to the installpartkey command. You can make
+a copy of the input file if needed, but please keep in mind that
+participation keys must be securely deleted for each round, to ensure
+forward security. Storing old participation keys compromises overall
+system security.
+
+No --delete-input flag specified, exiting without installing key.`)
+ os.Exit(1)
+ }
+
+ dataDir := ensureSingleDataDir()
+
+ client := ensureAlgodClient(dataDir)
+ _, _, err := client.InstallParticipationKeys(partKeyFile)
+ if err != nil {
+ reportErrorf(errorRequestFail, err)
+ }
+ fmt.Println("Participation key installed successfully")
+ },
+}
+
var renewParticipationKeyCmd = &cobra.Command{
Use: "renewpartkey",
Short: "Renew an account's participation key",
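
Taken together with the new "algokey part" command earlier in this patch, these flags appear to enable an air-gapped participation-key workflow: generate the key file on an offline machine with "algokey part generate" (--keyfile, --first, --last, --parent), install it on the node with "goal account installpartkey --partkey ... --delete-input", and register it with "goal account changeonlinestatus --partkeyfile ...". The exact ordering and any offline transaction-signing steps are not spelled out in this diff, so treat this as a plausible reading of the new flags rather than documented procedure.
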
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index 171ce24d99..3fb2a57d81 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -76,6 +76,9 @@ func init() {
// ledger.go
rootCmd.AddCommand(ledgerCmd)
+ // completion.go
+ rootCmd.AddCommand(completionCmd)
+
// Config
defaultDataDirValue := []string{""}
rootCmd.PersistentFlags().StringArrayVarP(&dataDirs, "datadir", "d", defaultDataDirValue, "Data directory for the node")
diff --git a/cmd/goal/completion.go b/cmd/goal/completion.go
new file mode 100644
index 0000000000..eb670062ff
--- /dev/null
+++ b/cmd/goal/completion.go
@@ -0,0 +1,59 @@
+// Copyright (C) 2019 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "os"
+
+ "github.com/spf13/cobra"
+)
+
+func init() {
+ completionCmd.AddCommand(bashCompletionCmd)
+ completionCmd.AddCommand(zshCompletionCmd)
+}
+
+var completionCmd = &cobra.Command{
+ Use: "completion",
+ Short: "Shell completion helper",
+ Long: "Shell completion helper",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, args []string) {
+ // If no arguments passed, we should fallback to help
+ cmd.HelpFunc()(cmd, args)
+ },
+}
+
+var bashCompletionCmd = &cobra.Command{
+ Use: "bash",
+ Short: "Generate bash completion commands",
+ Long: "Generate bash completion commands",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, _ []string) {
+ rootCmd.GenBashCompletion(os.Stdout)
+ },
+}
+
+var zshCompletionCmd = &cobra.Command{
+ Use: "zsh",
+ Short: "Generate zsh completion commands",
+ Long: "Generate zsh completion commands",
+ Args: validateNoPosArgsFn,
+ Run: func(cmd *cobra.Command, _ []string) {
+ rootCmd.GenZshCompletion(os.Stdout)
+ },
+}
diff --git a/cmd/goal/ledger.go b/cmd/goal/ledger.go
index 2b3e443ac6..e36b6b8490 100644
--- a/cmd/goal/ledger.go
+++ b/cmd/goal/ledger.go
@@ -49,6 +49,6 @@ var supplyCmd = &cobra.Command{
reportErrorf(errorRequestFail, err)
}
- fmt.Printf("Round: %v microAlgos\nTotal Money: %v microAlgos\nOnline Money: %v microAlgos\n", response.Round, response.TotalMoney, response.OnlineMoney)
+ fmt.Printf("Round: %v\nTotal Money: %v microAlgos\nOnline Money: %v microAlgos\n", response.Round, response.TotalMoney, response.OnlineMoney)
},
}
diff --git a/config/config.go b/config/config.go
index 7a69814820..b622101d0b 100644
--- a/config/config.go
+++ b/config/config.go
@@ -107,18 +107,11 @@ type ConsensusParams struct {
RequireGenesisHash bool
// DefaultKeyDilution specifies the granularity of top-level ephemeral
- // keys. If FineGrainedEphemeralKeys is not set, then every ephemeral
- // key is valid for DefaultKeyDilution rounds. If FineGrainedEphemeralKeys
- // is set, then KeyDilution is the number of second-level keys in each
- // batch, signed by a top-level "batch" key. The default value can be
+ // keys. KeyDilution is the number of second-level keys in each batch,
+ // signed by a top-level "batch" key. The default value can be
// overriden in the account state.
DefaultKeyDilution uint64
- // FineGrainedEphemeralKeys indicates support for fine-grained
- // ephemeral keys, implemented as a two-level tree. We will accept
- // (and produce) fine-grained vote signatures only if this flag is true.
- FineGrainedEphemeralKeys bool
-
// MinBalance specifies the minimum balance that can appear in
// an account. To spend money below MinBalance requires issuing
// an account-closing transaction, which transfers all of the
@@ -130,10 +123,6 @@ type ConsensusParams struct {
// a way of making the spender subsidize the cost of storing this transaction.
MinTxnFee uint64
- // SupportTxnClosing indicates if we support transactions that
- // close out an account.
- SupportTransactionClose bool
-
// RewardUnit specifies the number of MicroAlgos corresponding to one reward
// unit.
//
@@ -145,17 +134,6 @@ type ConsensusParams struct {
// rewards level is recomputed for the next RewardsRateRefreshInterval rounds.
RewardsRateRefreshInterval uint64
- // IncorrectBalLookback, if true, causes committee selection to use a balance lookback that disagrees with the spec and the rest of the code.
- // If false, use the correct balance lookback everywhere.
- // TODO: This option exists to allow fixing this bug with an in-band protocol upgrade. It should be removed the next time genesis is bumped.
- IncorrectBalLookback bool
- // TwinSeeds specifies whether we are using multiple seeds in parallel (instead of just one).
- TwinSeeds bool
-
- // ExplicitEphemeralParams indicates support for explicitly specifying
- // VotingFirstValid, VotingLastValid, and VotingKeyDilution.
- ExplicitEphemeralParams bool
-
// seed-related parameters
SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec
SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. delta_r in the spec
@@ -232,14 +210,15 @@ func initConsensusProtocols() {
// does not copy the ApprovedUpgrades map. Make sure that each new
// ConsensusParams structure gets a fresh ApprovedUpgrades map.
- // Base consensus protocol version, v2.
- v2 := ConsensusParams{
+ // Base consensus protocol version, v7.
+ v7 := ConsensusParams{
UpgradeVoteRounds: 10000,
UpgradeThreshold: 9000,
UpgradeWaitRounds: 10000,
MaxVersionStringLen: 64,
- MinTxnFee: 1,
+ MinBalance: 10000,
+ MinTxnFee: 1000,
MaxTxnLife: 1000,
MaxTxnNoteBytes: 1024,
MaxTxnBytesPerBlock: 1000000,
@@ -251,7 +230,6 @@ func initConsensusProtocols() {
RewardsRateRefreshInterval: 5e5,
ApprovedUpgrades: map[protocol.ConsensusVersion]bool{},
- IncorrectBalLookback: true,
NumProposers: 30,
SoftCommitteeSize: 2500,
@@ -272,63 +250,15 @@ func initConsensusProtocols() {
SeedLookback: 2,
SeedRefreshInterval: 100,
- MaxBalLookback: 203,
+ MaxBalLookback: 320,
}
- Consensus[protocol.ConsensusV2] = v2
-
- // In v3, we add support for fine-grained ephemeral keys.
- v3 := v2
- v3.FineGrainedEphemeralKeys = true
- v3.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
- Consensus[protocol.ConsensusV3] = v3
-
- // v2 can be upgraded to v3.
- v2.ApprovedUpgrades[protocol.ConsensusV3] = true
-
- // In v4, we add a minimum balance, and add support for transactions
- // that close an account.
- v4 := v3
- v4.MinBalance = 1000
- v4.SupportTransactionClose = true
- v4.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
- Consensus[protocol.ConsensusV4] = v4
-
- // v3 can be upgraded to v4.
- v3.ApprovedUpgrades[protocol.ConsensusV4] = true
-
- // v5 sets the min transaction fee to 1000 microAlgos, the min balance to 10000 microAlgos and also fixes a balance lookback bug
- v5 := v4
- v5.MinTxnFee = 1000
- v5.MinBalance = 10000
- v5.IncorrectBalLookback = false
- v5.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
- Consensus[protocol.ConsensusV5] = v5
-
- // v4 can be upgraded to v5.
- v4.ApprovedUpgrades[protocol.ConsensusV5] = true
-
- // v6 adds support for explicit ephemeral-key parameters.
- v6 := v5
- v6.ExplicitEphemeralParams = true
- v6.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
- Consensus[protocol.ConsensusV6] = v6
-
- // v5 can be upgraded to v6.
- v5.ApprovedUpgrades[protocol.ConsensusV6] = true
-
- // v7 increases block retention in the ledger to 320 (= 2 * 2 [seed lookback] * 80 [seed refresh interval])
- v7 := v6
- v7.MaxBalLookback = 320
+
v7.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV7] = v7
- // v6 can be upgraded to v7.
- v6.ApprovedUpgrades[protocol.ConsensusV7] = true
-
// v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis
v8 := v7
- v8.TwinSeeds = true
v8.SeedRefreshInterval = 80
v8.NumProposers = 9
v8.SoftCommitteeSize = 2990
@@ -669,7 +599,7 @@ type Local struct {
// the max size the sync server would return
TxSyncServeResponseSize int
- // IsIndexerActive indicates wheather to activate the indexer for fast retrieval of transactions
+ // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
// Note -- Indexer cannot operate on non Archival nodes
IsIndexerActive bool
@@ -678,6 +608,9 @@ type Local struct {
// proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
// field can be used.
UseXForwardedForAddressField string
+
+	// ForceRelayMessages indicates whether the network library should relay messages even when no NetAddress was specified.
+ ForceRelayMessages bool
}
// Filenames of config files within the configdir (e.g. ~/.algorand)
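
With the v2 through v6 parameter sets removed, the upgrade graph now starts at v7. A small, self-contained sketch of the reachability walk that TestConsensusUpgrades (further below) relies on; the map shape and version names are toy stand-ins, not the config package's API:

package main

import "fmt"

// reachable reports whether "to" can be reached from "from" by following
// approved upgrades; upgrades maps a version to the set it may upgrade to.
func reachable(upgrades map[string]map[string]bool, from, to string) bool {
	if from == to {
		return true
	}
	for next := range upgrades[from] {
		if reachable(upgrades, next, to) {
			return true
		}
	}
	return false
}

func main() {
	// Toy upgrade graph shaped like the consolidated one: v7 -> v8 -> v9.
	upgrades := map[string]map[string]bool{
		"v7": {"v8": true},
		"v8": {"v9": true},
	}
	fmt.Println(reachable(upgrades, "v7", "v9")) // true
}
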
diff --git a/config/config_test.go b/config/config_test.go
index 405f028622..aba65943e9 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -355,8 +355,8 @@ func TestConfigLatestVersion(t *testing.T) {
func TestConsensusUpgrades(t *testing.T) {
a := require.New(t)
- // Starting with v1, ensure we have a path to ConsensusCurrentVersion
- currentVersionName := protocol.ConsensusV2
+ // Starting with v7, ensure we have a path to ConsensusCurrentVersion
+ currentVersionName := protocol.ConsensusV7
latestVersionName := protocol.ConsensusCurrentVersion
leadsTo := consensusUpgradesTo(a, currentVersionName, latestVersionName)
diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go
index 08fc56a859..d0454756b2 100644
--- a/crypto/onetimesig.go
+++ b/crypto/onetimesig.go
@@ -41,7 +41,9 @@ type OneTimeSignature struct {
PK ed25519PublicKey `codec:"p"`
// Old-style signature that does not use proper domain separation.
- // PKSigOld is a signature of (PK || BatchID) under the master key (OneTimeSignatureVerifier).
+ // PKSigOld is unused; however, unfortunately we forgot to mark it
+ // `codec:omitempty` and so it appears (with zero value) in certs.
+ // This means we can't delete the field without breaking catchup.
PKSigOld ed25519Signature `codec:"ps"`
// Used to verify a new-style two-level ephemeral signature.
@@ -186,19 +188,12 @@ func GenerateOneTimeSignatureSecretsRNG(startBatch uint64, numBatches uint64, rn
pk, sk := ed25519GenerateKeyRNG(rng)
batchnum := startBatch + i
- // Generate the old-style signature in case we need to sign a message
- // compatible with the old-style protocol. Can eventually go away,
- // once we never need to sign messages in an old protocol.
- oldid := OneTimeSignatureIdentifier{Batch: batchnum}
- oldsig := ed25519Sign(ephemeralSec, append(pk[:], oldid.BatchBytes()...))
-
newid := OneTimeSignatureSubkeyBatchID{SubKeyPK: pk, Batch: batchnum}
newsig := ed25519Sign(ephemeralSec, hashRep(newid))
subkeys[i] = ephemeralSubkey{
PK: pk,
SK: sk,
- PKSigOld: oldsig,
PKSigNew: newsig,
}
}
@@ -228,169 +223,90 @@ func (s *OneTimeSignatureSecrets) getRNG() RNG {
}
// Sign produces a OneTimeSignature of some Hashable message under some
-// OneTimeSignatureIdentifier. fineGrained specifies whether the signature
-// should use the new plan (two-level ephemeral keys) or the old plan (one
-// ephemeral key for an entire batch).
-func (s *OneTimeSignatureSecrets) Sign(id OneTimeSignatureIdentifier, fineGrained bool, message Hashable) OneTimeSignature {
+// OneTimeSignatureIdentifier.
+func (s *OneTimeSignatureSecrets) Sign(id OneTimeSignatureIdentifier, message Hashable) OneTimeSignature {
s.mu.RLock()
defer s.mu.RUnlock()
- if fineGrained {
- // Check if we already have a partial batch of subkeys.
- if id.Batch+1 == s.FirstBatch && id.Offset >= s.FirstOffset && id.Offset-s.FirstOffset < uint64(len(s.Offsets)) {
- offidx := id.Offset - s.FirstOffset
- sig := ed25519Sign(s.Offsets[offidx].SK, hashRep(message))
- return OneTimeSignature{
- Sig: sig,
- PK: s.Offsets[offidx].PK,
- PK1Sig: s.Offsets[offidx].PKSigNew,
- PK2: s.OffsetsPK2,
- PK2Sig: s.OffsetsPK2Sig,
- }
+ // Check if we already have a partial batch of subkeys.
+ if id.Batch+1 == s.FirstBatch && id.Offset >= s.FirstOffset && id.Offset-s.FirstOffset < uint64(len(s.Offsets)) {
+ offidx := id.Offset - s.FirstOffset
+ sig := ed25519Sign(s.Offsets[offidx].SK, hashRep(message))
+ return OneTimeSignature{
+ Sig: sig,
+ PK: s.Offsets[offidx].PK,
+ PK1Sig: s.Offsets[offidx].PKSigNew,
+ PK2: s.OffsetsPK2,
+ PK2Sig: s.OffsetsPK2Sig,
}
+ }
- // Check if we are asking for an offset from an available batch.
- if id.Batch >= s.FirstBatch && id.Batch-s.FirstBatch < uint64(len(s.Batches)) {
- // Since we have not yet broken out this batch into per-offset keys,
- // generate a fresh subkey right away, sign it, and use it.
- pk, sk := ed25519GenerateKeyRNG(s.getRNG())
- sig := ed25519Sign(sk, hashRep(message))
-
- batchidx := id.Batch - s.FirstBatch
- pksig := s.Batches[batchidx].PKSigNew
+ // Check if we are asking for an offset from an available batch.
+ if id.Batch >= s.FirstBatch && id.Batch-s.FirstBatch < uint64(len(s.Batches)) {
+ // Since we have not yet broken out this batch into per-offset keys,
+ // generate a fresh subkey right away, sign it, and use it.
+ pk, sk := ed25519GenerateKeyRNG(s.getRNG())
+ sig := ed25519Sign(sk, hashRep(message))
- // Backwards compatibility: we might only have a participation
- // key generated with the old signature plan. If so, use it.
- if pksig == (ed25519Signature{}) {
- pksig = s.Batches[batchidx].PKSigOld
- }
+ batchidx := id.Batch - s.FirstBatch
+ pksig := s.Batches[batchidx].PKSigNew
- pk1id := OneTimeSignatureSubkeyOffsetID{
- SubKeyPK: pk,
- Batch: id.Batch,
- Offset: id.Offset,
- }
- return OneTimeSignature{
- Sig: sig,
- PK: pk,
- PK1Sig: ed25519Sign(s.Batches[batchidx].SK, hashRep(pk1id)),
- PK2: s.Batches[batchidx].PK,
- PK2Sig: pksig,
- }
+ pk1id := OneTimeSignatureSubkeyOffsetID{
+ SubKeyPK: pk,
+ Batch: id.Batch,
+ Offset: id.Offset,
}
-
- errmsg := fmt.Sprintf("tried to sign %v with out-of-range one-time identifier %v (firstbatch %d, len(batches) %d, firstoffset %d, len(offsets) %d)",
- message, id, s.FirstBatch, len(s.Batches), s.FirstOffset, len(s.Offsets))
-
- // It's expected that we sometimes hit this error, when trying to sign
- // using an identifier of a block that we just reached agreement on and
- // thus deleted. Don't warn if we're out-of-range by just one. This
- // might still trigger a false warning if we're out-of-range by just one
- // and it happens to be a batch boundary, but we don't have the batch
- // size (key dilution) parameter accessible here easily.
- if s.FirstBatch == id.Batch+1 && s.FirstOffset == id.Offset+1 {
- logging.Base().Info(errmsg)
- } else {
- logging.Base().Warn(errmsg)
+ return OneTimeSignature{
+ Sig: sig,
+ PK: pk,
+ PK1Sig: ed25519Sign(s.Batches[batchidx].SK, hashRep(pk1id)),
+ PK2: s.Batches[batchidx].PK,
+ PK2Sig: pksig,
}
- return OneTimeSignature{}
}
- // Old style signatures: batch subkey signs for all offsets
- // in the batch, and we use the old-style signature that does
- // not do proper domain separation.
- if id.Batch < s.FirstBatch {
- logging.Base().Warnf("tried to sign %v with expired one-time identifier %v", message, id)
- return OneTimeSignature{}
- }
- batch := id.Batch - s.FirstBatch
- if int(batch) >= len(s.Batches) {
- logging.Base().Warnf("tried to sign %v with out-of-range one-time identifier %v", message, id)
- return OneTimeSignature{}
+ errmsg := fmt.Sprintf("tried to sign %v with out-of-range one-time identifier %v (firstbatch %d, len(batches) %d, firstoffset %d, len(offsets) %d)",
+ message, id, s.FirstBatch, len(s.Batches), s.FirstOffset, len(s.Offsets))
+
+ // It's expected that we sometimes hit this error, when trying to sign
+ // using an identifier of a block that we just reached agreement on and
+ // thus deleted. Don't warn if we're out-of-range by just one. This
+ // might still trigger a false warning if we're out-of-range by just one
+ // and it happens to be a batch boundary, but we don't have the batch
+ // size (key dilution) parameter accessible here easily.
+ if s.FirstBatch == id.Batch+1 && s.FirstOffset == id.Offset+1 {
+ logging.Base().Info(errmsg)
+ } else {
+ logging.Base().Warn(errmsg)
}
-
- sig := ed25519Sign(s.Batches[batch].SK, hashRep(message))
- signed := OneTimeSignature{
- Sig: sig,
- PK: s.Batches[batch].PK,
- PKSigOld: s.Batches[batch].PKSigOld,
- }
- return signed
+ return OneTimeSignature{}
}
// Verify verifies that some Hashable signature was signed under some
// OneTimeSignatureVerifier and some OneTimeSignatureIdentifier.
-// fineGrained specifies if the signature should verify under the new
-// two-level scheme (fineGrained) or the old scheme (not fineGrained).
//
// It returns true if this is the case; otherwise, it returns false.
-func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, fineGrained bool, message Hashable, sig OneTimeSignature) bool {
- if fineGrained {
- offsetID := OneTimeSignatureSubkeyOffsetID{
- SubKeyPK: sig.PK,
- Batch: id.Batch,
- Offset: id.Offset,
- }
- batchID := OneTimeSignatureSubkeyBatchID{
- SubKeyPK: sig.PK2,
- Batch: id.Batch,
- }
-
- if !ed25519Verify(ed25519PublicKey(v), hashRep(batchID), sig.PK2Sig) {
- // Maybe this was signed by a user that generated their participation
- // key a while ago, before they had a PKSigNew. Check against the old
- // encoding. Once we're sure all these keys are gone, this fallback
- // can be removed.
- ccat := append(sig.PK2[:], id.BatchBytes()...)
- if !ed25519Verify(ed25519PublicKey(v), ccat, sig.PK2Sig) {
- return false
- }
- }
- if !ed25519Verify(batchID.SubKeyPK, hashRep(offsetID), sig.PK1Sig) {
- return false
- }
- if !ed25519Verify(offsetID.SubKeyPK, hashRep(message), sig.Sig) {
- return false
- }
- return true
+func (v OneTimeSignatureVerifier) Verify(id OneTimeSignatureIdentifier, message Hashable, sig OneTimeSignature) bool {
+ offsetID := OneTimeSignatureSubkeyOffsetID{
+ SubKeyPK: sig.PK,
+ Batch: id.Batch,
+ Offset: id.Offset,
+ }
+ batchID := OneTimeSignatureSubkeyBatchID{
+ SubKeyPK: sig.PK2,
+ Batch: id.Batch,
}
- // Single-level ephemeral signature, old-style. This should never get confused
- // with the new-style signature because its encoding is of a different length.
- // The old-style signature is len(PK)+8 bytes for the batch number, and the
- // new-style signature is a 2-byte HashID followed by a msgpack encoding of
- // the PK and a 64-bit batch number.
- ccat := append(sig.PK[:], id.BatchBytes()...)
- if !ed25519Verify(ed25519PublicKey(v), ccat, sig.PKSigOld) {
+ if !ed25519Verify(ed25519PublicKey(v), hashRep(batchID), sig.PK2Sig) {
return false
}
- if !ed25519Verify(sig.PK, hashRep(message), sig.Sig) {
+ if !ed25519Verify(batchID.SubKeyPK, hashRep(offsetID), sig.PK1Sig) {
return false
}
- return true
-}
-
-// DeleteBeforeCoarseGrained deletes ephemeral keys before (but not including) the given id.
-func (s *OneTimeSignatureSecrets) DeleteBeforeCoarseGrained(current OneTimeSignatureIdentifier) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- // TODO: Securely wipe the keys from memory.
-
- // Since we are in coarse-grained mode, wipe any fine-grained offsets
- // just in case.
- s.FirstOffset = 0
- s.Offsets = nil
-
- if current.Batch > s.FirstBatch {
- jump := current.Batch - s.FirstBatch
- if jump > uint64(len(s.Batches)) {
- jump = uint64(len(s.Batches))
- }
-
- s.FirstBatch += jump
- s.Batches = s.Batches[jump:]
+ if !ed25519Verify(offsetID.SubKeyPK, hashRep(message), sig.Sig) {
+ return false
}
+ return true
}
// DeleteBeforeFineGrained deletes ephemeral keys before (but not including) the given id.
@@ -449,11 +365,6 @@ func (s *OneTimeSignatureSecrets) DeleteBeforeFineGrained(current OneTimeSignatu
s.OffsetsPK2 = s.Batches[0].PK
s.OffsetsPK2Sig = s.Batches[0].PKSigNew
- // Backwards compatibility: we might only have a participation
- // key generated with the old signature plan. If so, use it.
- if s.OffsetsPK2Sig == (ed25519Signature{}) {
- s.OffsetsPK2Sig = s.Batches[0].PKSigOld
- }
s.FirstOffset = current.Offset
for off := current.Offset; off < numKeysPerBatch; off++ {
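
With the coarse-grained path gone, every one-time signature flows through the two-level scheme: the master verifier checks PK2Sig over the batch ID, the batch subkey checks PK1Sig over the offset ID, and the offset subkey signs the message itself. A minimal usage sketch of the simplified Sign/Verify API follows; the demoMessage type and its "demo" hash tag are illustrative placeholders, not part of the codebase.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/protocol"
)

// demoMessage is a stand-in Hashable; real callers sign votes, prio responses, etc.
type demoMessage string

func (m demoMessage) ToBeHashed() (protocol.HashID, []byte) {
	return protocol.HashID("demo"), []byte(m) // illustrative domain-separation tag only
}

func main() {
	// Ephemeral keys covering batches [0, 1000), as in the tests below.
	secrets := crypto.GenerateOneTimeSignatureSecrets(0, 1000)
	id := crypto.OneTimeSignatureIdentifier{Batch: 3, Offset: 7}

	msg := demoMessage("hello")
	sig := secrets.Sign(id, msg)              // no fineGrained flag anymore
	fmt.Println(secrets.Verify(id, msg, sig)) // expected: true

	// Deleting keys up to an identifier makes earlier identifiers unusable.
	secrets.DeleteBeforeFineGrained(id, 256)
}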
diff --git a/crypto/onetimesig_test.go b/crypto/onetimesig_test.go
index 4c07b7c17f..9de3b2b44e 100644
--- a/crypto/onetimesig_test.go
+++ b/crypto/onetimesig_test.go
@@ -29,123 +29,56 @@ func randID() OneTimeSignatureIdentifier {
}
}
-func TestOneTimeSignVerifyOldStyle(t *testing.T) {
- c := GenerateOneTimeSignatureSecrets(0, 1000)
- c2 := GenerateOneTimeSignatureSecrets(0, 1000)
-
- id := randID()
- s := randString()
- s2 := randString()
-
- sig := c.Sign(id, false, s)
- if !c.Verify(id, false, s, sig) {
- t.Errorf("correct signature failed to verify (ephemeral)")
- }
-
- if c.Verify(id, false, s2, sig) {
- t.Errorf("signature verifies on wrong message")
- }
-
- sig2 := c2.Sign(id, false, s)
- if c.Verify(id, false, s, sig2) {
- t.Errorf("wrong master key incorrectly verified (ephemeral)")
- }
-
- otherID := randID()
- for otherID.Batch == id.Batch {
- // Enforce that the Batch ID be different;
- // otherwise this test may fail spuriously
- otherID = randID()
- }
- if c.Verify(otherID, false, s, sig) {
- t.Errorf("signature verifies for wrong ID")
- }
-
- nextOffsetID := id
- nextOffsetID.Offset++
- if !c.Verify(nextOffsetID, false, s, sig) {
- t.Errorf("correct signature failed to verify after bumping batch (should be irrelevant when coarse-grained)")
- }
-
- nextID := id
- nextID.Batch++
- c.DeleteBeforeCoarseGrained(nextID)
- sigAfterDelete := c.Sign(id, false, s)
- if c.Verify(id, false, s, sigAfterDelete) {
- t.Errorf("signature verifies after delete")
- }
-
- sigNextAfterDelete := c.Sign(nextID, false, s)
- if !c.Verify(nextID, false, s, sigNextAfterDelete) {
- t.Errorf("correct signature for nextID failed to verify")
- }
-}
-
func TestOneTimeSignVerifyNewStyle(t *testing.T) {
c := GenerateOneTimeSignatureSecrets(0, 1000)
c2 := GenerateOneTimeSignatureSecrets(0, 1000)
testOneTimeSignVerifyNewStyle(t, c, c2)
}
-func TestOneTimeSignVerifyMixedStyle(t *testing.T) {
- c := GenerateOneTimeSignatureSecrets(0, 1000)
- c2 := GenerateOneTimeSignatureSecrets(0, 1000)
-
- // Wipe out PKSigNew from subkeys
- for i := range c.Batches {
- c.Batches[i].PKSigNew = ed25519Signature{}
- }
- for i := range c2.Batches {
- c2.Batches[i].PKSigNew = ed25519Signature{}
- }
-
- testOneTimeSignVerifyNewStyle(t, c, c2)
-}
-
func testOneTimeSignVerifyNewStyle(t *testing.T, c *OneTimeSignatureSecrets, c2 *OneTimeSignatureSecrets) {
id := randID()
s := randString()
s2 := randString()
- sig := c.Sign(id, true, s)
- if !c.Verify(id, true, s, sig) {
+ sig := c.Sign(id, s)
+ if !c.Verify(id, s, sig) {
t.Errorf("correct signature failed to verify (ephemeral)")
}
- if c.Verify(id, true, s2, sig) {
+ if c.Verify(id, s2, sig) {
t.Errorf("signature verifies on wrong message")
}
- sig2 := c2.Sign(id, true, s)
- if c.Verify(id, true, s, sig2) {
+ sig2 := c2.Sign(id, s)
+ if c.Verify(id, s, sig2) {
t.Errorf("wrong master key incorrectly verified (ephemeral)")
}
otherID := randID()
- if c.Verify(otherID, true, s, sig) {
+ if c.Verify(otherID, s, sig) {
t.Errorf("signature verifies for wrong ID")
}
nextOffsetID := id
nextOffsetID.Offset++
- if c.Verify(nextOffsetID, true, s, sig) {
+ if c.Verify(nextOffsetID, s, sig) {
t.Errorf("signature verifies after changing offset")
}
c.DeleteBeforeFineGrained(nextOffsetID, 256)
- sigAfterDelete := c.Sign(id, true, s)
- if c.Verify(id, false, s, sigAfterDelete) {
+ sigAfterDelete := c.Sign(id, s)
+ if c.Verify(id, s, sigAfterDelete) { // TODO(adam): Previously, this call to Verify was verifying old-style coarse-grained one-time signatures. Now it's verifying new-style fine-grained one-time signatures. Is this correct?
t.Errorf("signature verifies after delete offset")
}
- sigNextAfterDelete := c.Sign(nextOffsetID, true, s)
- if !c.Verify(nextOffsetID, true, s, sigNextAfterDelete) {
+ sigNextAfterDelete := c.Sign(nextOffsetID, s)
+ if !c.Verify(nextOffsetID, s, sigNextAfterDelete) {
t.Errorf("signature fails to verify after deleting up to this offset")
}
nextOffsetID.Offset++
- sigNext2AfterDelete := c.Sign(nextOffsetID, true, s)
- if !c.Verify(nextOffsetID, true, s, sigNext2AfterDelete) {
+ sigNext2AfterDelete := c.Sign(nextOffsetID, s)
+ if !c.Verify(nextOffsetID, s, sigNext2AfterDelete) {
t.Errorf("signature fails to verify after deleting up to previous offset")
}
@@ -155,19 +88,19 @@ func testOneTimeSignVerifyNewStyle(t *testing.T, c *OneTimeSignatureSecrets, c2
nextBatchOffsetID := nextBatchID
nextBatchOffsetID.Offset++
c.DeleteBeforeFineGrained(nextBatchOffsetID, 256)
- sigAfterDelete = c.Sign(nextBatchID, true, s)
- if c.Verify(nextBatchID, true, s, sigAfterDelete) {
+ sigAfterDelete = c.Sign(nextBatchID, s)
+ if c.Verify(nextBatchID, s, sigAfterDelete) {
t.Errorf("signature verifies after delete")
}
- sigNextAfterDelete = c.Sign(nextBatchOffsetID, true, s)
- if !c.Verify(nextBatchOffsetID, true, s, sigNextAfterDelete) {
+ sigNextAfterDelete = c.Sign(nextBatchOffsetID, s)
+ if !c.Verify(nextBatchOffsetID, s, sigNextAfterDelete) {
t.Errorf("signature fails to verify after delete up to this offset")
}
nextBatchOffsetID.Offset++
- sigNext2AfterDelete = c.Sign(nextBatchOffsetID, true, s)
- if !c.Verify(nextBatchOffsetID, true, s, sigNext2AfterDelete) {
+ sigNext2AfterDelete = c.Sign(nextBatchOffsetID, s)
+ if !c.Verify(nextBatchOffsetID, s, sigNext2AfterDelete) {
t.Errorf("signature fails to verify after delete up to previous offset")
}
@@ -178,27 +111,27 @@ func testOneTimeSignVerifyNewStyle(t *testing.T, c *OneTimeSignatureSecrets, c2
preBigJumpID := bigJumpID
preBigJumpID.Batch--
- if c.Verify(preBigJumpID, true, s, c.Sign(preBigJumpID, true, s)) {
+ if c.Verify(preBigJumpID, s, c.Sign(preBigJumpID, s)) {
t.Errorf("preBigJumpID verifies")
}
preBigJumpID.Batch++
preBigJumpID.Offset--
- if c.Verify(preBigJumpID, true, s, c.Sign(preBigJumpID, true, s)) {
+ if c.Verify(preBigJumpID, s, c.Sign(preBigJumpID, s)) {
t.Errorf("preBigJumpID verifies")
}
- if !c.Verify(bigJumpID, true, s, c.Sign(bigJumpID, true, s)) {
+ if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s)) {
t.Errorf("bigJumpID does not verify")
}
bigJumpID.Offset++
- if !c.Verify(bigJumpID, true, s, c.Sign(bigJumpID, true, s)) {
+ if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s)) {
t.Errorf("bigJumpID.Offset++ does not verify")
}
bigJumpID.Batch++
- if !c.Verify(bigJumpID, true, s, c.Sign(bigJumpID, true, s)) {
+ if !c.Verify(bigJumpID, s, c.Sign(bigJumpID, s)) {
t.Errorf("bigJumpID.Batch++ does not verify")
}
}
diff --git a/daemon/algod/api/server/lib/middlewares/auth.go b/daemon/algod/api/server/lib/middlewares/auth.go
index 0ba7e20eab..28b1770838 100644
--- a/daemon/algod/api/server/lib/middlewares/auth.go
+++ b/daemon/algod/api/server/lib/middlewares/auth.go
@@ -74,6 +74,13 @@ func Auth(log logging.Logger, apiToken string) func(http.Handler) http.Handler {
// Handle debug route
// Grab the apiToken from the HTTP header
providedToken := []byte(r.Header.Get(TokenHeader))
+ if len(providedToken) == 0 {
+ // Accept tokens provided in a bearer token format.
+ authentication := strings.SplitN(r.Header.Get("Authorization"), " ", 2)
+ if len(authentication) == 2 && strings.EqualFold("Bearer", authentication[0]) {
+ providedToken = []byte(authentication[1])
+ }
+ }
if route.GetName() == debugRouteName {
// For debug routes, we place the apiToken in the path itself
providedToken = []byte(mux.Vars(r)["apiToken"])
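
On the client side this means the API token can now be supplied either through the existing token header or as a standard bearer token. A hedged sketch of a caller; the URL, endpoint, and the X-Algo-API-Token header name are assumptions for illustration.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	apiToken := "contents-of-algod.token" // placeholder

	req, err := http.NewRequest("GET", "http://127.0.0.1:8080/v1/status", nil)
	if err != nil {
		panic(err)
	}

	// Existing form (assumed header name):
	//   req.Header.Set("X-Algo-API-Token", apiToken)
	// Newly accepted form:
	req.Header.Set("Authorization", "Bearer "+apiToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}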
diff --git a/daemon/kmd/server/server.go b/daemon/kmd/server/server.go
index cd4d9b9662..e3298fea1e 100644
--- a/daemon/kmd/server/server.go
+++ b/daemon/kmd/server/server.go
@@ -101,15 +101,13 @@ func ValidateConfig(cfg WalletServerConfig) error {
// MakeWalletServer takes a WalletServerConfig, and returns a validated,
// configured WalletServer.
-func MakeWalletServer(config WalletServerConfig) (WalletServer, error) {
- var ws WalletServer
-
+func MakeWalletServer(config WalletServerConfig) (*WalletServer, error) {
err := ValidateConfig(config)
if err != nil {
- return ws, err
+ return nil, err
}
- ws = WalletServer{
+ ws := &WalletServer{
WalletServerConfig: config,
netPath: filepath.Join(config.DataDir, NetFilename),
pidPath: filepath.Join(config.DataDir, PIDFilename),
@@ -144,7 +142,7 @@ func (ws *WalletServer) releaseFileLock() error {
}
// Write out a file containing the address kmd is listening on
-func (ws WalletServer) writeStateFiles(netAddr string) (err error) {
+func (ws *WalletServer) writeStateFiles(netAddr string) (err error) {
// netPath file contains path to sock file
err = ioutil.WriteFile(ws.netPath, []byte(netAddr), 0640)
if err != nil {
@@ -156,7 +154,7 @@ func (ws WalletServer) writeStateFiles(netAddr string) (err error) {
}
// Delete the state files generated by writeStateFiles
-func (ws WalletServer) deleteStateFiles() {
+func (ws *WalletServer) deleteStateFiles() {
os.Remove(ws.pidPath)
os.Remove(ws.netPath)
}
@@ -164,7 +162,7 @@ func (ws WalletServer) deleteStateFiles() {
// makeWatchdogCallback generates a callback function that either 1. does
// nothing if ws.Timeout is nil, or 2. kicks a watchdog timer that will kill
// kmd when it expires.
-func (ws WalletServer) makeWatchdogCallback(kill chan os.Signal) func() {
+func (ws *WalletServer) makeWatchdogCallback(kill chan os.Signal) func() {
// If Timeout is nil, then we will not kill kmd after a timeout
if ws.Timeout == nil {
return func() {}
@@ -193,7 +191,7 @@ func (ws WalletServer) makeWatchdogCallback(kill chan os.Signal) func() {
// returns an error if it was unable to start the server. It reads from the
// `kill` channel in order to shut down the server gracefully, and returns a
// `died` channel that will be written after the server exits.
-func (ws WalletServer) Start(kill chan os.Signal) (died chan error, sock string, err error) {
+func (ws *WalletServer) Start(kill chan os.Signal) (died chan error, sock string, err error) {
// Ensure we're the only instance of kmd running in this data directory
err = ws.acquireFileLock()
if err != nil {
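
The switch to returning *WalletServer and using pointer receivers is not cosmetic: a value receiver operates on a copy, so state the server records in one method (file locks, watchdog timers, paths) would be invisible to the next. A self-contained illustration of the difference, unrelated to kmd itself:

package main

import "fmt"

type server struct{ started bool }

// Value receiver: mutates a copy; the caller's struct is unchanged.
func (s server) startByValue() { s.started = true }

// Pointer receiver: mutates the caller's struct.
func (s *server) startByPointer() { s.started = true }

func main() {
	a := server{}
	a.startByValue()
	fmt.Println(a.started) // false

	b := &server{}
	b.startByPointer()
	fmt.Println(b.started) // true
}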
diff --git a/data/account/participation.go b/data/account/participation.go
index 5796ea2960..034ae85cb7 100644
--- a/data/account/participation.go
+++ b/data/account/participation.go
@@ -87,11 +87,7 @@ func (part Participation) DeleteOldKeys(current basics.Round, proto config.Conse
keyDilution = proto.DefaultKeyDilution
}
- if proto.FineGrainedEphemeralKeys {
- part.Voting.DeleteBeforeFineGrained(basics.OneTimeIDForRound(current, keyDilution), keyDilution)
- } else {
- part.Voting.DeleteBeforeCoarseGrained(basics.OneTimeIDForRound(current, keyDilution))
- }
+ part.Voting.DeleteBeforeFineGrained(basics.OneTimeIDForRound(current, keyDilution), keyDilution)
raw := protocol.Encode(part.Voting.Snapshot())
return part.Store.Atomic(func(tx *sql.Tx) error {
@@ -103,6 +99,14 @@ func (part Participation) DeleteOldKeys(current basics.Round, proto config.Conse
})
}
+// PersistNewParent writes a new parent address to the partkey database.
+func (part Participation) PersistNewParent() error {
+ return part.Store.Atomic(func(tx *sql.Tx) error {
+ _, err := tx.Exec("UPDATE ParticipationAccount SET parent=?", part.Parent[:])
+ return err
+ })
+}
+
// VRFSecrets returns the VRF secrets associated with this Participation account.
func (part Participation) VRFSecrets() *crypto.VRFSecrets {
return part.VRF
@@ -137,11 +141,9 @@ func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos,
SelectionPK: part.VRF.PK,
},
}
- if params.ExplicitEphemeralParams {
- t.KeyregTxnFields.VoteFirst = part.FirstValid
- t.KeyregTxnFields.VoteLast = part.LastValid
- t.KeyregTxnFields.VoteKeyDilution = part.KeyDilution
- }
+ t.KeyregTxnFields.VoteFirst = part.FirstValid
+ t.KeyregTxnFields.VoteLast = part.LastValid
+ t.KeyregTxnFields.VoteKeyDilution = part.KeyDilution
return t
}
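
PersistNewParent exists so that a participation key database restored from another machine can be re-pointed at the account it votes for. A hedged sketch of that flow; the package name, file path, and parent address are placeholders, and the import paths mirror how libgoal uses these helpers in InstallParticipationKeys further below.

package example

import (
	"github.com/algorand/go-algorand/data/account"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/util/db"
)

// reparentPartKey restores a partkey database from dbfile and records
// parentAddr as the owning account before the key is handed to algod.
func reparentPartKey(dbfile string, parentAddr basics.Address) error {
	handle, err := db.MakeErasableAccessor(dbfile)
	if err != nil {
		return err
	}
	defer handle.Close()

	part, err := account.RestoreParticipation(handle)
	if err != nil {
		return err
	}

	part.Parent = parentAddr
	return part.PersistNewParent()
}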
diff --git a/data/pools/feeTracker_test.go b/data/pools/feeTracker_test.go
index 0df132aa99..d06f6647ac 100644
--- a/data/pools/feeTracker_test.go
+++ b/data/pools/feeTracker_test.go
@@ -50,7 +50,7 @@ func TestFeeTracker_ProcessBlock(t *testing.T) {
var block bookkeeping.Block
block.Payset = make(transactions.Payset, 0)
- proto := config.Consensus[protocol.ConsensusV2]
+ proto := config.Consensus[protocol.ConsensusV7]
for i, sender := range addresses {
for j, receiver := range addresses {
if sender != receiver {
@@ -80,5 +80,5 @@ func TestFeeTracker_ProcessBlock(t *testing.T) {
}
}
ft.ProcessBlock(block)
- require.Equal(t, uint64(0x1a), ft.EstimateFee().Raw)
+ require.Equal(t, uint64(0x1f), ft.EstimateFee().Raw)
}
diff --git a/data/transactions/keyreg.go b/data/transactions/keyreg.go
index dbdcf8d2ef..6600f55ba7 100644
--- a/data/transactions/keyreg.go
+++ b/data/transactions/keyreg.go
@@ -46,18 +46,6 @@ func (keyreg KeyregTxnFields) apply(header Header, balances Balances, spec Speci
return err
}
- if !balances.ConsensusParams().ExplicitEphemeralParams {
- if keyreg.VoteFirst != 0 {
- return fmt.Errorf("keyreg VoteFirst=%d not allowed", keyreg.VoteFirst)
- }
- if keyreg.VoteLast != 0 {
- return fmt.Errorf("keyreg VoteLast=%d not allowed", keyreg.VoteLast)
- }
- if keyreg.VoteKeyDilution != 0 {
- return fmt.Errorf("keyreg VoteKeyDilution=%d not allowed", keyreg.VoteKeyDilution)
- }
- }
-
// Update the registered keys and mark account as online (or, if the voting or selection keys are zero, offline)
record.VoteID = keyreg.VotePK
record.SelectionID = keyreg.SelectionPK
diff --git a/data/transactions/payment.go b/data/transactions/payment.go
index 23ee8efbe7..82e7ece63b 100644
--- a/data/transactions/payment.go
+++ b/data/transactions/payment.go
@@ -79,33 +79,29 @@ func (payment PaymentTxnFields) apply(header Header, balances Balances, spec Spe
}
if payment.CloseRemainderTo != (basics.Address{}) {
- if balances.ConsensusParams().SupportTransactionClose {
- rec, err := balances.Get(header.Sender)
- if err != nil {
- return err
- }
+ rec, err := balances.Get(header.Sender)
+ if err != nil {
+ return err
+ }
- closeAmount := rec.AccountData.MicroAlgos
- ad.ClosingAmount = closeAmount
- err = balances.Move(header.Sender, payment.CloseRemainderTo, closeAmount, &ad.SenderRewards, &ad.CloseRewards)
- if err != nil {
- return err
- }
+ closeAmount := rec.AccountData.MicroAlgos
+ ad.ClosingAmount = closeAmount
+ err = balances.Move(header.Sender, payment.CloseRemainderTo, closeAmount, &ad.SenderRewards, &ad.CloseRewards)
+ if err != nil {
+ return err
+ }
- // Confirm that we have no balance left
- rec, err = balances.Get(header.Sender)
- if !rec.AccountData.MicroAlgos.IsZero() {
- return fmt.Errorf("balance %d still not zero after CloseRemainderTo", rec.AccountData.MicroAlgos.Raw)
- }
+ // Confirm that we have no balance left
+ rec, err = balances.Get(header.Sender)
+ if !rec.AccountData.MicroAlgos.IsZero() {
+ return fmt.Errorf("balance %d still not zero after CloseRemainderTo", rec.AccountData.MicroAlgos.Raw)
+ }
- // Clear out entire account record, to allow the DB to GC it
- rec.AccountData = basics.AccountData{}
- err = balances.Put(rec)
- if err != nil {
- return err
- }
- } else {
- return fmt.Errorf("CloseRemainderTo not supported")
+ // Clear out entire account record, to allow the DB to GC it
+ rec.AccountData = basics.AccountData{}
+ err = balances.Put(rec)
+ if err != nil {
+ return err
}
}
diff --git a/data/transactions/payment_test.go b/data/transactions/payment_test.go
index 731c002634..b53d9e2997 100644
--- a/data/transactions/payment_test.go
+++ b/data/transactions/payment_test.go
@@ -120,7 +120,7 @@ func TestPaymentApply(t *testing.T) {
func TestCheckSpender(t *testing.T) {
mockBalV0 := mockBalances{protocol.ConsensusCurrentVersion}
- mockBalV4 := mockBalances{protocol.ConsensusV4}
+ mockBalV7 := mockBalances{protocol.ConsensusV7}
secretSrc := keypair()
src := basics.Address(secretSrc.SignatureVerifier)
@@ -151,10 +151,10 @@ func TestCheckSpender(t *testing.T) {
tx.CloseRemainderTo = poolAddr
require.Error(t, tx.checkSpender(tx.Header, spec, mockBalV0.ConsensusParams()))
- require.Error(t, tx.checkSpender(tx.Header, spec, mockBalV4.ConsensusParams()))
+ require.Error(t, tx.checkSpender(tx.Header, spec, mockBalV7.ConsensusParams()))
tx.Sender = src
- require.NoError(t, tx.checkSpender(tx.Header, spec, mockBalV4.ConsensusParams()))
+ require.NoError(t, tx.checkSpender(tx.Header, spec, mockBalV7.ConsensusParams()))
}
func TestPaymentValidation(t *testing.T) {
diff --git a/gen/generate.go b/gen/generate.go
index 16e1193a8b..8a92fdf355 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -176,11 +176,9 @@ func generateGenesisFiles(outDir string, proto protocol.ConsensusVersion, netNam
if wallet.Online == basics.Online {
data.VoteID = part.VotingSecrets().OneTimeSignatureVerifier
data.SelectionID = part.VRFSecrets().PK
- if params.ExplicitEphemeralParams {
- data.VoteFirstValid = part.FirstValid
- data.VoteLastValid = part.LastValid
- data.VoteKeyDilution = part.KeyDilution
- }
+ data.VoteFirstValid = part.FirstValid
+ data.VoteLastValid = part.LastValid
+ data.VoteKeyDilution = part.KeyDilution
}
records[wallet.Name] = data
diff --git a/installer/config.json.example b/installer/config.json.example
index c1124aa248..c32703f3f9 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -36,5 +36,6 @@
"TxPoolSize": 50000,
"TxSyncIntervalSeconds": 60,
"TxSyncServeResponseSize": 1000000,
- "TxSyncTimeoutSeconds": 30
+ "TxSyncTimeoutSeconds": 30,
+ "ForceRelayMessages": false
}
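
ForceRelayMessages is a new knob that lets a node without a public NetAddress still forward gossip traffic. A small sketch of how the decision is derived, mirroring the wn.setup() change in network/wsNetwork.go further below; the config.Local field names are taken from that diff.

package example

import "github.com/algorand/go-algorand/config"

// shouldRelay mirrors the relayMessages derivation: relays (nodes with a
// public NetAddress) always relay, and other nodes can opt in explicitly.
func shouldRelay(cfg config.Local) bool {
	return cfg.NetAddress != "" || cfg.ForceRelayMessages
}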
diff --git a/ledger/blockdb.go b/ledger/blockdb.go
index ad86a439b9..5ab2fab1e9 100644
--- a/ledger/blockdb.go
+++ b/ledger/blockdb.go
@@ -97,10 +97,8 @@ func blockGetHdr(tx *sql.Tx, rnd basics.Round) (hdr bookkeeping.BlockHeader, err
return
}
-func blockGetCert(tx *sql.Tx, rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
- var blkbuf []byte
- var certbuf []byte
- err = tx.QueryRow("SELECT blkdata, certdata FROM blocks WHERE rnd=?", rnd).Scan(&blkbuf, &certbuf)
+func blockGetEncodedCert(tx *sql.Tx, rnd basics.Round) (blk []byte, cert []byte, err error) {
+ err = tx.QueryRow("SELECT blkdata, certdata FROM blocks WHERE rnd=?", rnd).Scan(&blk, &cert)
if err != nil {
if err == sql.ErrNoRows {
err = ErrNoEntry{Round: rnd}
@@ -108,7 +106,14 @@ func blockGetCert(tx *sql.Tx, rnd basics.Round) (blk bookkeeping.Block, cert agr
return
}
+ return
+}
+func blockGetCert(tx *sql.Tx, rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
+ blkbuf, certbuf, err := blockGetEncodedCert(tx, rnd)
+ if err != nil {
+ return
+ }
err = protocol.Decode(blkbuf, &blk)
if err != nil {
return
diff --git a/ledger/blockqueue.go b/ledger/blockqueue.go
index fe23901661..0f3b74602f 100644
--- a/ledger/blockqueue.go
+++ b/ledger/blockqueue.go
@@ -27,6 +27,7 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
)
type blockEntry struct {
@@ -262,6 +263,29 @@ func (bq *blockQueue) getBlockHdr(r basics.Round) (hdr bookkeeping.BlockHeader,
return
}
+func (bq *blockQueue) getEncodedBlockCert(r basics.Round) (blk []byte, cert []byte, err error) {
+ e, lastCommitted, latest, err := bq.checkEntry(r)
+ if e != nil {
+ // Block has yet to be committed; we'll need to encode it.
+ blk = protocol.Encode(e.block)
+ cert = protocol.Encode(e.cert)
+ err = nil
+ return
+ }
+
+ if err != nil {
+ return
+ }
+
+ err = bq.l.blockDBs.rdb.Atomic(func(tx *sql.Tx) error {
+ var err0 error
+ blk, cert, err0 = blockGetEncodedCert(tx, r)
+ return err0
+ })
+ err = updateErrNoEntry(err, lastCommitted, latest)
+ return
+}
+
func (bq *blockQueue) getBlockCert(r basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
e, lastCommitted, latest, err := bq.checkEntry(r)
if e != nil {
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 0f83d7e9a8..90d77137f1 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -318,6 +318,11 @@ func (l *Ledger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err er
return
}
+// EncodedBlockCert returns the encoded block and the corresponding encoded certificate of the block for round rnd.
+func (l *Ledger) EncodedBlockCert(rnd basics.Round) (blk []byte, cert []byte, err error) {
+ return l.blockQ.getEncodedBlockCert(rnd)
+}
+
// BlockCert returns the block and the certificate of the block for round rnd.
func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
return l.blockQ.getBlockCert(rnd)
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index fa83fbf80f..21603b84ed 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -330,11 +330,11 @@ func TestLedgerSingleTx(t *testing.T) {
backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
defer backlogPool.Shutdown()
- initBlocks, initAccounts, initSecrets := testGenerateInitState(t, protocol.ConsensusV4)
+ initBlocks, initAccounts, initSecrets := testGenerateInitState(t, protocol.ConsensusV7)
l, err := OpenLedger(logging.Base(), t.Name(), true, initBlocks, initAccounts, crypto.Hash([]byte(t.Name())))
a.NoError(err, "could not open ledger")
- proto := config.Consensus[protocol.ConsensusV4]
+ proto := config.Consensus[protocol.ConsensusV7]
poolAddr := testPoolAddr
sinkAddr := testSinkAddr
diff --git a/libgoal/participation.go b/libgoal/participation.go
index 993a173be5..fb03e82c28 100644
--- a/libgoal/participation.go
+++ b/libgoal/participation.go
@@ -19,6 +19,7 @@ package libgoal
import (
"fmt"
"io/ioutil"
+ "math"
"os"
"path/filepath"
@@ -167,6 +168,65 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k
return newPart, partKeyPath, err
}
+// InstallParticipationKeys creates a .partkey database for a given address,
+// based on an existing database from inputfile. On successful install, it
+// deletes the input file.
+func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) {
+ // Get the GenesisID for use in the participation key path
+ var genID string
+ genID, err = c.GenesisID()
+ if err != nil {
+ return
+ }
+
+ outDir := filepath.Join(c.DataDir(), genID)
+
+ inputdb, err := db.MakeErasableAccessor(inputfile)
+ if err != nil {
+ return
+ }
+ defer inputdb.Close()
+
+ partkey, err := account.RestoreParticipation(inputdb)
+ if err != nil {
+ return
+ }
+
+ if partkey.Parent == (basics.Address{}) {
+ err = fmt.Errorf("Cannot install partkey with missing (zero) parent address")
+ return
+ }
+
+ newdbpath, err := participationKeysPath(outDir, partkey.Parent, partkey.FirstValid, partkey.LastValid)
+ if err != nil {
+ return
+ }
+
+ newdb, err := db.MakeErasableAccessor(newdbpath)
+ if err != nil {
+ return
+ }
+
+ newpartkey := partkey
+ newpartkey.Store = newdb
+ err = newpartkey.Persist()
+ if err != nil {
+ return
+ }
+
+ // After successful install, remove the input copy of the
+ // partkey so that old keys cannot be recovered after they
+ // are used by algod. We try to delete the data inside
+ // sqlite first, so the key material is zeroed out from
+ // disk blocks, but regardless of whether that works, we
+ // delete the input file. The consensus protocol version
+ // is irrelevant for the maxuint64 round number we pass in.
+ partkey.DeleteOldKeys(basics.Round(math.MaxUint64), config.Consensus[protocol.ConsensusCurrentVersion])
+ os.Remove(inputfile)
+
+ return newpartkey, newdbpath, nil
+}
+
// ListParticipationKeys returns the available participation keys,
// as a map from database filename to Participation key object.
func (c *Client) ListParticipationKeys() (partKeyFiles map[string]account.Participation, err error) {
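
A hedged sketch of calling the new helper from libgoal; the client value and input path are placeholders, and the field names come from the code above.

package example

import (
	"fmt"

	"github.com/algorand/go-algorand/libgoal"
)

// installTransferredKey installs a partkey database that was generated
// elsewhere and copied onto this node, then reports where it landed.
func installTransferredKey(c *libgoal.Client, inputfile string) error {
	part, installedPath, err := c.InstallParticipationKeys(inputfile)
	if err != nil {
		return err
	}
	fmt.Printf("installed partkey for %v at %s (valid rounds %d-%d)\n",
		part.Parent, installedPath, part.FirstValid, part.LastValid)
	return nil
}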
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 05c9ee159a..e2ad64e288 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -521,6 +521,7 @@ func (wn *WebsocketNetwork) setup() {
wn.server.IdleTimeout = httpServerIdleTimeout
wn.server.MaxHeaderBytes = httpServerMaxHeaderBytes
wn.ctx, wn.ctxCancel = context.WithCancel(context.Background())
+ wn.relayMessages = wn.config.NetAddress != "" || wn.config.ForceRelayMessages
// roughly estimate the number of messages that could be sent over the lifespan of a single round.
wn.outgoingMessagesBufferSize = int(config.Consensus[protocol.ConsensusCurrentVersion].NumProposers*2 +
config.Consensus[protocol.ConsensusCurrentVersion].SoftCommitteeSize +
@@ -801,11 +802,24 @@ func (wn *WebsocketNetwork) updateURLHost(originalRootURL string, originIP net.I
// ServerHTTP handles the gossip network functions over websockets
func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *http.Request) {
+ remoteHost, _, err := net.SplitHostPort(request.RemoteAddr)
+ if err != nil {
+ // this error should not happen. The go framework is responsible for populating the RemoteAddr using the incoming TCP connection
+ // information.
+ wn.log.Errorf("could not parse request.RemoteAddr=%v, %s", request.RemoteAddr, err)
+ response.WriteHeader(http.StatusServiceUnavailable)
+ return
+ }
+ originIP := wn.getForwardedConnectionAddress(request.Header)
+ if originIP != nil {
+ remoteHost = originIP.String()
+ }
+
if wn.numIncomingPeers() >= wn.config.IncomingConnectionsLimit {
networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: justHost(request.RemoteAddr),
+ Address: remoteHost,
HostName: request.Header.Get(TelemetryIDHeader),
Incoming: true,
InstanceName: request.Header.Get(InstanceNameHeader),
@@ -814,21 +828,12 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
response.WriteHeader(http.StatusServiceUnavailable)
return
}
- remoteHost, _, err := net.SplitHostPort(request.RemoteAddr)
- if err != nil {
- wn.log.Errorf("could not parse request.RemoteAddr=%v, %s", request.RemoteAddr, err)
- response.WriteHeader(http.StatusServiceUnavailable)
- return
- }
- originIP := wn.getForwardedConnectionAddress(request.Header)
- if originIP != nil {
- remoteHost = originIP.String()
- }
+
if wn.connectedForIP(remoteHost) >= wn.config.MaxConnectionsPerIP {
- networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_limit"})
+ networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_limit"})
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent,
telemetryspec.ConnectPeerFailEventDetails{
- Address: justHost(request.RemoteAddr),
+ Address: remoteHost,
HostName: request.Header.Get(TelemetryIDHeader),
Incoming: true,
InstanceName: request.Header.Get(InstanceNameHeader),
@@ -878,7 +883,7 @@ func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *htt
wn.log.With("event", "ConnectedIn").With("remote", otherPublicAddr).With("local", localAddr).Infof("Accepted incoming connection from peer %s", otherPublicAddr)
wn.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerEvent,
telemetryspec.PeerEventDetails{
- Address: justHost(request.RemoteAddr),
+ Address: remoteHost,
HostName: otherTelemetryGUID,
Incoming: true,
InstanceName: otherInstanceName,
@@ -1528,8 +1533,6 @@ func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebook Phon
outerPhonebook := &MultiPhonebook{phonebooks: []Phonebook{phonebook}}
wn = &WebsocketNetwork{log: log, config: config, phonebook: outerPhonebook, GenesisID: genesisID, NetworkID: networkID}
- // TODO - add config parameter to allow non-relays to enable relaying.
- wn.relayMessages = config.NetAddress != ""
wn.setup()
return wn, nil
}
@@ -1554,9 +1557,9 @@ func (wn *WebsocketNetwork) removePeer(peer *wsPeer, reason disconnectReason) {
// definitely don't change this to do the logging while holding the lock.
localAddr, _ := wn.Address()
wn.log.With("event", "Disconnected").With("remote", peer.rootURL).With("local", localAddr).Infof("Peer %v disconnected", peer.rootURL)
- peerAddr := ""
+ peerAddr := peer.OriginAddress()
// we might be able to get addr out of conn, or it might be closed
- if peer.conn != nil {
+ if peerAddr == "" && peer.conn != nil {
paddr := peer.conn.RemoteAddr()
if paddr != nil {
peerAddr = justHost(paddr.String())
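
The incoming-connection path now resolves the peer's origin address once, up front: take the host from request.RemoteAddr and override it with the forwarded-for address when one is present, so telemetry and the per-IP limit see the same value. A standalone sketch of that resolution; the forwarded-header lookup is simplified to a plain header read, whereas the real code goes through wn.getForwardedConnectionAddress and honors the configured header name.

package main

import (
	"fmt"
	"net"
	"net/http"
)

// originHost is a simplified sketch of the lookup ServeHTTP now performs
// before any limit checks.
func originHost(r *http.Request, forwardedHeader string) (string, error) {
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return "", err
	}
	if forwardedHeader != "" {
		if fwd := r.Header.Get(forwardedHeader); fwd != "" {
			if ip := net.ParseIP(fwd); ip != nil {
				host = ip.String()
			}
		}
	}
	return host, nil
}

func main() {
	r := &http.Request{RemoteAddr: "198.51.100.7:51234", Header: http.Header{}}
	r.Header.Set("X-Forwarded-For", "203.0.113.9")
	host, _ := originHost(r, "X-Forwarded-For")
	fmt.Println(host) // 203.0.113.9
}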
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index b9dd648fd1..d8be800084 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -1464,3 +1464,86 @@ func TestSlowPeerDisconnection(t *testing.T) {
time.Sleep(time.Millisecond * 5)
}
}
+
+func TestForceMessageRelaying(t *testing.T) {
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel))
+ wn := &WebsocketNetwork{
+ log: log,
+ config: defaultConfig,
+ phonebook: emptyPhonebookSingleton,
+ GenesisID: "go-test-network-genesis",
+ NetworkID: config.Devtestnet,
+ }
+ wn.setup()
+ wn.eventualReadyDelay = time.Second
+
+ netA := wn
+ netA.config.GossipFanout = 1
+
+ defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }()
+
+ counter := newMessageCounter(t, 5)
+ counterDone := counter.done
+ netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+ netA.Start()
+ addrA, postListen := netA.Address()
+ require.Truef(t, postListen, "Listening network failed to start")
+
+ noAddressConfig := defaultConfig
+ noAddressConfig.NetAddress = ""
+ netB := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
+ netB.config.GossipFanout = 1
+ netB.phonebook = &oneEntryPhonebook{addrA}
+ netB.Start()
+ defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }()
+
+ noAddressConfig.ForceRelayMessages = true
+ netC := makeTestWebsocketNodeWithConfig(t, noAddressConfig)
+ netC.config.GossipFanout = 1
+ netC.phonebook = &oneEntryPhonebook{addrA}
+ netC.Start()
+ defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }()
+
+ readyTimeout := time.NewTimer(2 * time.Second)
+ waitReady(t, netA, readyTimeout.C)
+ waitReady(t, netB, readyTimeout.C)
+ waitReady(t, netC, readyTimeout.C)
+
+ // send 5 messages from both netB and netC to netA
+ for i := 0; i < 5; i++ {
+ err := netB.Relay(context.Background(), debugTag, []byte{1, 2, 3}, true, nil)
+ require.NoError(t, err)
+ err = netC.Relay(context.Background(), debugTag, []byte{1, 2, 3}, true, nil)
+ require.NoError(t, err)
+ }
+
+ select {
+ case <-counterDone:
+ case <-time.After(2 * time.Second):
+ if counter.count < 5 {
+ require.Failf(t, "One or more messages failed to reach destination network", "%d > %d", 5, counter.count)
+ } else if counter.count > 5 {
+ require.Failf(t, "One or more messages that were expected to be dropped reached destination network", "%d < %d", 5, counter.count)
+ }
+ }
+ netA.ClearHandlers()
+ counter = newMessageCounter(t, 10)
+ counterDone = counter.done
+ netA.RegisterHandlers([]TaggedMessageHandler{TaggedMessageHandler{Tag: debugTag, MessageHandler: counter}})
+
+ // hack the relayMessages on the netB so that it would start sending messages.
+ netB.relayMessages = true
+ // send additional 10 messages from netB
+ for i := 0; i < 10; i++ {
+ err := netB.Relay(context.Background(), debugTag, []byte{1, 2, 3}, true, nil)
+ require.NoError(t, err)
+ }
+
+ select {
+ case <-counterDone:
+ case <-time.After(2 * time.Second):
+ require.Failf(t, "One or more messages failed to reach destination network", "%d > %d", 10, counter.count)
+ }
+
+}
diff --git a/node/netprio.go b/node/netprio.go
index 8ea8c2c9a2..236c631f3e 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -102,7 +102,7 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte {
rs.Round = voteRound
rs.Sender = maxPart.Address()
- rs.Sig = signer.Sign(ephID, proto.FineGrainedEphemeralKeys, rs.Response)
+ rs.Sig = signer.Sign(ephID, rs.Response)
return protocol.Encode(rs)
}
@@ -132,7 +132,7 @@ func (node *AlgorandFullNode) VerifyPrioResponse(challenge string, response []by
}
ephID := basics.OneTimeIDForRound(rs.Round, data.KeyDilution(proto))
- if !data.VoteID.Verify(ephID, proto.FineGrainedEphemeralKeys, rs.Response, rs.Sig) {
+ if !data.VoteID.Verify(ephID, rs.Response, rs.Sig) {
err = fmt.Errorf("signature verification failure")
return
}
diff --git a/protocol/codec.go b/protocol/codec.go
index 1518e4fd3f..3e50f334e0 100644
--- a/protocol/codec.go
+++ b/protocol/codec.go
@@ -44,6 +44,7 @@ func init() {
CodecHandle.RecursiveEmptyCheck = true
CodecHandle.WriteExt = true
CodecHandle.PositiveIntUnsigned = true
+ CodecHandle.Raw = true
JSONHandle = new(codec.JsonHandle)
JSONHandle.ErrorIfNoField = true
diff --git a/protocol/consensus.go b/protocol/consensus.go
index e11a4d98b5..561f978474 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -30,22 +30,22 @@ const DEPRECATEDConsensusV0 = ConsensusVersion("v0")
// It is now deprecated.
const DEPRECATEDConsensusV1 = ConsensusVersion("v1")
-// ConsensusV2 fixes a bug in the agreement protocol where proposalValues
+// DEPRECATEDConsensusV2 fixes a bug in the agreement protocol where proposalValues
// fail to commit to the original period and sender of a block.
-const ConsensusV2 = ConsensusVersion("v2")
+const DEPRECATEDConsensusV2 = ConsensusVersion("v2")
-// ConsensusV3 adds support for fine-grained ephemeral keys.
-const ConsensusV3 = ConsensusVersion("v3")
+// DEPRECATEDConsensusV3 adds support for fine-grained ephemeral keys.
+const DEPRECATEDConsensusV3 = ConsensusVersion("v3")
-// ConsensusV4 adds support for a min balance and a transaction that
+// DEPRECATEDConsensusV4 adds support for a min balance and a transaction that
// closes out an account.
-const ConsensusV4 = ConsensusVersion("v4")
+const DEPRECATEDConsensusV4 = ConsensusVersion("v4")
-// ConsensusV5 sets MinTxnFee to 1000 and fixes a blance lookback bug
-const ConsensusV5 = ConsensusVersion("v5")
+// DEPRECATEDConsensusV5 sets MinTxnFee to 1000 and fixes a balance lookback bug
+const DEPRECATEDConsensusV5 = ConsensusVersion("v5")
-// ConsensusV6 adds support for explicit ephemeral-key parameters
-const ConsensusV6 = ConsensusVersion("v6")
+// DEPRECATEDConsensusV6 adds support for explicit ephemeral-key parameters
+const DEPRECATEDConsensusV6 = ConsensusVersion("v6")
// ConsensusV7 increases MaxBalLookback to 320 in preparation for
// the twin seeds change.
diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go
index e87e662e44..a67690b1bc 100644
--- a/rpcs/ledgerService.go
+++ b/rpcs/ledgerService.go
@@ -23,6 +23,8 @@ import (
"github.com/gorilla/mux"
+ "github.com/algorand/go-codec/codec"
+
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data"
@@ -54,21 +56,29 @@ type EncodedBlockCert struct {
Certificate agreement.Certificate `codec:"cert"`
}
+// PreEncodedBlockCert defines how GetBlockBytes encodes a block and its certificate,
+// using a pre-encoded Block and Certificate in msgpack format.
+type PreEncodedBlockCert struct {
+ Block codec.Raw `codec:"block"`
+ Certificate codec.Raw `codec:"cert"`
+}
+
 // RegisterLedgerService creates a LedgerService around the provided Ledger and registers it for RPC with the provided Registrar
func RegisterLedgerService(config config.Local, ledger *data.Ledger, registrar Registrar, genesisID string) *LedgerService {
- service := LedgerService{ledger: ledger, genesisID: genesisID}
- registrar.RegisterHTTPHandler(LedgerServiceBlockPath, &service)
+ service := &LedgerService{ledger: ledger, genesisID: genesisID}
+ registrar.RegisterHTTPHandler(LedgerServiceBlockPath, service)
c := make(chan network.IncomingMessage, config.CatchupParallelBlocks*ledgerServerCatchupRequestBufferSize)
handlers := []network.TaggedMessageHandler{
- {Tag: protocol.UniCatchupReqTag, MessageHandler: network.HandlerFunc((&service).processIncomingMessage)},
- {Tag: protocol.UniEnsBlockReqTag, MessageHandler: network.HandlerFunc((&service).processIncomingMessage)},
+ {Tag: protocol.UniCatchupReqTag, MessageHandler: network.HandlerFunc(service.processIncomingMessage)},
+ {Tag: protocol.UniEnsBlockReqTag, MessageHandler: network.HandlerFunc(service.processIncomingMessage)},
}
registrar.RegisterHandlers(handlers)
service.catchupReqs = c
service.stop = make(chan struct{})
- return &service
+
+ return service
}
// Start listening to catchup requests over ws
@@ -245,11 +255,12 @@ func (ls *LedgerService) sendCatchupRes(ctx context.Context, target network.Unic
}
func (ls *LedgerService) encodedBlockCert(round uint64) ([]byte, error) {
- blk, cert, err := ls.ledger.BlockCert(basics.Round(round))
+ blk, cert, err := ls.ledger.EncodedBlockCert(basics.Round(round))
if err != nil {
return nil, err
}
- return protocol.Encode(EncodedBlockCert{
+
+ return protocol.Encode(PreEncodedBlockCert{
Block: blk,
Certificate: cert,
}), nil
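
PreEncodedBlockCert works together with CodecHandle.Raw (enabled in protocol/codec.go above): codec.Raw fields are written to the output verbatim, so bytes already stored as msgpack can be framed for the wire without a decode/re-encode round trip. A hedged sketch of the equivalence this relies on; assuming canonical encodings, both forms should produce identical bytes.

package example

import (
	"github.com/algorand/go-algorand/agreement"
	"github.com/algorand/go-algorand/data/bookkeeping"
	"github.com/algorand/go-algorand/protocol"
	"github.com/algorand/go-algorand/rpcs"
)

// encodeViaRaw frames a block and certificate for the wire from their
// already-encoded forms. Because codec.Raw fields are emitted verbatim,
// the result should match protocol.Encode of an rpcs.EncodedBlockCert
// built from the decoded values.
func encodeViaRaw(blk bookkeeping.Block, cert agreement.Certificate) []byte {
	return protocol.Encode(rpcs.PreEncodedBlockCert{
		Block:       protocol.Encode(blk),
		Certificate: protocol.Encode(cert),
	})
}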
diff --git a/scripts/build_packages.sh b/scripts/build_packages.sh
index d3b504ff7e..876140dc97 100755
--- a/scripts/build_packages.sh
+++ b/scripts/build_packages.sh
@@ -46,13 +46,6 @@ else
fi
export TIMESTAMP=${TIMESTAMP}
-# To ensure deterministic availability of testnet (stable) builds, prefix the packages
-# with "pending_" so updater will not detect the package before we rename it.
-GATE_PREFIX=""
-# if [[ "${CHANNEL}" = "stable" || "${CHANNEL}" = "nightly" ]]; then
-# GATE_PREFIX="pending_"
-# fi
-
VERSION_COMPONENTS=(${FULLVERSION//\./ })
export BUILDNUMBER=${VERSION_COMPONENTS[2]}
@@ -106,20 +99,20 @@ for var in "${VARIATION_ARRAY[@]}"; do
echo Building package for channel ${CHANNEL} to ${PLATFORM_ROOT}
pushd ${PLATFORM_ROOT}
- tar --exclude=tools -zcf ${PKG_ROOT}/${GATE_PREFIX}node_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz * >/dev/null 2>&1
+ tar --exclude=tools -zcf ${PKG_ROOT}/node_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz * >/dev/null 2>&1
cd bin
- tar -zcf ${PKG_ROOT}/${GATE_PREFIX}install_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz updater update.sh >/dev/null 2>&1
+ tar -zcf ${PKG_ROOT}/install_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz updater update.sh >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Error creating tar file for package ${PLATFORM}. Aborting..."
exit 1
fi
cd ${PLATFORM_ROOT}/tools
- tar -zcf ${PKG_ROOT}/${GATE_PREFIX}tools_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz * >/dev/null 2>&1
+ tar -zcf ${PKG_ROOT}/tools_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.tar.gz * >/dev/null 2>&1
popd
# If Linux package, build debian (deb) package as well
- if [ $(scripts/ostype.sh) = "linux" ]; then
+ if [ ! -z "${BUILD_DEB}" ] && [ $(scripts/ostype.sh) = "linux" ]; then
DEBTMP=$(mktemp -d 2>/dev/null || mktemp -d -t "debtmp")
trap "rm -rf ${DEBTMP}" 0
scripts/build_deb.sh ${ARCH} ${DEBTMP}
@@ -128,7 +121,7 @@ for var in "${VARIATION_ARRAY[@]}"; do
exit 1
fi
pushd ${DEBTMP}
- cp -p *.deb ${PKG_ROOT}/${GATE_PREFIX}algorand_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.deb
+ cp -p *.deb ${PKG_ROOT}/algorand_${CHANNEL}_${PKG_NAME}_${FULLVERSION}.deb
popd
fi
done
diff --git a/scripts/build_release.sh b/scripts/build_release.sh
index 8cfb1a3d47..f09fe53814 100755
--- a/scripts/build_release.sh
+++ b/scripts/build_release.sh
@@ -17,23 +17,6 @@ date "+build_release start %Y%m%d_%H%M%S"
set -e
set -x
-# persistent storage of repo manager scratch space is on EFS
-if [ ! -z "${AWS_EFS_MOUNT}" ]; then
- if mount|grep -q /data; then
- echo /data already mounted
- else
- sudo mkdir -p /data
- sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport "${AWS_EFS_MOUNT}":/ /data
- # make environment for release_deb.sh
- sudo mkdir -p /data/_aptly
- sudo chown -R ${USER} /data/_aptly
- export APTLY_DIR=/data/_aptly
- fi
-fi
-
-export GOPATH=${HOME}/go
-export PATH=${HOME}/gpgbin:${GOPATH}/bin:/usr/local/go/bin:${PATH}
-
# a previous docker centos build can leave junk owned by root. chown and clean
sudo chown -R ${USER} ${GOPATH}
if [ -f ${GOPATH}/src/github.com/algorand/go-algorand/crypto/libsodium-fork/Makefile ]; then
@@ -58,6 +41,17 @@ export VARIATIONS="base"
export NO_BUILD=true
if [ -z "${RSTAMP}" ]; then
RSTAMP=$(scripts/reverse_hex_timestamp)
+ echo RSTAMP=${RSTAMP} > "${HOME}/rstamp"
+fi
+# What's my default IP address?
+# get the datacenter IP address for this EC2 host.
+# this might equivalently be gotten from `netstat -rn` and `ifconfig -a`
+if [ -z "${DC_IP}" ]; then
+ DC_IP=$(curl --silent http://169.254.169.254/latest/meta-data/local-ipv4)
+fi
+if [ -z "${DC_IP}" ]; then
+ echo "ERROR: need DC_IP to be set to your local (but not localhost) IP"
+ exit 1
fi
# Update version file for this build
@@ -71,7 +65,6 @@ fi
echo ${BUILD_NUMBER} > ./buildnumber.dat
git add -A
git commit -m "Build ${BUILD_NUMBER}"
-git push
export FULLVERSION=$(./scripts/compute_build_number.sh -f)
# a bash user might `source build_env` to manually continue a broken build
@@ -89,6 +82,7 @@ export VARIATIONS=${VARIATIONS}
RSTAMP=${RSTAMP}
BUILD_NUMBER=${BUILD_NUMBER}
export FULLVERSION=${FULLVERSION}
+DC_IP=${DC_IP}
EOF
# strip leading 'export ' for docker --env-file
sed 's/^export //g' < ${HOME}/build_env > ${HOME}/build_env_docker
@@ -100,93 +94,99 @@ make ${GOPATH}/src/github.com/algorand/go-algorand/crypto/lib/libsodium.a
make build
+export BUILD_DEB=1
scripts/build_packages.sh "${PLATFORM}"
-# Run RPM bulid in Centos7 Docker container
-sg docker "docker build -t algocentosbuild - < scripts/centos-build.Dockerfile"
-
-# cleanup our libsodium build
-if [ -f ${GOPATH}/src/github.com/algorand/go-algorand/crypto/libsodium-fork/Makefile ]; then
- (cd ${GOPATH}/src/github.com/algorand/go-algorand/crypto/libsodium-fork && make distclean)
-fi
-rm -rf ${GOPATH}/src/github.com/algorand/go-algorand/crypto/lib
-# do the RPM build
-sg docker "docker run --env-file ${HOME}/build_env_docker --mount type=bind,src=${GOPATH}/src,dst=/root/go/src --mount type=bind,src=${HOME},dst=/root/subhome --mount type=bind,src=/usr/local/go,dst=/usr/local/go -a stdout -a stderr algocentosbuild /root/go/src/github.com/algorand/go-algorand/scripts/build_release_centos_docker.sh"
+# Test .deb installer
-# Tag Source
+mkdir -p ${HOME}/docker_test_resources
+if [ ! -f "${HOME}/docker_test_resources/gnupg2.2.9_centos7_amd64.tar.bz2" ]; then
+ aws s3 cp s3://algorand-devops-misc/tools/gnupg2.2.9_centos7_amd64.tar.bz2 ${HOME}/docker_test_resources
+fi
+cp -p "${HOME}/key.gpg" "${HOME}/docker_test_resources/key.pub"
-TAG=${BRANCH}-${FULLVERSION}
-if [ ! -z "${SIGNING_KEY_ADDR}" ]; then
- git tag -s -u "${SIGNING_KEY_ADDR}" ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
+# copy previous installers into ~/docker_test_resources
+cd "${HOME}/docker_test_resources"
+if [ "${TEST_UPGRADE}" == "no" ]; then
+ echo "upgrade test disabled"
else
- git tag -s ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
-fi
-git push origin ${TAG}
-
-git archive --prefix=algorand-${FULLVERSION}/ "${TAG}" | gzip > ${PKG_ROOT}/algorand_${CHANNEL}_source_${FULLVERSION}.tar.gz
-
-# create *.sig gpg signatures
-cd ${PKG_ROOT}
-for i in *.tar.gz *.deb *.rpm; do
- gpg --detach-sign "${i}"
-done
-HASHFILE=hashes_${CHANNEL}_${OS}_${ARCH}_${FULLVERSION}
-rm -f "${HASHFILE}"
-touch "${HASHFILE}"
-md5sum *.tar.gz *.deb *.rpm >> "${HASHFILE}"
-shasum -a 256 *.tar.gz *.deb *.rpm >> "${HASHFILE}"
-shasum -a 512 *.tar.gz *.deb *.rpm >> "${HASHFILE}"
-gpg --detach-sign "${HASHFILE}"
-gpg --clearsign "${HASHFILE}"
-
-echo RSTAMP=${RSTAMP} > "${HOME}/rstamp"
-if [ ! -z "${S3_PREFIX}" ]; then
- aws s3 sync --quiet --exclude dev\* --exclude master\* --exclude nightly\* --exclude stable\* --acl public-read ./ ${S3_PREFIX}/${CHANNEL}/${RSTAMP}_${FULLVERSION}/
+ python3 ${GOPATH}/src/github.com/algorand/go-algorand/scripts/get_current_installers.py "${S3_PREFIX}/${CHANNEL}"
fi
-# copy .rpm file to intermediate yum repo scratch space, actual publish manually later
-if [ ! -d /data/yumrepo ]; then
- sudo mkdir -p /data/yumrepo
- sudo chown ${USER} /data/yumrepo
-fi
-cp -p -n *.rpm *.rpm.sig /data/yumrepo
+echo "TEST_UPGRADE=${TEST_UPGRADE}" >> ${HOME}/build_env_docker
+
+rm -rf ${HOME}/dummyaptly
+mkdir -p ${HOME}/dummyaptly
+cat <<EOF>${HOME}/dummyaptly.conf
+{
+ "rootDir": "${HOME}/dummyaptly",
+ "downloadConcurrency": 4,
+ "downloadSpeedLimit": 0,
+ "architectures": [],
+ "dependencyFollowSuggests": false,
+ "dependencyFollowRecommends": false,
+ "dependencyFollowAllVariants": false,
+ "dependencyFollowSource": false,
+ "dependencyVerboseResolve": false,
+ "gpgDisableSign": false,
+ "gpgDisableVerify": false,
+ "gpgProvider": "gpg",
+ "downloadSourcePackages": false,
+ "skipLegacyPool": true,
+ "ppaDistributorID": "ubuntu",
+ "ppaCodename": "",
+ "skipContentsPublishing": false,
+ "FileSystemPublishEndpoints": {},
+ "S3PublishEndpoints": {},
+ "SwiftPublishEndpoints": {}
+}
+EOF
+aptly -config=${HOME}/dummyaptly.conf repo create -distribution=stable -component=main algodummy
+aptly -config=${HOME}/dummyaptly.conf repo add algodummy ${HOME}/node_pkg/*.deb
+SNAPSHOT=algodummy-$(date +%Y%m%d_%H%M%S)
+aptly -config=${HOME}/dummyaptly.conf snapshot create ${SNAPSHOT} from repo algodummy
+aptly -config=${HOME}/dummyaptly.conf publish snapshot -origin=Algorand -label=Algorand ${SNAPSHOT}
-cd ${HOME}
-STATUSFILE=build_status_${CHANNEL}_${FULLVERSION}
-echo "ami-id:" > "${STATUSFILE}"
-curl --silent http://169.254.169.254/latest/meta-data/ami-id >> "${STATUSFILE}"
-cat <<EOF>>"${STATUSFILE}"
+(cd ${HOME}/dummyaptly/public && python3 ${GOPATH}/src/github.com/algorand/go-algorand/scripts/httpd.py --pid ${HOME}/phttpd.pid) &
-go version:
-EOF
-go version >>"${STATUSFILE}"
-cat <<EOF>>"${STATUSFILE}"
+sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=${HOME}/docker_test_resources,dst=/stuff --mount type=bind,src=${GOPATH}/src,dst=/root/go/src --mount type=bind,src=/usr/local/go,dst=/usr/local/go ubuntu:16.04 bash /root/go/src/github.com/algorand/go-algorand/scripts/build_release_ubuntu_test_docker.sh"
+sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=${HOME}/docker_test_resources,dst=/stuff --mount type=bind,src=${GOPATH}/src,dst=/root/go/src --mount type=bind,src=/usr/local/go,dst=/usr/local/go ubuntu:18.04 bash /root/go/src/github.com/algorand/go-algorand/scripts/build_release_ubuntu_test_docker.sh"
-go env:
-EOF
-go env >>"${STATUSFILE}"
-cat <<EOF>>"${STATUSFILE}"
+kill $(cat ${HOME}/phttpd.pid)
-build_env:
-EOF
-cat <${HOME}/build_env>>"${STATUSFILE}"
-cat <<EOF>>"${STATUSFILE}"
+date "+build_release done building ubuntu %Y%m%d_%H%M%S"
-dpkg-l:
-EOF
-dpkg -l >>"${STATUSFILE}"
-gpg --clearsign "${STATUSFILE}"
-gzip "${STATUSFILE}.asc"
-if [ ! -z "${S3_PREFIX_BUILDLOG}" ]; then
- aws s3 cp --quiet "${STATUSFILE}.asc.gz" "${S3_PREFIX_BUILDLOG}/${RSTAMP}/${STATUSFILE}.asc.gz"
+# Run RPM build in CentOS 7 Docker container
+sg docker "docker build -t algocentosbuild - < ${GOPATH}/src/github.com/algorand/go-algorand/scripts/centos-build.Dockerfile"
+
+# cleanup our libsodium build
+if [ -f ${GOPATH}/src/github.com/algorand/go-algorand/crypto/libsodium-fork/Makefile ]; then
+ (cd ${GOPATH}/src/github.com/algorand/go-algorand/crypto/libsodium-fork && make distclean)
fi
+rm -rf ${GOPATH}/src/github.com/algorand/go-algorand/crypto/lib
+
+# do the RPM build, sign and validate it
+
+sudo rm -rf ${HOME}/dummyrepo
+mkdir -p ${HOME}/dummyrepo
+
+cat <<EOF>${HOME}/dummyrepo/algodummy.repo
+[algodummy]
+name=Algorand
+baseurl=http://${DC_IP}:8111/
+enabled=1
+gpgcheck=1
+gpgkey=https://releases.algorand.com/rpm/rpm_algorand.pub
+EOF
+(cd ${HOME}/dummyrepo && python3 ${GOPATH}/src/github.com/algorand/go-algorand/scripts/httpd.py --pid ${HOME}/phttpd.pid) &
+
+sg docker "docker run --rm --env-file ${HOME}/build_env_docker --mount type=bind,src=${HOME}/.gnupg/S.gpg-agent,dst=/S.gpg-agent --mount type=bind,src=${HOME}/dummyrepo,dst=/dummyrepo --mount type=bind,src=${HOME}/docker_test_resources,dst=/stuff --mount type=bind,src=${GOPATH}/src,dst=/root/go/src --mount type=bind,src=${HOME},dst=/root/subhome --mount type=bind,src=/usr/local/go,dst=/usr/local/go algocentosbuild /root/go/src/github.com/algorand/go-algorand/scripts/build_release_centos_docker.sh"
+
+kill $(cat ${HOME}/phttpd.pid)
-# use aptly to push .deb to its serving repo
-# Leave .deb publishing to manual step after we do more checks on the release artifacts.
-# ${GOPATH}/src/github.com/algorand/go-algorand/scripts/release_deb.sh ${PKG_ROOT}/*deb
+date "+build_release done building centos %Y%m%d_%H%M%S"
-# TODO: manually post rpm to repo
+# NEXT: build_release_sign.sh
-date "+build_release finish %Y%m%d_%H%M%S"
diff --git a/scripts/build_release_centos_docker.sh b/scripts/build_release_centos_docker.sh
index 773be3477d..b733e36356 100644
--- a/scripts/build_release_centos_docker.sh
+++ b/scripts/build_release_centos_docker.sh
@@ -42,3 +42,80 @@ RPMTMP=$(mktemp -d 2>/dev/null || mktemp -d -t "rpmtmp")
trap "rm -rf ${RPMTMP}" 0
scripts/build_rpm.sh ${RPMTMP}
cp -p ${RPMTMP}/*/*.rpm /root/subhome/node_pkg
+
+(cd ${HOME} && tar jxf /stuff/gnupg*.tar.bz2)
+export PATH="${HOME}/gnupg2/bin:${PATH}"
+export LD_LIBRARY_PATH=${HOME}/gnupg2/lib
+
+umask 0077
+mkdir -p ~/.gnupg
+umask 0022
+
+touch "${HOME}/.gnupg/gpg.conf"
+if grep -q no-autostart "${HOME}/.gnupg/gpg.conf"; then
+ echo ""
+else
+ echo "no-autostart" >> "${HOME}/.gnupg/gpg.conf"
+fi
+rm -f ${HOME}/.gnupg/S.gpg-agent
+(cd ~/.gnupg && ln -s /S.gpg-agent S.gpg-agent)
+
+gpg --import /stuff/key.pub
+gpg --import ${GOPATH}/src/github.com/algorand/go-algorand/installer/rpm/RPM-GPG-KEY-Algorand
+
+cat <<EOF>"${HOME}/.rpmmacros"
+%_gpg_name Algorand RPM
+%__gpg ${HOME}/gnupg2/bin/gpg
+%__gpg_check_password_cmd true
+EOF
+
+cat <<EOF>"${HOME}/rpmsign.py"
+import rpm
+import sys
+rpm.addSign(sys.argv[1], '')
+EOF
+
+NEWEST_RPM=$(ls -t /root/subhome/node_pkg/*rpm|head -1)
+python2 "${HOME}/rpmsign.py" "${NEWEST_RPM}"
+
+cp -p "${NEWEST_RPM}" /dummyrepo
+createrepo --database /dummyrepo
+rm -f /dummyrepo/repodata/repomd.xml.asc
+gpg -u rpm@algorand.com --detach-sign --armor /dummyrepo/repodata/repomd.xml
+
+OLDRPM=$(ls -t /stuff/*.rpm|head -1)
+if [ -f "${OLDRPM}" ]; then
+ yum install -y "${OLDRPM}"
+ algod -v
+ if algod -v | grep -q ${FULLVERSION}; then
+ echo "already installed current version. wat?"
+ false
+ fi
+
+ mkdir -p /root/testnode
+ cp -p /var/lib/algorand/genesis/testnet/genesis.json /root/testnode
+
+ goal node start -d /root/testnode
+ goal node wait -d /root/testnode -w 60
+ goal node stop -d /root/testnode
+fi
+
+
+yum-config-manager --add-repo http://${DC_IP}:8111/algodummy.repo
+
+yum install -y algorand
+algod -v
+# check that the installed version is now the current version
+algod -v | grep -q ${FULLVERSION}
+
+if [ ! -d /root/testnode ]; then
+ mkdir -p /root/testnode
+ cp -p /var/lib/algorand/genesis/testnet/genesis.json /root/testnode
+fi
+
+goal node start -d /root/testnode
+goal node wait -d /root/testnode -w 60
+goal node stop -d /root/testnode
+
+
+echo CENTOS_DOCKER_TEST_OK
diff --git a/scripts/build_release_local.sh b/scripts/build_release_local.sh
index 6f7f81d2af..953370c0c5 100644
--- a/scripts/build_release_local.sh
+++ b/scripts/build_release_local.sh
@@ -50,6 +50,8 @@ gpg -u dev@algorand.com --clearsign
type some stuff
^D
+gpg -u rpm@algorand.com --clearsign
+
# TODO: use simpler expression when we can rely on gpg 2.2 on ubuntu >= 18.04
#REMOTE_GPG_SOCKET=$(ssh ubuntu@${TARGET} gpgconf --list-dir agent-socket)
@@ -75,7 +77,9 @@ export AWS_EFS_MOUNT=
# to be prompted for GPG key password at a couple points.
# It can still steal the outer terminal from within piping the output to tee. Nifty, huh?
BUILDTIMESTAMP=$(cat "${HOME}/buildtimestamp")
-(bash "${HOME}/go/src/github.com/algorand/go-algorand/scripts/build_release.sh" 2>&1)|tee -a "buildlog_${BUILDTIMESTAMP}"
+(bash "${HOME}/go/src/github.com/algorand/go-algorand/scripts/build_release.sh" 2>&1)|tee -a "${HOME}/buildlog_${BUILDTIMESTAMP}"
+(bash "${HOME}/go/src/github.com/algorand/go-algorand/scripts/build_release_sign.sh" 2>&1)|tee -a "${HOME}/buildlog_${BUILDTIMESTAMP}"
+(bash "${HOME}/go/src/github.com/algorand/go-algorand/scripts/build_release_upload.sh" 2>&1)|tee -a "${HOME}/buildlog_${BUILDTIMESTAMP}"
if [ -f "${HOME}/rstamp" ]; then
. "${HOME}/rstamp"
fi
@@ -86,7 +90,7 @@ if [ -z "${RSTAMP}" ]; then
echo "could not figure out RSTAMP, script must have failed early"
exit 1
fi
-gzip "buildlog_${BUILDTIMESTAMP}"
+gzip "${HOME}/buildlog_${BUILDTIMESTAMP}"
if [ ! -z "${S3_PREFIX_BUILDLOG}" ]; then
- aws s3 cp "buildlog_${BUILDTIMESTAMP}.gz" "${S3_PREFIX_BUILDLOG}/${RSTAMP}/buildlog_${BUILDTIMESTAMP}.gz"
+ aws s3 cp "${HOME}/buildlog_${BUILDTIMESTAMP}.gz" "${S3_PREFIX_BUILDLOG}/${RSTAMP}/buildlog_${BUILDTIMESTAMP}.gz"
fi
diff --git a/scripts/build_release_setup.sh b/scripts/build_release_setup.sh
index 015922f97b..db75a4d7f9 100644
--- a/scripts/build_release_setup.sh
+++ b/scripts/build_release_setup.sh
@@ -47,7 +47,7 @@ chmod +x ${HOME}/gpgbin/remote_gpg_socket
if [ "${DISTRIB_ID}" = "Ubuntu" ]; then
if [ "${DISTRIB_RELEASE}" = "16.04" ]; then
echo "WARNING: Ubuntu 16.04 is DEPRECATED"
- sudo apt-get install -y autoconf awscli docker.io g++ fakeroot git gnupg2 gpgv2 make nfs-common python3 rpm sqlite3
+ sudo apt-get install -y autoconf awscli docker.io g++ fakeroot git gnupg2 gpgv2 make nfs-common python3 rpm sqlite3 python3-boto3
 cat <<EOF>${HOME}/gpgbin/gpg
#!/bin/bash
exec /usr/bin/gpg2 "\$@"
@@ -58,7 +58,7 @@ exec /usr/bin/gpgv2 "\$@"
EOF
chmod +x ${HOME}/gpgbin/*
elif [ "${DISTRIB_RELEASE}" = "18.04" ]; then
- sudo apt-get install -y autoconf awscli docker.io git gpg nfs-common python3 rpm sqlite3
+ sudo apt-get install -y autoconf awscli docker.io git gpg nfs-common python3 rpm sqlite3 python3-boto3
else
echo "don't know how to build on Ubuntu ${DISTRIB_RELEASE}"
exit 1
@@ -103,6 +103,8 @@ fi
sudo usermod -a -G docker ubuntu
sg docker "docker pull centos:7"
+sg docker "docker pull ubuntu:18.04"
+sg docker "docker pull ubuntu:16.04"
# Check out
mkdir -p ${GOPATH}/src/github.com/algorand
@@ -112,6 +114,8 @@ fi
cd ${GOPATH}/src/github.com/algorand/go-algorand
git checkout "${GIT_CHECKOUT_LABEL}"
+gpg --import ${GOPATH}/src/github.com/algorand/go-algorand/installer/rpm/RPM-GPG-KEY-Algorand
+
# Install latest Go
cd $HOME
# TODO: make a config file in root of repo with single source of truth for Go major-minor version
diff --git a/scripts/build_release_sign.sh b/scripts/build_release_sign.sh
new file mode 100644
index 0000000000..bc87b6b5d5
--- /dev/null
+++ b/scripts/build_release_sign.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
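+#
+# Tag the release in git, archive the source tarball, and produce detached GPG
+# signatures plus md5/sha256/sha512 checksums for everything in ${PKG_ROOT}.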
+. ${HOME}/build_env
+set -e
+set -x
+
+cd ${GOPATH}/src/github.com/algorand/go-algorand
+
+# Tag Source
+TAG=${BRANCH}-${FULLVERSION}
+echo "TAG=${TAG}" >> ${HOME}/build_env
+if [ ! -z "${SIGNING_KEY_ADDR}" ]; then
+ git tag -s -u "${SIGNING_KEY_ADDR}" ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
+else
+ git tag -s ${TAG} -m "Genesis Timestamp: $(cat ./genesistimestamp.dat)"
+fi
+
+git archive --prefix=algorand-${FULLVERSION}/ "${TAG}" | gzip > ${PKG_ROOT}/algorand_${CHANNEL}_source_${FULLVERSION}.tar.gz
+
+# create *.sig gpg signatures
+cd ${PKG_ROOT}
+for i in *.tar.gz *.deb *.rpm; do
+ gpg -u "${SIGNING_KEY_ADDR}" --detach-sign "${i}"
+done
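+
+# Record md5/sha256/sha512 checksums for every artifact, then sign the checksum
+# file both as a detached signature and as a clearsigned copy.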
+HASHFILE=hashes_${CHANNEL}_${OS}_${ARCH}_${FULLVERSION}
+rm -f "${HASHFILE}"
+touch "${HASHFILE}"
+md5sum *.tar.gz *.deb *.rpm >> "${HASHFILE}"
+shasum -a 256 *.tar.gz *.deb *.rpm >> "${HASHFILE}"
+shasum -a 512 *.tar.gz *.deb *.rpm >> "${HASHFILE}"
+gpg -u "${SIGNING_KEY_ADDR}" --detach-sign "${HASHFILE}"
+gpg -u "${SIGNING_KEY_ADDR}" --clearsign "${HASHFILE}"
+
+date "+build_release done signing %Y%m%d_%H%M%S"
+
+# NEXT: build_release_upload.sh
diff --git a/scripts/build_release_ubuntu_test_docker.sh b/scripts/build_release_ubuntu_test_docker.sh
new file mode 100644
index 0000000000..e721fed2f1
--- /dev/null
+++ b/scripts/build_release_ubuntu_test_docker.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# test ubuntu install from inside docker image
+#
+# expects docker run with:
+# --env-file ${HOME}/build_env_docker
+# --mount type=bind,src=${HOME}/centos,dst=/stuff
+# --mount type=bind,src=${GOPATH}/src,dst=/root/go/src
+# --mount type=bind,src=/usr/local/go,dst=/usr/local/go
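+#
+# An illustrative invocation (image name and in-container script path are
+# assumptions inferred from the mounts above, not taken verbatim from the
+# build scripts):
+#   docker run --rm \
+#     --env-file ${HOME}/build_env_docker \
+#     --mount type=bind,src=${HOME}/centos,dst=/stuff \
+#     --mount type=bind,src=${GOPATH}/src,dst=/root/go/src \
+#     --mount type=bind,src=/usr/local/go,dst=/usr/local/go \
+#     ubuntu:18.04 bash /root/go/src/github.com/algorand/go-algorand/scripts/build_release_ubuntu_test_docker.sh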
+
+set -e
+set -x
+
+export GOPATH=${HOME}/go
+export PATH=${GOPATH}/bin:/usr/local/go/bin:${PATH}
+
+apt-get update
+apt-get install -y gnupg2 curl software-properties-common python3
+
+if [ "${TEST_UPGRADE}" == "no" ]; then
+ echo "upgrade test skipped"
+else
+ apt install -y /stuff/*.deb
+ algod -v
+ if algod -v | grep -q ${FULLVERSION}; then
+ echo "already installed current version. wat?"
+ false
+ fi
+
+ mkdir -p /root/testnode
+ cp -p /var/lib/algorand/genesis/testnet/genesis.json /root/testnode
+
+ goal node start -d /root/testnode
+ goal node wait -d /root/testnode -w 60
+ goal node stop -d /root/testnode
+fi
+
+#apt-key adv --fetch-keys https://releases.algorand.com/key.pub
+apt-key add /stuff/key.pub
+add-apt-repository "deb http://${DC_IP}:8111/ stable main"
+apt-get update
+apt-get install -y algorand
+algod -v
+# check that the installed version is now the current version
+algod -v | grep -q ${FULLVERSION}
+
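+# smoke-test the freshly installed package: start a node against the testnet
+# genesis, wait for it to come up, then shut it down.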
+if [ ! -d /root/testnode ]; then
+ mkdir -p /root/testnode
+ cp -p /var/lib/algorand/genesis/testnet/genesis.json /root/testnode
+fi
+
+goal node start -d /root/testnode
+goal node wait -d /root/testnode -w 60
+goal node stop -d /root/testnode
+
+echo UBUNTU_DOCKER_TEST_OK
diff --git a/scripts/build_release_upload.sh b/scripts/build_release_upload.sh
new file mode 100644
index 0000000000..138ecb9a71
--- /dev/null
+++ b/scripts/build_release_upload.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
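+#
+# Push the release tag, sync the signed artifacts to S3, stage .rpm files for
+# the yum repo on EFS, and upload a clearsigned build status report.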
+. ${HOME}/build_env
+set -e
+set -x
+
+# persistent storage of repo manager scratch space is on EFS
+if [ ! -z "${AWS_EFS_MOUNT}" ]; then
+ if mount|grep -q /data; then
+ echo /data already mounted
+ else
+ sudo mkdir -p /data
+ sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport "${AWS_EFS_MOUNT}":/ /data
+ # make environment for release_deb.sh
+ sudo mkdir -p /data/_aptly
+ sudo chown -R ${USER} /data/_aptly
+ export APTLY_DIR=/data/_aptly
+ fi
+fi
+
+cd ${GOPATH}/src/github.com/algorand/go-algorand
+
+. ${HOME}/build_env
+git push origin
+git push origin ${TAG}
+
+cd ${PKG_ROOT}
+
+if [ ! -z "${S3_PREFIX}" ]; then
+ aws s3 sync --quiet --exclude dev\* --exclude master\* --exclude nightly\* --exclude stable\* --acl public-read ./ ${S3_PREFIX}/${CHANNEL}/${RSTAMP}_${FULLVERSION}/
+fi
+
+# copy .rpm file to intermediate yum repo scratch space, actual publish manually later
+if [ ! -d /data/yumrepo ]; then
+ sudo mkdir -p /data/yumrepo
+ sudo chown ${USER} /data/yumrepo
+fi
+cp -p -n *.rpm *.rpm.sig /data/yumrepo
+
+cd ${HOME}
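+# Capture build provenance (AMI id, go version and env, build_env, installed
+# packages) into a status file, clearsign it, and upload it with the build logs.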
+STATUSFILE=build_status_${CHANNEL}_${FULLVERSION}
+echo "ami-id:" > "${STATUSFILE}"
+curl --silent http://169.254.169.254/latest/meta-data/ami-id >> "${STATUSFILE}"
+cat <<EOF>>"${STATUSFILE}"
+
+
+go version:
+EOF
+go version >>"${STATUSFILE}"
+cat <<EOF>>"${STATUSFILE}"
+
+go env:
+EOF
+go env >>"${STATUSFILE}"
+cat <<EOF>>"${STATUSFILE}"
+
+build_env:
+EOF
+cat <${HOME}/build_env>>"${STATUSFILE}"
+cat <<EOF>>"${STATUSFILE}"
+
+dpkg-l:
+EOF
+dpkg -l >>"${STATUSFILE}"
+gpg --clearsign "${STATUSFILE}"
+gzip "${STATUSFILE}.asc"
+if [ ! -z "${S3_PREFIX_BUILDLOG}" ]; then
+ aws s3 cp --quiet "${STATUSFILE}.asc.gz" "${S3_PREFIX_BUILDLOG}/${RSTAMP}/${STATUSFILE}.asc.gz"
+fi
+
+date "+build_release done uploading %Y%m%d_%H%M%S"
+
+# NEXT: release_deb.sh
diff --git a/scripts/centos-build.Dockerfile b/scripts/centos-build.Dockerfile
index 310d852227..ad6c520ebb 100644
--- a/scripts/centos-build.Dockerfile
+++ b/scripts/centos-build.Dockerfile
@@ -1,6 +1,6 @@
FROM centos:7
WORKDIR /root
RUN yum install -y epel-release https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-RUN yum install -y autoconf awscli git gnupg2 nfs-utils python36 sqlite3 boost-devel expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools
+RUN yum install -y autoconf awscli git gnupg2 nfs-utils python36 sqlite3 boost-devel expect jq libtool gcc-c++ libstdc++-devel libstdc++-static rpmdevtools createrepo rpm-sign bzip2
ENTRYPOINT ["/bin/bash"]
diff --git a/scripts/configure_dev-deps.sh b/scripts/configure_dev-deps.sh
index 1391eca766..ff05da3444 100755
--- a/scripts/configure_dev-deps.sh
+++ b/scripts/configure_dev-deps.sh
@@ -1,5 +1,7 @@
#!/usr/bin/env bash
+set -x
+
go get -u golang.org/x/lint/golint
go get -u github.com/golang/dep/cmd/dep
go get -u golang.org/x/tools/cmd/stringer
diff --git a/scripts/get_current_installers.py b/scripts/get_current_installers.py
new file mode 100755
index 0000000000..ec3fe3da3d
--- /dev/null
+++ b/scripts/get_current_installers.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+#
+# pip install boto3
+# python3 get_current_installers.py s3://bucket/prefix
+
+import re
+import sys
+
+import boto3
+
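+# Collect the S3 objects that share the key prefix of the first release artifact
+# listed, skipping helper files (tools_*, install_*, pending_*); the listing
+# groups one staged release set per prefix, so stop once the prefix changes.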
+def get_stage_release_set(response):
+ prefix = None
+ they = []
+ for x in response['Contents']:
+ path = x['Key']
+ pre, fname = path.rsplit('/', 1)
+ if fname.startswith('tools_') or fname.startswith('install_') or fname.startswith('pending_'):
+ continue
+ if prefix is None:
+ prefix = pre
+ they.append(x)
+ elif prefix == pre:
+ they.append(x)
+ else:
+ break
+ return they
+
+# return (bucket,prefix)
+def parse_s3_path(path):
+ m = re.match(r's3://([^/]+)/(.*)', path)
+ if m:
+ return m.group(1), m.group(2)
+ return None, None
+
+def main():
+ bucket, prefix = parse_s3_path(sys.argv[1])
+ s3 = boto3.client('s3')
+ staging_response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=100)
+ if (not staging_response.get('KeyCount')) or ('Contents' not in staging_response):
+ sys.stderr.write('nothing found under {}\n'.format(sys.argv[1]))
+ sys.exit(1)
+ rset = get_stage_release_set(staging_response)
+ for ob in rset:
+ okey = ob['Key']
+ if okey.endswith('.rpm') or okey.endswith('.deb'):
+ _, fname = okey.rsplit('/', 1)
+ s3.download_file(bucket, okey, fname)
+ return
+
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/httpd.py b/scripts/httpd.py
new file mode 100755
index 0000000000..5c191e4171
--- /dev/null
+++ b/scripts/httpd.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
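+#
+# Minimal static file server (default port 8111) used to serve the locally built
+# package repository to the docker-based install tests; if --pid is given, it
+# records its process id in that file.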
+
+import http.server
+import os
+
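+# http.server.ThreadingHTTPServer is only available on Python 3.7+; fall back to
+# the single-threaded HTTPServer on older interpreters.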
+server_class = getattr(http.server, 'ThreadingHTTPServer', None) or getattr(http.server, 'HTTPServer')
+
+def main():
+ import argparse
+ ap = argparse.ArgumentParser()
+ ap.add_argument('--pid', default=None)
+ ap.add_argument('--port', type=int, default=8111)
+ args = ap.parse_args()
+
+ if args.pid:
+ with open(args.pid, 'w') as fout:
+ fout.write(str(os.getpid()))
+ server = server_class(('', args.port), http.server.SimpleHTTPRequestHandler)
+ server.serve_forever()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/promote_stable.sh b/scripts/promote_stable.sh
index 2c82e794ad..0b62532cd3 100755
--- a/scripts/promote_stable.sh
+++ b/scripts/promote_stable.sh
@@ -26,7 +26,6 @@ function init_s3cmd() {
wget https://sourceforge.net/projects/s3tools/files/s3cmd/2.0.2/s3cmd-2.0.2.tar.gz
tar -xf s3cmd-2.0.2.tar.gz
popd
- sudo apt-get install python-dateutil
S3CMD=~/s3cmd-2.0.2/s3cmd
fi
}
diff --git a/scripts/release_deb.sh b/scripts/release_deb.sh
index fea211c68d..44d7cfa0c4 100755
--- a/scripts/release_deb.sh
+++ b/scripts/release_deb.sh
@@ -43,12 +43,6 @@ cat <<EOF>${HOME}/.aptly.conf
"bucket":"algorand-releases",
"acl":"public-read",
"prefix":"deb"
- },
- "algorand-dev-deb-repo": {
- "region":"us-east-1",
- "bucket":"algorand-dev-deb-repo",
- "acl":"public-read",
- "prefix":"deb"
}
},
"SwiftPublishEndpoints": {}
@@ -56,7 +50,9 @@ cat <<EOF>${HOME}/.aptly.conf
EOF
mkdir -p $GOPATH/src/github.com/aptly-dev
-git clone https://github.com/aptly-dev/aptly $GOPATH/src/github.com/aptly-dev/aptly || true
+if [ ! -d $GOPATH/src/github.com/aptly-dev/aptly ]; then
+ git clone https://github.com/aptly-dev/aptly $GOPATH/src/github.com/aptly-dev/aptly
+fi
(cd $GOPATH/src/github.com/aptly-dev/aptly && git fetch)
# As of 2019-06-06 release tag v1.3.0 is 2018-May, GnuPG 2 support was added in October but they haven't tagged a new release yet. Hash below seems to work so far.
(cd $GOPATH/src/github.com/aptly-dev/aptly && git checkout e2d6a53de5ee03814b3fe19a8954a09a5c2969b9)
diff --git a/scripts/travis/release_packages.sh b/scripts/travis/release_packages.sh
index 27d5df31a0..0386337f83 100755
--- a/scripts/travis/release_packages.sh
+++ b/scripts/travis/release_packages.sh
@@ -30,7 +30,6 @@ function init_s3cmd() {
wget https://sourceforge.net/projects/s3tools/files/s3cmd/2.0.2/s3cmd-2.0.2.tar.gz
tar -xf s3cmd-2.0.2.tar.gz
popd
- sudo apt-get install python-dateutil
S3CMD=~/s3cmd-2.0.2/s3cmd
fi
}
diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
index cf7956deef..4aeb08ddbb 100644
--- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
+++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go
@@ -185,13 +185,5 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) {
// helper copied from agreement/selector.go
func balanceRound(r basics.Round, cparams config.ConsensusParams) basics.Round {
- if cparams.TwinSeeds {
- return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback))
- }
-
- lookback := basics.Round(2*cparams.SeedRefreshInterval + cparams.SeedLookback + 1)
- if cparams.IncorrectBalLookback {
- return r.SubSaturate(lookback) + 2
- }
- return r.SubSaturate(lookback)
+ return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback))
}
diff --git a/test/e2e-go/features/transactions/close_account_test.go b/test/e2e-go/features/transactions/close_account_test.go
index 74656076c2..ec0d518838 100644
--- a/test/e2e-go/features/transactions/close_account_test.go
+++ b/test/e2e-go/features/transactions/close_account_test.go
@@ -30,7 +30,7 @@ func TestAccountsCanClose(t *testing.T) {
a := require.New(t)
var fixture fixtures.RestClientFixture
- fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV4.json"))
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV10.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
@@ -58,11 +58,11 @@ func TestAccountsCanClose(t *testing.T) {
a.NoError(err)
// Transfer some money to acct0 and wait.
- tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, 1, 1000000, nil)
+ tx, err := client.SendPaymentFromUnencryptedWallet(baseAcct, acct0, 1000, 10000000, nil)
a.NoError(err)
fixture.WaitForConfirmedTxn(status.LastRound+10, baseAcct, tx.ID().String())
- tx, err = client.SendPaymentFromWallet(walletHandle, nil, acct0, acct1, 1, 100000, nil, acct2, 0, 0)
+ tx, err = client.SendPaymentFromWallet(walletHandle, nil, acct0, acct1, 1000, 1000000, nil, acct2, 0, 0)
a.NoError(err)
fixture.WaitForConfirmedTxn(status.LastRound+10, acct0, tx.ID().String())
@@ -76,6 +76,6 @@ func TestAccountsCanClose(t *testing.T) {
a.NoError(err)
a.True(bal0 == 0)
- a.True(bal1 >= 100000)
- a.True(bal2 >= 899999)
+ a.True(bal1 >= 1000000)
+ a.True(bal2 >= 8999000)
}
diff --git a/test/e2e-go/features/transactions/goOnlineGoOffline_test.go b/test/e2e-go/features/transactions/goOnlineGoOffline_test.go
index c583e904c7..48a657cbd0 100644
--- a/test/e2e-go/features/transactions/goOnlineGoOffline_test.go
+++ b/test/e2e-go/features/transactions/goOnlineGoOffline_test.go
@@ -33,8 +33,8 @@ func TestAccountsCanChangeOnlineState(t *testing.T) {
testAccountsCanChangeOnlineState(t, filepath.Join("nettemplates", "TwoNodesPartlyOffline.json"))
}
-func TestAccountsCanChangeOnlineStateV6(t *testing.T) {
- testAccountsCanChangeOnlineState(t, filepath.Join("nettemplates", "TwoNodesPartlyOfflineV6.json"))
+func TestAccountsCanChangeOnlineStateV7(t *testing.T) {
+ testAccountsCanChangeOnlineState(t, filepath.Join("nettemplates", "TwoNodesPartlyOfflineV7.json"))
}
func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) {
diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go
index 6c67401f56..4b5eb7e472 100644
--- a/test/e2e-go/features/transactions/sendReceive_test.go
+++ b/test/e2e-go/features/transactions/sendReceive_test.go
@@ -43,18 +43,6 @@ func TestAccountsCanSendMoney(t *testing.T) {
testAccountsCanSendMoney(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
}
-// this test checks that we can still send money in protocol v3,
-// which adds support for fine-grained ephemeral keys.
-func TestAccountsCanSendMoneyV3(t *testing.T) {
- testAccountsCanSendMoney(t, filepath.Join("nettemplates", "TwoNodes50EachV3.json"))
-}
-
-// this test checks that we can still send money in protocol v4,
-// which adds MinBalance.
-func TestAccountsCanSendMoneyV4(t *testing.T) {
- testAccountsCanSendMoney(t, filepath.Join("nettemplates", "TwoNodes50EachV4.json"))
-}
-
func testAccountsCanSendMoney(t *testing.T, templatePath string) {
t.Parallel()
a := require.New(t)
diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go
index e4972710f3..a9ce7ed73b 100644
--- a/test/e2e-go/restAPI/restClient_test.go
+++ b/test/e2e-go/restAPI/restClient_test.go
@@ -127,7 +127,7 @@ func waitForRoundOne(t *testing.T, testClient libgoal.Client) {
select {
case err := <-errchan:
require.NoError(t, err)
- case <-time.After(30 * time.Second):
+ case <-time.After(1 * time.Minute): // Wait 1 minute (same as WaitForRound)
close(quit)
t.Fatalf("%s: timeout waiting for round 1", t.Name())
}
diff --git a/test/e2e-go/upgrades/send_receive_upgrade_test.go b/test/e2e-go/upgrades/send_receive_upgrade_test.go
index dc19bd31ae..09692ff187 100644
--- a/test/e2e-go/upgrades/send_receive_upgrade_test.go
+++ b/test/e2e-go/upgrades/send_receive_upgrade_test.go
@@ -40,26 +40,6 @@ func GenerateRandomBytes(n int) []byte {
// this test checks that two accounts can send money to one another
// across a protocol upgrade.
-func TestAccountsCanSendMoneyAcrossUpgradeV2toV3(t *testing.T) {
- testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV2Upgrade.json"))
-}
-
-func TestAccountsCanSendMoneyAcrossUpgradeV3toV4(t *testing.T) {
- testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV3Upgrade.json"))
-}
-
-func TestAccountsCanSendMoneyAcrossUpgradeV4toV5(t *testing.T) {
- testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV4Upgrade.json"))
-}
-
-func TestAccountsCanSendMoneyAcrossUpgradeV5toV6(t *testing.T) {
- testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV5Upgrade.json"))
-}
-
-func TestAccountsCanSendMoneyAcrossUpgradeV6toV7(t *testing.T) {
- testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV6Upgrade.json"))
-}
-
func TestAccountsCanSendMoneyAcrossUpgradeV7toV8(t *testing.T) {
testAccountsCanSendMoneyAcrossUpgrade(t, filepath.Join("nettemplates", "TwoNodes50EachV7Upgrade.json"))
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index 0b29c7e922..200216f0dc 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -278,6 +278,14 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) {
}
}
+// intercept baseFixture.failOnError so we can clean up any algods that are still alive
+func (f *LibGoalFixture) failOnError(err error, message string) {
+ if err != nil {
+ f.network.Stop(f.binDir)
+ f.baseFixture.failOnError(err, message)
+ }
+}
+
// PrimaryDataDir returns the data directory for the PrimaryNode for the network
func (f *LibGoalFixture) PrimaryDataDir() string {
return f.network.PrimaryDataDir()
diff --git a/test/testdata/nettemplates/TwoNodes50EachV2Upgrade.json b/test/testdata/nettemplates/TwoNodes50EachV10.json
similarity index 93%
rename from test/testdata/nettemplates/TwoNodes50EachV2Upgrade.json
rename to test/testdata/nettemplates/TwoNodes50EachV10.json
index 5f6cfd5f02..ede46e2362 100644
--- a/test/testdata/nettemplates/TwoNodes50EachV2Upgrade.json
+++ b/test/testdata/nettemplates/TwoNodes50EachV10.json
@@ -1,7 +1,7 @@
{
"Genesis": {
"NetworkName": "tbd",
- "ConsensusProtocol": "test-fast-upgrade-v2",
+ "ConsensusProtocol": "v10",
"Wallets": [
{
"Name": "Wallet1",
diff --git a/test/testdata/nettemplates/TwoNodes50EachV3Upgrade.json b/test/testdata/nettemplates/TwoNodes50EachV3Upgrade.json
deleted file mode 100644
index d5f05bdbe5..0000000000
--- a/test/testdata/nettemplates/TwoNodes50EachV3Upgrade.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "ConsensusProtocol": "test-fast-upgrade-v3",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 50,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 50,
- "Online": true
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Primary",
- "IsRelay": true,
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": false }
- ]
- },
- {
- "Name": "Node",
- "Wallets": [
- { "Name": "Wallet2",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TwoNodes50EachV4Upgrade.json b/test/testdata/nettemplates/TwoNodes50EachV4Upgrade.json
deleted file mode 100644
index 6970fa8c2d..0000000000
--- a/test/testdata/nettemplates/TwoNodes50EachV4Upgrade.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "ConsensusProtocol": "test-fast-upgrade-v4",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 50,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 50,
- "Online": true
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Primary",
- "IsRelay": true,
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": false }
- ]
- },
- {
- "Name": "Node",
- "Wallets": [
- { "Name": "Wallet2",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TwoNodes50EachV5Upgrade.json b/test/testdata/nettemplates/TwoNodes50EachV5Upgrade.json
deleted file mode 100644
index 85dce7c17f..0000000000
--- a/test/testdata/nettemplates/TwoNodes50EachV5Upgrade.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "ConsensusProtocol": "test-fast-upgrade-v5",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 50,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 50,
- "Online": true
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Primary",
- "IsRelay": true,
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": false }
- ]
- },
- {
- "Name": "Node",
- "Wallets": [
- { "Name": "Wallet2",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TwoNodes50EachV6Upgrade.json b/test/testdata/nettemplates/TwoNodes50EachV6Upgrade.json
deleted file mode 100644
index 3b0ab7f600..0000000000
--- a/test/testdata/nettemplates/TwoNodes50EachV6Upgrade.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "Genesis": {
- "NetworkName": "tbd",
- "ConsensusProtocol": "test-fast-upgrade-v6",
- "Wallets": [
- {
- "Name": "Wallet1",
- "Stake": 50,
- "Online": true
- },
- {
- "Name": "Wallet2",
- "Stake": 50,
- "Online": true
- }
- ]
- },
- "Nodes": [
- {
- "Name": "Primary",
- "IsRelay": true,
- "Wallets": [
- { "Name": "Wallet1",
- "ParticipationOnly": false }
- ]
- },
- {
- "Name": "Node",
- "Wallets": [
- { "Name": "Wallet2",
- "ParticipationOnly": false }
- ]
- }
- ]
-}
diff --git a/test/testdata/nettemplates/TwoNodes50EachV4.json b/test/testdata/nettemplates/TwoNodes50EachV7.json
similarity index 95%
rename from test/testdata/nettemplates/TwoNodes50EachV4.json
rename to test/testdata/nettemplates/TwoNodes50EachV7.json
index 919f87f756..cbe0ea7ed0 100644
--- a/test/testdata/nettemplates/TwoNodes50EachV4.json
+++ b/test/testdata/nettemplates/TwoNodes50EachV7.json
@@ -1,7 +1,7 @@
{
"Genesis": {
"NetworkName": "tbd",
- "ConsensusProtocol": "v4",
+ "ConsensusProtocol": "v7",
"Wallets": [
{
"Name": "Wallet1",
diff --git a/test/testdata/nettemplates/TwoNodesPartlyOfflineV6.json b/test/testdata/nettemplates/TwoNodesPartlyOfflineV7.json
similarity index 96%
rename from test/testdata/nettemplates/TwoNodesPartlyOfflineV6.json
rename to test/testdata/nettemplates/TwoNodesPartlyOfflineV7.json
index f622119d52..26aef60485 100644
--- a/test/testdata/nettemplates/TwoNodesPartlyOfflineV6.json
+++ b/test/testdata/nettemplates/TwoNodesPartlyOfflineV7.json
@@ -1,7 +1,7 @@
{
"Genesis": {
"NetworkName": "tbd",
- "ConsensusProtocol": "v6",
+ "ConsensusProtocol": "v7",
"Wallets": [
{
"Name": "Offline1",
diff --git a/tools/network/cloudflare/cloudflare.go b/tools/network/cloudflare/cloudflare.go
index a4be998911..bc6f912295 100644
--- a/tools/network/cloudflare/cloudflare.go
+++ b/tools/network/cloudflare/cloudflare.go
@@ -30,6 +30,12 @@ const (
AutomaticTTL = 1
)
+// ErrUserNotPermitted is used when a user that is not permitted in a given zone attempts to perform an operation on that zone.
+var ErrUserNotPermitted = fmt.Errorf("user not permitted in zone")
+
+// ErrDuplicateZoneNameFound is used when more than one zone matches the requested zone name.
+var ErrDuplicateZoneNameFound = fmt.Errorf("more than a single zone name found to match the requested zone name")
+
// Cred contains the credentials used to authenticate with the cloudflare API.
type Cred struct {
authEmail string
@@ -295,6 +301,36 @@ func (c *Cred) GetZones(ctx context.Context) (zones []Zone, err error) {
return zones, err
}
+// GetZoneID returns a zoneID that matches the requested zoneDomainName.
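+// It returns ErrUserNotPermitted when the credentials cannot see any zones, and
+// ErrDuplicateZoneNameFound when more than one zone matches the requested name.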
+func (c *Cred) GetZoneID(ctx context.Context, zoneDomainName string) (zoneID string, err error) {
+ zones, err := c.GetZones(ctx)
+ if err != nil {
+ return
+ }
+ if len(zones) == 0 {
+ err = ErrUserNotPermitted
+ return
+ }
+ zoneDomainName = strings.ToLower(zoneDomainName)
+ var matchingZone Zone
+ for _, zone := range zones {
+ if zoneDomainName == strings.ToLower(zone.DomainName) {
+ // found a match.
+ if matchingZone.ZoneID != "" {
+ // we already had a previous match ?!
+ err = ErrDuplicateZoneNameFound
+ return
+ }
+ matchingZone = zone
+ }
+ }
+ if matchingZone.ZoneID == "" {
+ err = fmt.Errorf("no zones matching %s for specified credentials", zoneDomainName)
+ return
+ }
+ return matchingZone.ZoneID, nil
+}
+
// ExportZone exports the zone into a BIND config bytes array
func (d *DNS) ExportZone(ctx context.Context) (exportedZoneBytes []byte, err error) {
request, err := exportZoneRequest(d.zoneID, d.authEmail, d.authKey)