From 79402f2f035de8699bc732c9a9732a45bf4f4037 Mon Sep 17 00:00:00 2001 From: Matheus Degiovani Date: Tue, 29 Nov 2022 11:36:04 -0300 Subject: [PATCH] rpctest: Remove package This removes the now unused rpctest package. --- go.mod | 2 +- rpctest/README.md | 30 -- rpctest/debug.go | 40 -- rpctest/doc.go | 12 - rpctest/memwallet.go | 610 ---------------------------- rpctest/node.go | 404 ------------------- rpctest/rpc_harness.go | 441 --------------------- rpctest/rpc_harness_test.go | 741 ----------------------------------- rpctest/simnet_miner.go | 198 ---------- rpctest/utils.go | 269 ------------- rpctest/votingwallet.go | 642 ------------------------------ rpctest/votingwallet_test.go | 146 ------- 12 files changed, 1 insertion(+), 3534 deletions(-) delete mode 100644 rpctest/README.md delete mode 100644 rpctest/debug.go delete mode 100644 rpctest/doc.go delete mode 100644 rpctest/memwallet.go delete mode 100644 rpctest/node.go delete mode 100644 rpctest/rpc_harness.go delete mode 100644 rpctest/rpc_harness_test.go delete mode 100644 rpctest/simnet_miner.go delete mode 100644 rpctest/utils.go delete mode 100644 rpctest/votingwallet.go delete mode 100644 rpctest/votingwallet_test.go diff --git a/go.mod b/go.mod index 47aa3c2775..3a42c820dc 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,6 @@ require ( github.com/decred/dcrd/dcrjson/v4 v4.0.0 github.com/decred/dcrd/dcrutil/v4 v4.0.0 github.com/decred/dcrd/gcs/v4 v4.0.0 - github.com/decred/dcrd/hdkeychain/v3 v3.1.0 github.com/decred/dcrd/lru v1.1.1 github.com/decred/dcrd/math/uint256 v1.0.0 github.com/decred/dcrd/peer/v3 v3.0.0 @@ -47,6 +46,7 @@ require ( github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect github.com/dchest/siphash v1.2.2 // indirect github.com/decred/dcrd/dcrec/edwards/v2 v2.0.2 // indirect + github.com/decred/dcrd/hdkeychain/v3 v3.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect ) diff --git a/rpctest/README.md b/rpctest/README.md deleted file mode 100644 index 8b40e20adc..0000000000 --- a/rpctest/README.md +++ /dev/null @@ -1,30 +0,0 @@ -rpctest -======= - -[![Build Status](https://github.com/decred/dcrd/workflows/Build%20and%20Test/badge.svg)](https://github.com/decred/dcrd/actions) -[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) -[![Doc](https://img.shields.io/badge/doc-reference-blue.svg)](https://pkg.go.dev/github.com/decred/dcrd/rpctest) - -Package rpctest provides a dcrd-specific RPC testing harness crafting and -executing integration tests by driving a `dcrd` instance via the `RPC` -interface. Each instance of an active harness comes equipped with a simple -in-memory HD wallet capable of properly syncing to the generated chain, -creating new addresses, and crafting fully signed transactions paying to an -arbitrary set of outputs. - -This package was designed specifically to act as an RPC testing harness for -`dcrd`. However, the constructs presented are general enough to be adapted to -any project wishing to programmatically drive a `dcrd` instance of its -systems/integration tests. - -## Installation and Updating - -This package is part of the `github.com/decred/dcrd` module. Use the standard -go tooling for working with modules to incorporate it. - -## License - - -Package rpctest is licensed under the [copyfree](http://copyfree.org) ISC -License. 
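For reference, here is a minimal sketch of how a test typically drove the harness being removed by this patch, reconstructed from the API visible in the deleted files below (New, SetUp, NewAddress, SendOutputs, TearDown). It is illustration only and not part of the patch; the simnet parameters, the 25 mature outputs, and the 10 atoms/byte fee rate are arbitrary example values, and the rpctest import no longer resolves once this change lands.

package rpctest_example

import (
	"context"
	"testing"

	"github.com/decred/dcrd/chaincfg/v3"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrd/rpctest" // removed by this patch
	"github.com/decred/dcrd/wire"
)

func TestHarnessExample(t *testing.T) {
	ctx := context.Background()

	// Spin up a managed dcrd process on simnet with the in-memory wallet.
	harness, err := rpctest.New(t, chaincfg.SimNetParams(), nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer harness.TearDown()

	// Generate a test chain with 25 mature coinbase outputs to spend from.
	if err := harness.SetUp(ctx, true, 25); err != nil {
		t.Fatal(err)
	}

	// Craft and broadcast a fully signed spend to a fresh wallet address
	// at a fee rate of 10 atoms/byte.
	addr, err := harness.NewAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}
	ver, script := addr.PaymentScript()
	out := &wire.TxOut{Value: dcrutil.AtomsPerCoin, Version: ver, PkScript: script}
	if _, err := harness.SendOutputs(ctx, []*wire.TxOut{out}, 10); err != nil {
		t.Fatal(err)
	}
}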
- diff --git a/rpctest/debug.go b/rpctest/debug.go deleted file mode 100644 index 4d3a52dad2..0000000000 --- a/rpctest/debug.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpctest - -import ( - "testing" -) - -// This package is very hard to debug so we add a couple of variables that -// enable debug and tracing output. Leave them false before committing to -// master. -var ( - debug bool // Set to true to enable additional verbosity. - trace bool // Set to true to enable tracing. -) - -func init() { - debug = false - trace = false -} - -func logf(t *testing.T, format string, args ...interface{}) { - t.Logf(format, args...) -} - -func tracef(t *testing.T, format string, args ...interface{}) { - if !trace { - return - } - t.Logf(format, args...) -} - -func debugf(t *testing.T, format string, args ...interface{}) { - if !debug { - return - } - t.Logf(format, args...) -} diff --git a/rpctest/doc.go b/rpctest/doc.go deleted file mode 100644 index dd048929e1..0000000000 --- a/rpctest/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package rpctest provides a dcrd-specific RPC testing harness crafting and -// executing integration tests by driving a `dcrd` instance via the `RPC` -// interface. Each instance of an active harness comes equipped with a simple -// in-memory HD wallet capable of properly syncing to the generated chain, -// creating new addresses, and crafting fully signed transactions paying to an -// arbitrary set of outputs. -// -// This package was designed specifically to act as an RPC testing harness for -// `dcrd`. However, the constructs presented are general enough to be adapted to -// any project wishing to programmatically drive a `dcrd` instance of its -// systems/integration tests. -package rpctest diff --git a/rpctest/memwallet.go b/rpctest/memwallet.go deleted file mode 100644 index 80a8e27d16..0000000000 --- a/rpctest/memwallet.go +++ /dev/null @@ -1,610 +0,0 @@ -// Copyright (c) 2016-2017 The btcsuite developers -// Copyright (c) 2017-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpctest - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "sync" - "testing" - - "github.com/decred/dcrd/blockchain/standalone/v2" - "github.com/decred/dcrd/chaincfg/chainhash" - "github.com/decred/dcrd/chaincfg/v3" - "github.com/decred/dcrd/dcrec" - "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/decred/dcrd/dcrutil/v4" - "github.com/decred/dcrd/hdkeychain/v3" - "github.com/decred/dcrd/rpcclient/v8" - "github.com/decred/dcrd/txscript/v4" - "github.com/decred/dcrd/txscript/v4/sign" - "github.com/decred/dcrd/txscript/v4/stdaddr" - "github.com/decred/dcrd/wire" -) - -const ( - // noTreasury signifies the treasury agenda should be treated as though - // it is inactive. It is used to increase the readability of the - // tests. - noTreasury = false -) - -var ( - // hdSeed is the BIP 32 seed used by the memWallet to initialize it's - // HD root key. This value is hard coded in order to ensure - // deterministic behavior across test runs. - hdSeed = [chainhash.HashSize]byte{ - 0x79, 0xa6, 0x1a, 0xdb, 0xc6, 0xe5, 0xa2, 0xe1, - 0x39, 0xd2, 0x71, 0x3a, 0x54, 0x6e, 0xc7, 0xc8, - 0x75, 0x63, 0x2e, 0x75, 0xf1, 0xdf, 0x9c, 0x3f, - 0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - } -) - -// utxo represents an unspent output spendable by the memWallet. 
The maturity -// height of the transaction is recorded in order to properly observe the -// maturity period of direct coinbase outputs. -type utxo struct { - pkScript []byte - value dcrutil.Amount - maturityHeight int64 - keyIndex uint32 - isLocked bool -} - -// isMature returns true if the target utxo is considered "mature" at the -// passed block height. Otherwise, false is returned. -func (u *utxo) isMature(height int64) bool { - return height >= u.maturityHeight -} - -// chainUpdate encapsulates an update to the current main chain. This struct is -// used to sync up the memWallet each time a new block is connected to the main -// chain. -type chainUpdate struct { - blockHeight int64 - filteredTxns []*dcrutil.Tx -} - -// undoEntry is functionally the opposite of a chainUpdate. An undoEntry is -// created for each new block received, then stored in a log in order to -// properly handle block re-orgs. -type undoEntry struct { - utxosDestroyed map[wire.OutPoint]*utxo - utxosCreated []wire.OutPoint -} - -// memWallet is a simple in-memory wallet whose purpose is to provide basic -// wallet functionality to the harness. The wallet uses a hard-coded HD key -// hierarchy which promotes reproducibility between harness test runs. -type memWallet struct { - coinbaseKey *secp256k1.PrivateKey - coinbaseAddr stdaddr.Address - - // hdRoot is the root master private key for the wallet. - hdRoot *hdkeychain.ExtendedKey - - // hdIndex is the next available key index offset from the hdRoot. - hdIndex uint32 - - // currentHeight is the latest height the wallet is known to be synced - // to. - currentHeight int64 - - // addrs tracks all addresses belonging to the wallet. The addresses - // are indexed by their keypath from the hdRoot. - addrs map[uint32]stdaddr.Address - - // utxos is the set of utxos spendable by the wallet. - utxos map[wire.OutPoint]*utxo - - // reorgJournal is a map storing an undo entry for each new block - // received. Once a block is disconnected, the undo entry for the - // particular height is evaluated, thereby rewinding the effect of the - // disconnected block on the wallet's set of spendable utxos. - reorgJournal map[int64]*undoEntry - - chainUpdates []*chainUpdate - chainUpdateSignal chan struct{} - chainMtx sync.Mutex - - net *chaincfg.Params - - t *testing.T - - rpc *rpcclient.Client - - sync.RWMutex -} - -// newMemWallet creates and returns a fully initialized instance of the -// memWallet given a particular blockchain's parameters. -func newMemWallet(t *testing.T, net *chaincfg.Params, harnessID uint32) (*memWallet, error) { - // The wallet's final HD seed is: hdSeed || harnessID. This method - // ensures that each harness instance uses a deterministic root seed - // based on its harness ID. - var harnessHDSeed [chainhash.HashSize + 4]byte - copy(harnessHDSeed[:], hdSeed[:]) - binary.BigEndian.PutUint32(harnessHDSeed[:chainhash.HashSize], harnessID) - - hdRoot, err := hdkeychain.NewMaster(harnessHDSeed[:], net) - if err != nil { - return nil, nil - } - - // The first child key from the hd root is reserved as the coinbase - // generation address. - coinbaseChild, err := hdRoot.Child(0) - if err != nil { - return nil, err - } - coinbaseKey, err := coinbaseChild.SerializedPrivKey() - if err != nil { - return nil, err - } - coinbaseAddr, err := keyToAddr(coinbaseKey, net) - if err != nil { - return nil, err - } - - // Track the coinbase generation address to ensure we properly track - // newly generated coins we can spend. 
- addrs := make(map[uint32]stdaddr.Address) - addrs[0] = coinbaseAddr - - return &memWallet{ - net: net, - coinbaseKey: secp256k1.PrivKeyFromBytes(coinbaseKey), - coinbaseAddr: coinbaseAddr, - hdIndex: 1, - hdRoot: hdRoot, - addrs: addrs, - t: t, - utxos: make(map[wire.OutPoint]*utxo), - chainUpdateSignal: make(chan struct{}), - reorgJournal: make(map[int64]*undoEntry), - }, nil -} - -// Start launches all goroutines required for the wallet to function properly. -func (m *memWallet) Start() { - go m.chainSyncer() -} - -// SyncedHeight returns the height the wallet is known to be synced to. -// -// This function is safe for concurrent access. -func (m *memWallet) SyncedHeight() int64 { - m.RLock() - defer m.RUnlock() - return m.currentHeight -} - -// SetRPCClient saves the passed rpc connection to dcrd as the wallet's -// personal rpc connection. -func (m *memWallet) SetRPCClient(rpcClient *rpcclient.Client) { - m.rpc = rpcClient -} - -// IngestBlock is a call-back which is to be triggered each time a new block is -// connected to the main chain. Ingesting a block updates the wallet's internal -// utxo state based on the outputs created and destroyed within each block. -func (m *memWallet) IngestBlock(header []byte, filteredTxns [][]byte) { - tracef(m.t, "memwallet.IngestBlock") - defer tracef(m.t, "memwallet.IngestBlock exit") - - var hdr wire.BlockHeader - if err := hdr.FromBytes(header); err != nil { - panic(err) - } - height := int64(hdr.Height) - - txns := make([]*dcrutil.Tx, 0, len(filteredTxns)) - for _, txBytes := range filteredTxns { - tx, err := dcrutil.NewTxFromBytes(txBytes) - if err != nil { - panic(err) - } - txns = append(txns, tx) - } - - // Append this new chain update to the end of the queue of new chain - // updates. - m.chainMtx.Lock() - m.chainUpdates = append(m.chainUpdates, &chainUpdate{height, txns}) - m.chainMtx.Unlock() - - // Launch a goroutine to signal the chainSyncer that a new update is - // available. We do this in a new goroutine in order to avoid blocking - // the main loop of the rpc client. - go func() { - m.chainUpdateSignal <- struct{}{} - }() -} - -// chainSyncer is a goroutine dedicated to processing new blocks in order to -// keep the wallet's utxo state up to date. -// -// NOTE: This MUST be run as a goroutine. -func (m *memWallet) chainSyncer() { - tracef(m.t, "memwallet.chainSyncer") - defer tracef(m.t, "memwallet.chainSyncer exit") - - var update *chainUpdate - - for range m.chainUpdateSignal { - // A new update is available, so pop the new chain update from - // the front of the update queue. - m.chainMtx.Lock() - update = m.chainUpdates[0] - m.chainUpdates[0] = nil // Set to nil to prevent GC leak. - m.chainUpdates = m.chainUpdates[1:] - m.chainMtx.Unlock() - - // Update the latest synced height, then process each filtered - // transaction in the block creating and destroying utxos within - // the wallet as a result. - m.Lock() - m.currentHeight = update.blockHeight - undo := &undoEntry{ - utxosDestroyed: make(map[wire.OutPoint]*utxo), - } - for _, tx := range update.filteredTxns { - mtx := tx.MsgTx() - isCoinbase := standalone.IsCoinBaseTx(mtx, noTreasury) - txHash := mtx.TxHash() - m.evalOutputs(mtx.TxOut, &txHash, isCoinbase, undo) - m.evalInputs(mtx.TxIn, undo) - } - - // Finally, record the undo entry for this block so we can - // properly update our internal state in response to the block - // being re-org'd from the main chain. 
- m.reorgJournal[update.blockHeight] = undo - m.Unlock() - } -} - -// evalOutputs evaluates each of the passed outputs, creating a new matching -// utxo within the wallet if we're able to spend the output. -func (m *memWallet) evalOutputs(outputs []*wire.TxOut, txHash *chainhash.Hash, isCoinbase bool, undo *undoEntry) { - tracef(m.t, "memwallet.evalOutputs") - defer tracef(m.t, "memwallet.evalOutputs exit") - - for i, output := range outputs { - pkScript := output.PkScript - - // Scan all the addresses we currently control to see if the - // output is paying to us. - for keyIndex, addr := range m.addrs { - pkHash := addr.(stdaddr.Hash160er).Hash160() - if !bytes.Contains(pkScript, pkHash[:]) { - continue - } - - // If this is a coinbase output, then we mark the - // maturity height at the proper block height in the - // future. - var maturityHeight int64 - if isCoinbase { - maturityHeight = m.currentHeight + int64(m.net.CoinbaseMaturity) - } - - op := wire.OutPoint{Hash: *txHash, Index: uint32(i)} - m.utxos[op] = &utxo{ - value: dcrutil.Amount(output.Value), - keyIndex: keyIndex, - maturityHeight: maturityHeight, - pkScript: pkScript, - } - undo.utxosCreated = append(undo.utxosCreated, op) - } - } -} - -// evalInputs scans all the passed inputs, destroying any utxos within the -// wallet which are spent by an input. -func (m *memWallet) evalInputs(inputs []*wire.TxIn, undo *undoEntry) { - tracef(m.t, "memwallet.evalInputs") - defer tracef(m.t, "memwallet.evalInputs exit") - - for _, txIn := range inputs { - op := txIn.PreviousOutPoint - oldUtxo, ok := m.utxos[op] - if !ok { - continue - } - - undo.utxosDestroyed[op] = oldUtxo - delete(m.utxos, op) - } -} - -// UnwindBlock is a call-back which is to be executed each time a block is -// disconnected from the main chain. Unwinding a block undoes the effect that a -// particular block had on the wallet's internal utxo state. -func (m *memWallet) UnwindBlock(header []byte) { - tracef(m.t, "memwallet.UnwindBlock") - defer tracef(m.t, "memwallet.UnwindBlock exit") - - var hdr wire.BlockHeader - if err := hdr.FromBytes(header); err != nil { - panic(err) - } - height := int64(hdr.Height) - - m.Lock() - defer m.Unlock() - - undo := m.reorgJournal[height] - - for _, utxo := range undo.utxosCreated { - delete(m.utxos, utxo) - } - - for outPoint, utxo := range undo.utxosDestroyed { - m.utxos[outPoint] = utxo - } - - delete(m.reorgJournal, height) -} - -// newAddress returns a new address from the wallet's hd key chain. It also -// loads the address into the RPC client's transaction filter to ensure any -// transactions that involve it are delivered via the notifications. -func (m *memWallet) newAddress(ctx context.Context) (stdaddr.Address, error) { - tracef(m.t, "memwallet.newAddress") - defer tracef(m.t, "memwallet.newAddress exit") - - index := m.hdIndex - - childKey, err := m.hdRoot.Child(index) - if err != nil { - return nil, err - } - privKey, err := childKey.SerializedPrivKey() - if err != nil { - return nil, err - } - - addr, err := keyToAddr(privKey, m.net) - if err != nil { - return nil, err - } - - err = m.rpc.LoadTxFilter(ctx, false, - []stdaddr.Address{addr}, nil) - if err != nil { - return nil, err - } - - m.addrs[index] = addr - - m.hdIndex++ - - return addr, nil -} - -// NewAddress returns a fresh address spendable by the wallet. -// -// This function is safe for concurrent access. 
-func (m *memWallet) NewAddress(ctx context.Context) (stdaddr.Address, error) { - m.Lock() - defer m.Unlock() - - return m.newAddress(ctx) -} - -// fundTx attempts to fund a transaction sending amt coins. The coins are -// selected such that the final amount spent pays enough fees as dictated by -// the passed fee rate. The passed fee rate should be expressed in -// atoms-per-byte. -// -// NOTE: The memWallet's mutex must be held when this function is called. -func (m *memWallet) fundTx(ctx context.Context, tx *wire.MsgTx, amt dcrutil.Amount, feeRate dcrutil.Amount) error { - tracef(m.t, "memwallet.fundTx") - defer tracef(m.t, "memwallet.fundTx exit") - - const ( - // spendSize is the largest number of bytes of a sigScript - // which spends a p2pkh output: OP_DATA_73 OP_DATA_33 - spendSize = 1 + 73 + 1 + 33 - ) - - var ( - amtSelected dcrutil.Amount - txSize int - ) - - for outPoint, utxo := range m.utxos { - // Skip any outputs that are still currently immature or are - // currently locked. - if !utxo.isMature(m.currentHeight) || utxo.isLocked { - continue - } - - amtSelected += utxo.value - - // Add the selected output to the transaction, updating the - // current tx size while accounting for the size of the future - // sigScript. - tx.AddTxIn(wire.NewTxIn(&outPoint, int64(utxo.value), nil)) - txSize = tx.SerializeSize() + spendSize*len(tx.TxIn) - - // Calculate the fee required for the txn at this point - // observing the specified fee rate. If we don't have enough - // coins from he current amount selected to pay the fee, then - // continue to grab more coins. - reqFee := dcrutil.Amount(txSize * int(feeRate)) - if amtSelected-reqFee < amt { - continue - } - - // If we have any change left over, then add an additional - // output to the transaction reserved for change. - changeVal := amtSelected - amt - reqFee - if changeVal > 0 { - addr, err := m.newAddress(ctx) - if err != nil { - return err - } - pkScriptVer, pkScript := addr.PaymentScript() - changeOutput := &wire.TxOut{ - Value: int64(changeVal), - Version: pkScriptVer, - PkScript: pkScript, - } - tx.AddTxOut(changeOutput) - } - - return nil - } - - // If we've reached this point, then coin selection failed due to an - // insufficient amount of coins. - return fmt.Errorf("not enough funds for coin selection") -} - -// SendOutputs creates, then sends a transaction paying to the specified output -// while observing the passed fee rate. The passed fee rate should be expressed -// in atoms-per-byte. -func (m *memWallet) SendOutputs(ctx context.Context, outputs []*wire.TxOut, feeRate dcrutil.Amount) (*chainhash.Hash, error) { - tracef(m.t, "memwallet.SendOutputs") - defer tracef(m.t, "memwallet.SendOutputs exit") - - tx, err := m.CreateTransaction(ctx, outputs, feeRate) - if err != nil { - return nil, err - } - - return m.rpc.SendRawTransaction(ctx, tx, true) -} - -// CreateTransaction returns a fully signed transaction paying to the specified -// outputs while observing the desired fee rate. The passed fee rate should be -// expressed in atoms-per-byte. -// -// This function is safe for concurrent access. -func (m *memWallet) CreateTransaction(ctx context.Context, outputs []*wire.TxOut, feeRate dcrutil.Amount) (*wire.MsgTx, error) { - tracef(m.t, "memwallet.CreateTransaction") - defer tracef(m.t, "memwallet.CreateTransaction exit") - - m.Lock() - defer m.Unlock() - - tx := wire.NewMsgTx() - - // Tally up the total amount to be sent in order to perform coin - // selection shortly below. 
- var outputAmt dcrutil.Amount - for _, output := range outputs { - outputAmt += dcrutil.Amount(output.Value) - tx.AddTxOut(output) - } - - // Attempt to fund the transaction with spendable utxos. - if err := m.fundTx(ctx, tx, outputAmt, feeRate); err != nil { - return nil, err - } - - // Populate all the selected inputs with valid sigScript for spending. - // Along the way record all outputs being spent in order to avoid a - // potential double spend. - spentOutputs := make([]*utxo, 0, len(tx.TxIn)) - for i, txIn := range tx.TxIn { - outPoint := txIn.PreviousOutPoint - utxo := m.utxos[outPoint] - - extendedKey, err := m.hdRoot.Child(utxo.keyIndex) - if err != nil { - return nil, err - } - - privKey, err := extendedKey.SerializedPrivKey() - if err != nil { - return nil, err - } - - sigScript, err := sign.SignatureScript(tx, i, utxo.pkScript, - txscript.SigHashAll, privKey, dcrec.STEcdsaSecp256k1, true) - if err != nil { - return nil, err - } - - txIn.SignatureScript = sigScript - - spentOutputs = append(spentOutputs, utxo) - } - - // As these outputs are now being spent by this newly created - // transaction, mark the outputs are "locked". This action ensures - // these outputs won't be double spent by any subsequent transactions. - // These locked outputs can be freed via a call to UnlockOutputs. - for _, utxo := range spentOutputs { - utxo.isLocked = true - } - - return tx, nil -} - -// UnlockOutputs unlocks any outputs which were previously locked due to -// being selected to fund a transaction via the CreateTransaction method. -// -// This function is safe for concurrent access. -func (m *memWallet) UnlockOutputs(inputs []*wire.TxIn) { - tracef(m.t, "memwallet.UnlockOutputs") - defer tracef(m.t, "memwallet.UnlockOutputs exit") - - m.Lock() - defer m.Unlock() - - for _, input := range inputs { - utxo, ok := m.utxos[input.PreviousOutPoint] - if !ok { - continue - } - - utxo.isLocked = false - } -} - -// ConfirmedBalance returns the confirmed balance of the wallet. -// -// This function is safe for concurrent access. -func (m *memWallet) ConfirmedBalance() dcrutil.Amount { - tracef(m.t, "memwallet.ConfirmedBalance") - defer tracef(m.t, "memwallet.ConfirmedBalance exit") - - m.RLock() - defer m.RUnlock() - - var balance dcrutil.Amount - for _, utxo := range m.utxos { - // Prevent any immature or locked outputs from contributing to - // the wallet's total confirmed balance. - if !utxo.isMature(m.currentHeight) || utxo.isLocked { - continue - } - - balance += utxo.value - } - - return balance -} - -// keyToAddr maps the passed private to corresponding p2pkh address. -func keyToAddr(serializedPrivKey []byte, net *chaincfg.Params) (stdaddr.Address, error) { - key := secp256k1.PrivKeyFromBytes(serializedPrivKey) - serializedKey := key.PubKey().SerializeCompressed() - pubKeyAddr, err := stdaddr.NewAddressPubKeyEcdsaSecp256k1V0Raw( - serializedKey, net) - if err != nil { - return nil, err - } - return pubKeyAddr.AddressPubKeyHash(), nil -} diff --git a/rpctest/node.go b/rpctest/node.go deleted file mode 100644 index 1937520b60..0000000000 --- a/rpctest/node.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Copyright (c) 2017-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. 
- -package rpctest - -import ( - "bufio" - "crypto/elliptic" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "sync" - "testing" - "time" - - "github.com/decred/dcrd/certgen" - rpc "github.com/decred/dcrd/rpcclient/v8" -) - -// nodeConfig contains all the args, and data required to launch a dcrd process -// and connect the rpc client to it. -type nodeConfig struct { - rpcUser string - rpcPass string - listen string - rpcListen string - rpcConnect string - dataDir string - logDir string - profile string - debugLevel string - extra []string - prefix string - - pathToDCRD string - endpoint string - certFile string - keyFile string - certificates []byte -} - -// newConfig returns a newConfig with all default values. -func newConfig(prefix, certFile, keyFile string, extra []string) (*nodeConfig, error) { - a := &nodeConfig{ - listen: "127.0.0.1:18555", - rpcListen: "127.0.0.1:18556", - rpcUser: "user", - rpcPass: "pass", - extra: extra, - prefix: prefix, - - endpoint: "ws", - certFile: certFile, - keyFile: keyFile, - } - if err := a.setDefaults(); err != nil { - return nil, err - } - return a, nil -} - -// setDefaults sets the default values of the config. It also creates the -// temporary data, and log directories which must be cleaned up with a call to -// cleanup(). -func (n *nodeConfig) setDefaults() error { - n.dataDir = filepath.Join(n.prefix, "data") - n.logDir = filepath.Join(n.prefix, "logs") - cert, err := os.ReadFile(n.certFile) - if err != nil { - return err - } - n.certificates = cert - return nil -} - -// arguments returns an array of arguments that be used to launch the dcrd -// process. -func (n *nodeConfig) arguments() []string { - args := []string{} - if n.rpcUser != "" { - // --rpcuser - args = append(args, fmt.Sprintf("--rpcuser=%s", n.rpcUser)) - } - if n.rpcPass != "" { - // --rpcpass - args = append(args, fmt.Sprintf("--rpcpass=%s", n.rpcPass)) - } - if n.listen != "" { - // --listen - args = append(args, fmt.Sprintf("--listen=%s", n.listen)) - } - if n.rpcListen != "" { - // --rpclisten - args = append(args, fmt.Sprintf("--rpclisten=%s", n.rpcListen)) - } - if n.rpcConnect != "" { - // --rpcconnect - args = append(args, fmt.Sprintf("--rpcconnect=%s", n.rpcConnect)) - } - // --rpccert - args = append(args, fmt.Sprintf("--rpccert=%s", n.certFile)) - // --rpckey - args = append(args, fmt.Sprintf("--rpckey=%s", n.keyFile)) - // --txindex - args = append(args, "--txindex") - if n.dataDir != "" { - // --datadir - args = append(args, fmt.Sprintf("--datadir=%s", n.dataDir)) - } - if n.logDir != "" { - // --logdir - args = append(args, fmt.Sprintf("--logdir=%s", n.logDir)) - } - if n.profile != "" { - // --profile - args = append(args, fmt.Sprintf("--profile=%s", n.profile)) - } - if n.debugLevel != "" { - // --debuglevel - args = append(args, fmt.Sprintf("--debuglevel=%s", n.debugLevel)) - } - // --allowunsyncedmining - args = append(args, "--allowunsyncedmining") - args = append(args, n.extra...) - return args -} - -// command returns the exec.Cmd which will be used to start the dcrd process. -func (n *nodeConfig) command() *exec.Cmd { - return exec.Command(n.pathToDCRD, n.arguments()...) -} - -// rpcConnConfig returns the rpc connection config that can be used to connect -// to the dcrd process that is launched via Start(). 
-func (n *nodeConfig) rpcConnConfig() rpc.ConnConfig { - return rpc.ConnConfig{ - Host: n.rpcListen, - Endpoint: n.endpoint, - User: n.rpcUser, - Pass: n.rpcPass, - Certificates: n.certificates, - DisableAutoReconnect: true, - } -} - -// String returns the string representation of this nodeConfig. -func (n *nodeConfig) String() string { - return n.prefix -} - -// node houses the necessary state required to configure, launch, and manage a -// dcrd process. -type node struct { - config *nodeConfig - - cmd *exec.Cmd - pidFile string - stderr io.ReadCloser - stdout io.ReadCloser - wg sync.WaitGroup - pid int - - dataDir string - - t *testing.T -} - -// logf is identical to n.t.Logf but it prepends the pid of this node. -func (n *node) logf(format string, args ...interface{}) { - pid := strconv.Itoa(n.pid) + " " - logf(n.t, pid+format, args...) -} - -// tracef is identical to debug.go.tracef but it prepends the pid of this -// node. -func (n *node) tracef(format string, args ...interface{}) { - if !trace { - return - } - pid := strconv.Itoa(n.pid) + " " - tracef(n.t, pid+format, args...) -} - -// buildNode creates a new temporary directory and node and saves the location -// to a package level variable where it is used for all tests. pathToDCRDMtx -// must be held for writes. -func buildNode(t *testing.T) error { - testNodeDir, err := os.MkdirTemp("", "rpctestdcrdnode") - if err != nil { - return err - } - pathToDCRD = filepath.Join(testNodeDir, "dcrd") - if runtime.GOOS == "windows" { - pathToDCRD += ".exe" - } - debugf(t, "test node located at: %v\n", pathToDCRD) - // Determine import path of this package. - _, rpctestDir, _, ok := runtime.Caller(1) - if !ok { - return fmt.Errorf("cannot get path to dcrd source code") - } - dcrdPkgPath := filepath.Join(rpctestDir, "..", "..") - // Build dcrd and output an executable in a static temp path. - cmd := exec.Command("go", "build", "-o", pathToDCRD, dcrdPkgPath) - err = cmd.Run() - if err != nil { - return fmt.Errorf("failed to build dcrd: %v", err) - } - return nil -} - -// newNode creates a new node instance according to the passed config. dataDir -// will be used to hold a file recording the pid of the launched process, and -// as the base for the log and data directories for dcrd. If pathToDCRD has a -// non-zero value, the executable located there is used. -func newNode(t *testing.T, config *nodeConfig, dataDir string) (*node, error) { - // Create the dcrd node used for tests if not created yet. - pathToDCRDMtx.Lock() - if pathToDCRD == "" { - if err := buildNode(t); err != nil { - pathToDCRDMtx.Unlock() - return nil, err - } - } - config.pathToDCRD = pathToDCRD - pathToDCRDMtx.Unlock() - return &node{ - config: config, - dataDir: dataDir, - cmd: config.command(), - t: t, - }, nil -} - -// start creates a new dcrd process, and writes its pid in a file reserved for -// recording the pid of the launched process. This file can be used to -// terminate the process in case of a hang, or panic. In the case of a failing -// test case, or panic, it is important that the process be stopped via stop(), -// otherwise, it will persist unless explicitly killed. -func (n *node) start() error { - var err error - - var pid sync.WaitGroup - pid.Add(1) - - // Redirect stderr. 
- n.stderr, err = n.cmd.StderrPipe() - if err != nil { - return err - } - n.wg.Add(1) - go func() { - defer n.wg.Done() - pid.Wait() // Block until pid is available - r := bufio.NewReader(n.stderr) - for { - line, err := r.ReadBytes('\n') - if errors.Is(err, io.EOF) { - n.tracef("stderr: EOF") - return - } - n.logf("stderr: %s", line) - } - }() - - // Redirect stdout. - n.stdout, err = n.cmd.StdoutPipe() - if err != nil { - return err - } - n.wg.Add(1) - go func() { - defer n.wg.Done() - pid.Wait() // Block until pid is available - r := bufio.NewReader(n.stdout) - for { - line, err := r.ReadBytes('\n') - if errors.Is(err, io.EOF) { - n.tracef("stdout: EOF") - return - } - n.tracef("stdout: %s", line) - } - }() - - // Launch command and store pid. - if err := n.cmd.Start(); err != nil { - return err - } - n.pid = n.cmd.Process.Pid - - // Unblock pipes now pid is available - pid.Done() - - f, err := os.Create(filepath.Join(n.config.String(), "dcrd.pid")) - if err != nil { - return err - } - - n.pidFile = f.Name() - if _, err = fmt.Fprintf(f, "%d\n", n.cmd.Process.Pid); err != nil { - return err - } - - return f.Close() -} - -// stop interrupts the running dcrd process, and waits until it exits -// properly. On windows, interrupt is not supported, so a kill signal is used -// instead -func (n *node) stop() error { - n.tracef("stop %p %p", n.cmd, n.cmd.Process) - defer n.tracef("stop done") - - if n.cmd == nil || n.cmd.Process == nil { - // return if not properly initialized - // or error starting the process - return nil - } - - // Send kill command - n.tracef("stop send kill") - var err error - if runtime.GOOS == "windows" { - err = n.cmd.Process.Signal(os.Kill) - } else { - err = n.cmd.Process.Signal(os.Interrupt) - } - if err != nil { - n.t.Logf("stop Signal error: %v", err) - } - - // Wait for pipes. - n.tracef("stop wg") - n.wg.Wait() - - // Wait for command to exit. - n.tracef("stop cmd.Wait") - err = n.cmd.Wait() - if err != nil { - n.t.Logf("stop cmd.Wait error: %v", err) - } - return nil -} - -// cleanup cleanups process and args files. The file housing the pid of the -// created process will be deleted, as well as any directories created by the -// process. -func (n *node) cleanup() error { - n.tracef("cleanup") - defer n.tracef("cleanup done") - - if n.pidFile != "" { - if err := os.Remove(n.pidFile); err != nil { - n.t.Logf("unable to remove file %s: %v", n.pidFile, - err) - return err - } - } - - return nil -} - -// shutdown terminates the running dcrd process, and cleans up all -// file/directories created by node. -func (n *node) shutdown() error { - n.tracef("shutdown") - defer n.tracef("shutdown done") - - if err := n.stop(); err != nil { - n.t.Logf("shutdown stop error: %v", err) - return err - } - return n.cleanup() -} - -// genCertPair generates a key/cert pair to the paths provided. -func genCertPair(certFile, keyFile string) error { - org := "rpctest autogenerated cert" - validUntil := time.Now().Add(10 * 365 * 24 * time.Hour) - cert, key, err := certgen.NewTLSCertPair(elliptic.P521(), org, - validUntil, nil) - if err != nil { - return err - } - - // Write cert and key files. 
- if err = os.WriteFile(certFile, cert, 0644); err != nil { - return err - } - if err = os.WriteFile(keyFile, key, 0600); err != nil { - os.Remove(certFile) - return err - } - - return nil -} diff --git a/rpctest/rpc_harness.go b/rpctest/rpc_harness.go deleted file mode 100644 index 9535d2aab0..0000000000 --- a/rpctest/rpc_harness.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Copyright (c) 2017-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpctest - -import ( - "context" - "fmt" - "net" - "os" - "path/filepath" - "strconv" - "sync" - "testing" - "time" - - "github.com/decred/dcrd/chaincfg/chainhash" - "github.com/decred/dcrd/chaincfg/v3" - "github.com/decred/dcrd/dcrutil/v4" - "github.com/decred/dcrd/rpcclient/v8" - "github.com/decred/dcrd/txscript/v4/stdaddr" - "github.com/decred/dcrd/wire" -) - -const ( - // These constants define the minimum and maximum p2p and rpc port - // numbers used by a test harness. The min port is inclusive while the - // max port is exclusive. - minPeerPort = 10000 - maxPeerPort = 35000 - minRPCPort = maxPeerPort - maxRPCPort = 60000 -) - -var ( - // XXX these variables are accessed in what should be accessor - // functions yet it is all global - - // current number of active test nodes. - numTestInstances = 0 - - // processID is the process ID of the current running process. It is - // used to calculate ports based upon it when launching an rpc - // harnesses. The intent is to allow multiple process to run in - // parallel without port collisions. - // - // It should be noted however that there is still some small probability - // that there will be port collisions either due to other processes - // running or simply due to the stars aligning on the process IDs. - processID = os.Getpid() - - // testInstances is a private package-level slice used to keep track of - // all active test harnesses. This global can be used to perform - // various "joins", shutdown several active harnesses after a test, - // etc. - testInstances = make(map[string]*Harness) - - // Used to protest concurrent access to above declared variables. - harnessStateMtx sync.RWMutex - - // pathToDCRD points to the test node. It is supplied through - // NewWithDCRD or created on the first call to newNode and used - // throughout the life of this package. - pathToDCRD string - pathToDCRDMtx sync.RWMutex -) - -// HarnessTestCase represents a test-case which utilizes an instance of the -// Harness to exercise functionality. -type HarnessTestCase func(ctx context.Context, r *Harness, t *testing.T) - -// Harness fully encapsulates an active dcrd process to provide a unified -// platform for creating rpc driven integration tests involving dcrd. The -// active dcrd node will typically be run in simnet mode in order to allow for -// easy generation of test blockchains. The active dcrd process is fully -// managed by Harness, which handles the necessary initialization, and teardown -// of the process along with any temporary directories created as a result. -// Multiple Harness instances may be run concurrently, in order to allow for -// testing complex scenarios involving multiple nodes. The harness also -// includes an in-memory wallet to streamline various classes of tests. -type Harness struct { - // ActiveNet is the parameters of the blockchain the Harness belongs - // to. 
- ActiveNet *chaincfg.Params - - Node *rpcclient.Client - node *node - handlers *rpcclient.NotificationHandlers - - wallet *memWallet - - testNodeDir string - maxConnRetries int - nodeNum int - - t *testing.T - - sync.Mutex -} - -// SetPathToDCRD sets the package level dcrd executable. All calls to New will -// use the dcrd located there throughout their life. If not set upon the first -// call to New, a dcrd will be created in a temporary directory and pathToDCRD -// set automatically. -// -// NOTE: This function is safe for concurrent access, but care must be taken -// when setting different paths and using New, as whatever is at pathToDCRD at -// the time will be identified with that node. -func SetPathToDCRD(fnScopePathToDCRD string) { - pathToDCRDMtx.Lock() - pathToDCRD = fnScopePathToDCRD - pathToDCRDMtx.Unlock() -} - -// New creates and initializes new instance of the rpc test harness. -// Optionally, websocket handlers and a specified configuration may be passed. -// In the case that a nil config is passed, a default configuration will be -// used. If pathToDCRD has not been set and working within the dcrd repository, -// a dcrd executable created from the directory at rpctest/../ (dcrd repo's -// root directory) will be created in a temporary directory. pathToDCRD will be -// set as that file's location. If pathToDCRD has already been set, the -// executable at that location will be used. -// -// NOTE: This function is safe for concurrent access, but care must be taken -// when calling New with different dcrd executables, as whatever is at -// pathToDCRD at the time will be identified with that node. -func New(t *testing.T, activeNet *chaincfg.Params, handlers *rpcclient.NotificationHandlers, extraArgs []string) (*Harness, error) { - harnessStateMtx.Lock() - defer harnessStateMtx.Unlock() - - // Add a flag for the appropriate network type based on the provided - // chain params. - switch activeNet.Net { - case wire.MainNet: - // No extra flags since mainnet is the default - case wire.TestNet3: - extraArgs = append(extraArgs, "--testnet") - case wire.SimNet: - extraArgs = append(extraArgs, "--simnet") - case wire.RegNet: - extraArgs = append(extraArgs, "--regnet") - default: - return nil, fmt.Errorf("rpctest.New must be called with one " + - "of the supported chain networks") - } - - harnessID := strconv.Itoa(numTestInstances) - nodeTestData, err := os.MkdirTemp("", "rpctest-"+harnessID) - if err != nil { - return nil, err - } - debugf(t, "temp dir: %v\n", nodeTestData) - - certFile := filepath.Join(nodeTestData, "rpc.cert") - keyFile := filepath.Join(nodeTestData, "rpc.key") - if err := genCertPair(certFile, keyFile); err != nil { - return nil, err - } - - wallet, err := newMemWallet(t, activeNet, uint32(numTestInstances)) - if err != nil { - return nil, err - } - - miningAddr := fmt.Sprintf("--miningaddr=%s", wallet.coinbaseAddr) - extraArgs = append(extraArgs, miningAddr) - - config, err := newConfig(nodeTestData, certFile, keyFile, extraArgs) - if err != nil { - return nil, err - } - - // Uncomment and change to enable additional dcrd debug/trace output. - // config.debugLevel = "TXMP=trace,TRSY=trace,RPCS=trace,PEER=trace" - - // Generate p2p+rpc listening addresses. - config.listen, config.rpcListen = generateListeningAddresses() - - // Create the testing node bounded to the simnet. 
- node, err := newNode(t, config, nodeTestData) - if err != nil { - return nil, err - } - nodeNum := numTestInstances - numTestInstances++ // XXX this really should be the length of the harness map. - - if handlers == nil { - handlers = &rpcclient.NotificationHandlers{} - } - - // If a handler for the OnBlockConnected/OnBlockDisconnected callback - // has already been set, then we create a wrapper callback which - // executes both the currently registered callback, and the mem - // wallet's callback. - if handlers.OnBlockConnected != nil { - obc := handlers.OnBlockConnected - handlers.OnBlockConnected = func(header []byte, filteredTxns [][]byte) { - wallet.IngestBlock(header, filteredTxns) - obc(header, filteredTxns) - } - } else { - // Otherwise, we can claim the callback ourselves. - handlers.OnBlockConnected = wallet.IngestBlock - } - if handlers.OnBlockDisconnected != nil { - obd := handlers.OnBlockDisconnected - handlers.OnBlockDisconnected = func(header []byte) { - wallet.UnwindBlock(header) - obd(header) - } - } else { - handlers.OnBlockDisconnected = wallet.UnwindBlock - } - - h := &Harness{ - handlers: handlers, - node: node, - maxConnRetries: 20, - testNodeDir: nodeTestData, - ActiveNet: activeNet, - nodeNum: nodeNum, - wallet: wallet, - t: t, - } - - // Track this newly created test instance within the package level - // global map of all active test instances. - testInstances[h.testNodeDir] = h - - return h, nil -} - -// SetUp initializes the rpc test state. Initialization includes: starting up a -// simnet node, creating a websockets client and connecting to the started -// node, and finally: optionally generating and submitting a testchain with a -// configurable number of mature coinbase outputs coinbase outputs. -// -// NOTE: This method and TearDown should always be called from the same -// goroutine as they are not concurrent safe. -func (h *Harness) SetUp(ctx context.Context, createTestChain bool, numMatureOutputs uint32) error { - // Start the dcrd node itself. This spawns a new process which will be - // managed - if err := h.node.start(); err != nil { - return err - } - if err := h.connectRPCClient(); err != nil { - return err - } - h.wallet.Start() - - // Filter transactions that pay to the coinbase associated with the - // wallet. - filterAddrs := []stdaddr.Address{h.wallet.coinbaseAddr} - if err := h.Node.LoadTxFilter(ctx, true, filterAddrs, nil); err != nil { - return err - } - - // Ensure dcrd properly dispatches our registered call-back for each new - // block. Otherwise, the memWallet won't function properly. - if err := h.Node.NotifyBlocks(ctx); err != nil { - return err - } - - tracef(h.t, "createTestChain %v numMatureOutputs %v", createTestChain, - numMatureOutputs) - // Create a test chain with the desired number of mature coinbase - // outputs. - if createTestChain && numMatureOutputs != 0 { - // Include an extra block to account for the premine block. - numToGenerate := (uint32(h.ActiveNet.CoinbaseMaturity) + - numMatureOutputs) + 1 - tracef(h.t, "Generate: %v", numToGenerate) - _, err := h.Node.Generate(ctx, numToGenerate) - if err != nil { - return err - } - } - - // Block until the wallet has fully synced up to the tip of the main - // chain. 
- _, height, err := h.Node.GetBestBlock(ctx) - if err != nil { - return err - } - tracef(h.t, "Best block height: %v", height) - ticker := time.NewTicker(time.Millisecond * 100) - for range ticker.C { - walletHeight := h.wallet.SyncedHeight() - if walletHeight == height { - break - } - } - tracef(h.t, "Synced: %v", height) - - return nil -} - -// TearDown stops the running rpc test instance. All created processes are -// killed, and temporary directories removed. -// -// NOTE: This method and SetUp should always be called from the same goroutine -// as they are not concurrent safe. -func (h *Harness) TearDown() error { - tracef(h.t, "TearDown %p %p", h.Node, h.node) - defer tracef(h.t, "TearDown done") - - if h.Node != nil { - tracef(h.t, "TearDown: Node") - h.Node.Shutdown() - } - - tracef(h.t, "TearDown: node") - if err := h.node.shutdown(); err != nil { - return err - } - - if !(debug || trace) { - if err := os.RemoveAll(h.testNodeDir); err != nil { - return err - } - } - - tracef(h.t, "TearDown deleting %v", h.node.pid) - delete(testInstances, h.testNodeDir) - - return nil -} - -// connectRPCClient attempts to establish an RPC connection to the created dcrd -// process belonging to this Harness instance. If the initial connection -// attempt fails, this function will retry h.maxConnRetries times, backing off -// the time between subsequent attempts. If after h.maxConnRetries attempts, -// we're not able to establish a connection, this function returns with an -// error. -func (h *Harness) connectRPCClient() error { - var client *rpcclient.Client - var err error - - rpcConf := h.node.config.rpcConnConfig() - for i := 0; i < h.maxConnRetries; i++ { - if client, err = rpcclient.New(&rpcConf, h.handlers); err != nil { - time.Sleep(time.Duration(i) * 50 * time.Millisecond) - continue - } - break - } - - if client == nil { - return fmt.Errorf("connection timeout") - } - - h.Node = client - h.wallet.SetRPCClient(client) - return nil -} - -// NewAddress returns a fresh address spendable by the Harness' internal -// wallet. -// -// This function is safe for concurrent access. -func (h *Harness) NewAddress(ctx context.Context) (stdaddr.Address, error) { - return h.wallet.NewAddress(ctx) -} - -// ConfirmedBalance returns the confirmed balance of the Harness' internal -// wallet. -// -// This function is safe for concurrent access. -func (h *Harness) ConfirmedBalance() dcrutil.Amount { - return h.wallet.ConfirmedBalance() -} - -// SendOutputs creates, signs, and finally broadcasts a transaction spending -// the harness' available mature coinbase outputs creating new outputs -// according to targetOutputs. -// -// This function is safe for concurrent access. -func (h *Harness) SendOutputs(ctx context.Context, targetOutputs []*wire.TxOut, feeRate dcrutil.Amount) (*chainhash.Hash, error) { - return h.wallet.SendOutputs(ctx, targetOutputs, feeRate) -} - -// CreateTransaction returns a fully signed transaction paying to the specified -// outputs while observing the desired fee rate. The passed fee rate should be -// expressed in atoms-per-byte. Any unspent outputs selected as inputs for -// the crafted transaction are marked as unspendable in order to avoid -// potential double-spends by future calls to this method. If the created -// transaction is cancelled for any reason then the selected inputs MUST be -// freed via a call to UnlockOutputs. Otherwise, the locked inputs won't be -// returned to the pool of spendable outputs. -// -// This function is safe for concurrent access. 
-func (h *Harness) CreateTransaction(ctx context.Context, targetOutputs []*wire.TxOut, feeRate dcrutil.Amount) (*wire.MsgTx, error) { - return h.wallet.CreateTransaction(ctx, targetOutputs, feeRate) -} - -// UnlockOutputs unlocks any outputs which were previously marked as -// unspendable due to being selected to fund a transaction via the -// CreateTransaction method. -// -// This function is safe for concurrent access. -func (h *Harness) UnlockOutputs(inputs []*wire.TxIn) { - h.wallet.UnlockOutputs(inputs) -} - -// RPCConfig returns the harnesses current rpc configuration. This allows other -// potential RPC clients created within tests to connect to a given test -// harness instance. -func (h *Harness) RPCConfig() rpcclient.ConnConfig { - return h.node.config.rpcConnConfig() -} - -// P2PAddress returns the harness node's configured listening address for P2P -// connections. -// -// Note that to connect two different harnesses, it's preferable to use the -// ConnectNode() function, which handles cases like already connected peers and -// ensures the connection actually takes place. -func (h *Harness) P2PAddress() string { - return h.node.config.listen -} - -// generateListeningAddresses returns two strings representing listening -// addresses designated for the current rpc test. If there haven't been any -// test instances created, the default ports are used. Otherwise, in order to -// support multiple test nodes running at once, the p2p and rpc port are -// incremented after each initialization. -func generateListeningAddresses() (string, string) { - localhost := "127.0.0.1" - - portString := func(minPort, maxPort int) string { - port := minPort + numTestInstances + ((20 * processID) % - (maxPort - minPort)) - return strconv.Itoa(port) - } - - p2p := net.JoinHostPort(localhost, portString(minPeerPort, maxPeerPort)) - rpc := net.JoinHostPort(localhost, portString(minRPCPort, maxRPCPort)) - return p2p, rpc -} diff --git a/rpctest/rpc_harness_test.go b/rpctest/rpc_harness_test.go deleted file mode 100644 index e32269606f..0000000000 --- a/rpctest/rpc_harness_test.go +++ /dev/null @@ -1,741 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Copyright (c) 2017-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -// This file is ignored during the regular tests due to the following build tag. -//go:build rpctest -// +build rpctest - -package rpctest - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/decred/dcrd/chaincfg/chainhash" - "github.com/decred/dcrd/chaincfg/v3" - "github.com/decred/dcrd/dcrutil/v4" - dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v4" - "github.com/decred/dcrd/wire" -) - -const ( - numMatureOutputs = 25 -) - -func testSendOutputs(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testSendOutputs start") - defer tracef(t, "testSendOutputs end") - - genSpend := func(amt dcrutil.Amount) *chainhash.Hash { - // Grab a fresh address from the wallet. - addr, err := r.NewAddress(ctx) - if err != nil { - t.Fatalf("unable to get new address: %v", err) - } - - // Next, send amt to this address, spending from one of our - // mature coinbase outputs. 
- addrScriptVer, addrScript := addr.PaymentScript() - output := newTxOut(int64(amt), addrScriptVer, addrScript) - txid, err := r.SendOutputs(ctx, []*wire.TxOut{output}, 10) - if err != nil { - t.Fatalf("coinbase spend failed: %v", err) - } - return txid - } - - assertTxMined := func(ctx context.Context, txid *chainhash.Hash, blockHash *chainhash.Hash) { - block, err := r.Node.GetBlock(ctx, blockHash) - if err != nil { - t.Fatalf("unable to get block: %v", err) - } - - numBlockTxns := len(block.Transactions) - if numBlockTxns < 2 { - t.Fatalf("crafted transaction wasn't mined, block should have "+ - "at least %v transactions instead has %v", 2, numBlockTxns) - } - - minedTx := block.Transactions[1] - txHash := minedTx.TxHash() - if txHash != *txid { - t.Fatalf("txid's don't match, %v vs %v", txHash, txid) - } - } - - // First, generate a small spend which will require only a single - // input. - txid := genSpend(dcrutil.Amount(5 * dcrutil.AtomsPerCoin)) - - // Generate a single block, the transaction the wallet created should - // be found in this block. - if err := r.Node.RegenTemplate(ctx); err != nil { - t.Fatalf("unable to regenerate block template: %v", err) - } - time.Sleep(time.Millisecond * 500) - blockHashes, err := r.Node.Generate(ctx, 1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - assertTxMined(ctx, txid, blockHashes[0]) - - // Next, generate a spend much greater than the block reward. This - // transaction should also have been mined properly. - txid = genSpend(dcrutil.Amount(5000 * dcrutil.AtomsPerCoin)) - if err := r.Node.RegenTemplate(ctx); err != nil { - t.Fatalf("unable to regenerate block template: %v", err) - } - time.Sleep(time.Millisecond * 500) - blockHashes, err = r.Node.Generate(ctx, 1) - if err != nil { - t.Fatalf("unable to generate single block: %v", err) - } - assertTxMined(ctx, txid, blockHashes[0]) - - // Generate another block to ensure the transaction is removed from the - // mempool. - if _, err := r.Node.Generate(ctx, 1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } -} - -func assertConnectedTo(ctx context.Context, t *testing.T, nodeA *Harness, nodeB *Harness) { - tracef(t, "assertConnectedTo start") - defer tracef(t, "assertConnectedTo end") - - nodeAPeers, err := nodeA.Node.GetPeerInfo(ctx) - if err != nil { - t.Fatalf("unable to get nodeA's peer info") - } - - nodeAddr := nodeB.node.config.listen - addrFound := false - for _, peerInfo := range nodeAPeers { - if peerInfo.Addr == nodeAddr { - addrFound = true - tracef(t, "found %v", nodeAddr) - break - } - } - - if !addrFound { - t.Fatal("nodeA not connected to nodeB") - } -} - -func testConnectNode(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testConnectNode start") - defer tracef(t, "testConnectNode end") - - // Create a fresh test harness. - harness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - if err := harness.SetUp(ctx, false, 0); err != nil { - t.Fatalf("unable to complete rpctest setup: %v", err) - } - defer func() { - tracef(t, "testConnectNode: calling harness.TearDown") - harness.TearDown() - }() - - // Establish a p2p connection from our new local harness to the main - // harness. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect local to main harness: %v", err) - } - - // The main harness should show up in our local harness' peer's list, - // and vice verse. 
- assertConnectedTo(ctx, t, harness, r) -} - -func assertNotConnectedTo(ctx context.Context, t *testing.T, nodeA *Harness, nodeB *Harness) { - tracef(t, "assertNotConnectedTo start") - defer tracef(t, "assertNotConnectedTo end") - - nodeAPeers, err := nodeA.Node.GetPeerInfo(ctx) - if err != nil { - t.Fatalf("unable to get nodeA's peer info") - } - - nodeAddr := nodeB.node.config.listen - addrFound := false - for _, peerInfo := range nodeAPeers { - if peerInfo.Addr == nodeAddr { - addrFound = true - break - } - } - - if addrFound { - t.Fatal("nodeA is connected to nodeB") - } -} - -func testDisconnectNode(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testDisconnectNode start") - defer tracef(t, "testDisconnectNode end") - - // Create a fresh test harness. - harness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - if err := harness.SetUp(ctx, false, 0); err != nil { - t.Fatalf("unable to complete rpctest setup: %v", err) - } - defer harness.TearDown() - - // Establish a p2p connection from our new local harness to the main - // harness. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect local to main harness: %v", err) - } - - // Sanity check. - assertConnectedTo(ctx, t, harness, r) - - // Disconnect the nodes. - if err := RemoveNode(ctx, harness, r); err != nil { - t.Fatalf("unable to disconnect local to main harness: %v", err) - } - - assertNotConnectedTo(ctx, t, harness, r) - - // Re-connect the nodes. We'll perform the test in the reverse direction now - // and assert that the nodes remain connected and that RemoveNode() fails. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect local to main harness: %v", err) - } - - // Sanity check. - assertConnectedTo(ctx, t, harness, r) - - // Try to disconnect the nodes in the reverse direction. This should fail, - // as the nodes are connected in the harness->r direction. - if err := RemoveNode(ctx, r, harness); err == nil { - t.Fatalf("removeNode on unconnected peers should return an error") - } - - // Ensure the nodes remain connected after trying to disconnect them in the - // reverse order. - assertConnectedTo(ctx, t, harness, r) -} - -func testNodesConnected(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testNodesConnected start") - defer tracef(t, "testNodesConnected end") - - // Create a fresh test harness. - harness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - if err := harness.SetUp(ctx, false, 0); err != nil { - t.Fatalf("unable to complete rpctest setup: %v", err) - } - defer harness.TearDown() - - // Establish a p2p connection from our new local harness to the main - // harness. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect local to main harness: %v", err) - } - - // Sanity check. - assertConnectedTo(ctx, t, harness, r) - - // Ensure nodes are still connected. - assertConnectedTo(ctx, t, harness, r) - - testCases := []struct { - name string - allowReverse bool - expected bool - from *Harness - to *Harness - }{ - // The existing connection is h->r. 
- {"!allowReverse, h->r", false, true, harness, r}, - {"allowReverse, h->r", true, true, harness, r}, - {"!allowReverse, r->h", false, false, r, harness}, - {"allowReverse, r->h", true, true, r, harness}, - } - - for _, tc := range testCases { - actual, err := NodesConnected(ctx, tc.from, tc.to, tc.allowReverse) - if err != nil { - t.Fatalf("unable to determine node connection: %v", err) - } - if actual != tc.expected { - t.Fatalf("test case %s: actual result (%v) differs from expected "+ - "(%v)", tc.name, actual, tc.expected) - } - } - - // Disconnect the nodes. - if err := RemoveNode(ctx, harness, r); err != nil { - t.Fatalf("unable to disconnect local to main harness: %v", err) - } - - // Sanity check. - assertNotConnectedTo(ctx, t, harness, r) - - // All test cases must return false now. - for _, tc := range testCases { - actual, err := NodesConnected(ctx, tc.from, tc.to, tc.allowReverse) - if err != nil { - t.Fatalf("unable to determine node connection: %v", err) - } - if actual { - t.Fatalf("test case %s: nodes connected after commanded to "+ - "disconnect", tc.name) - } - } -} - -func testTearDownAll(t *testing.T) { - tracef(t, "testTearDownAll start") - defer tracef(t, "testTearDownAll end") - - // Grab a local copy of the currently active harnesses before - // attempting to tear them all down. - initialActiveHarnesses := ActiveHarnesses() - - // Tear down all currently active harnesses. - if err := TearDownAll(); err != nil { - t.Fatalf("unable to teardown all harnesses: %v", err) - } - - // The global testInstances map should now be fully purged with no - // active test harnesses remaining. - if len(ActiveHarnesses()) != 0 { - t.Fatalf("test harnesses still active after TearDownAll") - } - - for _, harness := range initialActiveHarnesses { - // Ensure all test directories have been deleted. - if _, err := os.Stat(harness.testNodeDir); err == nil { - if !(debug || trace) { - t.Errorf("created test datadir was not deleted.") - } - } - } -} - -func testActiveHarnesses(_ context.Context, r *Harness, t *testing.T) { - tracef(t, "testActiveHarnesses start") - defer tracef(t, "testActiveHarnesses end") - - numInitialHarnesses := len(ActiveHarnesses()) - - // Create a single test harness. - harness1, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - defer harness1.TearDown() - - // With the harness created above, a single harness should be detected - // as active. - numActiveHarnesses := len(ActiveHarnesses()) - if !(numActiveHarnesses > numInitialHarnesses) { - t.Fatalf("ActiveHarnesses not updated, should have an " + - "additional test harness listed.") - } -} - -func testJoinMempools(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testJoinMempools start") - defer tracef(t, "testJoinMempools end") - - // Assert main test harness has no transactions in its mempool. - pooledHashes, err := r.Node.GetRawMempool(ctx, dcrdtypes.GRMAll) - if err != nil { - t.Fatalf("unable to get mempool for main test harness: %v", err) - } - if len(pooledHashes) != 0 { - t.Fatal("main test harness mempool not empty") - } - - // Create a local test harness with only the genesis block. The nodes - // will be synced below so the same transaction can be sent to both - // nodes without it being an orphan. 
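The directional expectations in the testNodesConnected table above follow purely from which side opened the p2p connection. The sketch below reproduces the same truth table without a running node; connected and peers are illustrative stand-ins for GetPeerInfo results, not part of this package:

    package main

    import "fmt"

    // peers maps a node name to the addresses it currently reports as peers.
    type peers map[string][]string

    // connected mirrors the semantics of NodesConnected: check from->to, and
    // only fall back to the reverse direction when allowReverse is set.
    func connected(p peers, from, to string, allowReverse bool) bool {
        has := func(a, b string) bool {
            for _, addr := range p[a] {
                if addr == b {
                    return true
                }
            }
            return false
        }
        if has(from, to) {
            return true
        }
        return allowReverse && has(to, from)
    }

    func main() {
        // The harness connection established in these tests is h -> r only.
        p := peers{"h": {"r"}}
        fmt.Println(connected(p, "h", "r", false)) // true
        fmt.Println(connected(p, "r", "h", false)) // false
        fmt.Println(connected(p, "r", "h", true))  // true
    }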
- harness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - if err := harness.SetUp(ctx, false, 0); err != nil { - t.Fatalf("unable to complete rpctest setup: %v", err) - } - defer harness.TearDown() - - nodeSlice := []*Harness{r, harness} - - // Both mempools should be considered synced as they are empty. - // Therefore, this should return instantly. - if err := JoinNodes(ctx, nodeSlice, Mempools); err != nil { - t.Fatalf("unable to join node on mempools: %v", err) - } - - // Generate a coinbase spend to a new address within the main harness' - // mempool. - addr, err := r.NewAddress(ctx) - if err != nil { - t.Fatalf("unable to get new address: %v", err) - } - addrScriptVer, addrScript := addr.PaymentScript() - if err != nil { - t.Fatalf("unable to generate pkscript to addr: %v", err) - } - output := newTxOut(5e8, addrScriptVer, addrScript) - testTx, err := r.CreateTransaction(ctx, []*wire.TxOut{output}, 10) - if err != nil { - t.Fatalf("coinbase spend failed: %v", err) - } - if _, err := r.Node.SendRawTransaction(ctx, testTx, true); err != nil { - t.Fatalf("send transaction failed: %v", err) - } - - // Wait until the transaction shows up to ensure the two mempools are - // not the same. - harnessSynced := make(chan error) - go func() { - for { - poolHashes, err := r.Node.GetRawMempool(ctx, dcrdtypes.GRMAll) - if err != nil { - err = fmt.Errorf("failed to retrieve harness mempool: %w", err) - harnessSynced <- err - return - } - if len(poolHashes) > 0 { - break - } - time.Sleep(time.Millisecond * 100) - } - harnessSynced <- nil - }() - - select { - case err := <-harnessSynced: - if err != nil { - t.Fatal(err) - } - case <-time.After(time.Minute): - t.Fatal("harness node never received transaction") - } - - // This select case should fall through to the default as the goroutine - // should be blocked on the JoinNodes call. - poolsSynced := make(chan error) - go func() { - if err := JoinNodes(ctx, nodeSlice, Mempools); err != nil { - err = fmt.Errorf("unable to join node on mempools: %w", err) - poolsSynced <- err - return - } - poolsSynced <- nil - }() - select { - case err := <-poolsSynced: - if err != nil { - t.Fatal(err) - } - t.Fatal("mempools detected as synced yet harness has a new tx") - default: - } - - // Establish an outbound connection from the local harness to the main - // harness and wait for the chains to be synced. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - if err := JoinNodes(ctx, nodeSlice, Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // Send the transaction to the local harness which will result in synced - // mempools. - if _, err := harness.Node.SendRawTransaction(ctx, testTx, true); err != nil { - t.Fatalf("send transaction failed: %v", err) - } - - // Select once again with a special timeout case after 1 minute. The - // goroutine above should now be blocked on sending into the unbuffered - // channel. The send should immediately succeed. In order to avoid the - // test hanging indefinitely, a 1 minute timeout is in place. 
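Both mempool assertions in this test reduce to two select idioms: a non-blocking probe that must hit the default case while JoinNodes is still blocked, and a bounded wait that must receive before the timeout fires. The standalone sketch below isolates just those idioms, with a short sleep standing in for JoinNodes; all names are illustrative:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        done := make(chan error)

        go func() {
            time.Sleep(50 * time.Millisecond) // stand-in for JoinNodes
            done <- nil
        }()

        // Non-blocking probe: falls through to default while the goroutine
        // is still working.
        select {
        case err := <-done:
            fmt.Println("unexpected early result:", err)
        default:
            fmt.Println("still syncing, as expected")
        }

        // Bounded wait: succeeds once the goroutine finishes, or gives up
        // after the timeout.
        select {
        case err := <-done:
            fmt.Println("synced:", err)
        case <-time.After(time.Second):
            fmt.Println("timed out")
        }
    }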
- select { - case err := <-poolsSynced: - if err != nil { - t.Fatal(err) - } - case <-time.After(time.Minute): - t.Fatal("mempools never detected as synced") - } -} - -func testJoinBlocks(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testJoinBlocks start") - defer tracef(t, "testJoinBlocks end") - - // Create a second harness with only the genesis block so it is behind - // the main harness. - harness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - if err := harness.SetUp(ctx, false, 0); err != nil { - t.Fatalf("unable to complete rpctest setup: %v", err) - } - defer harness.TearDown() - - nodeSlice := []*Harness{r, harness} - blocksSynced := make(chan error) - go func() { - if err := JoinNodes(ctx, nodeSlice, Blocks); err != nil { - blocksSynced <- fmt.Errorf("unable to join node on blocks: %w", err) - return - } - blocksSynced <- nil - }() - - // This select case should fall through to the default as the goroutine - // should be blocked on the JoinNodes calls. - select { - case err := <-blocksSynced: - if err != nil { - t.Fatal(err) - } - t.Fatalf("blocks detected as synced yet local harness is behind") - default: - } - - // Connect the local harness to the main harness which will sync the - // chains. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - - // Select once again with a special timeout case after 1 minute. The - // goroutine above should now be blocked on sending into the unbuffered - // channel. The send should immediately succeed. In order to avoid the - // test hanging indefinitely, a 1 minute timeout is in place. - select { - case err := <-blocksSynced: - if err != nil { - t.Fatal(err) - } - case <-time.After(time.Minute): - t.Fatalf("blocks never detected as synced") - } -} - -func testMemWalletReorg(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testMemWalletReorg start") - defer tracef(t, "testMemWalletReorg end") - - // Create a fresh harness, we'll be using the main harness to force a - // re-org on this local harness. - harness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatal(err) - } - if err := harness.SetUp(ctx, true, 5); err != nil { - t.Fatalf("unable to complete rpctest setup: %v", err) - } - defer harness.TearDown() - - // Ensure the internal wallet has the expected balance. - expectedBalance := dcrutil.Amount(5 * 300 * dcrutil.AtomsPerCoin) - walletBalance := harness.ConfirmedBalance() - if expectedBalance != walletBalance { - t.Fatalf("wallet balance incorrect: expected %v, got %v", - expectedBalance, walletBalance) - } - - // Now connect this local harness to the main harness then wait for - // their chains to synchronize. - if err := ConnectNode(ctx, harness, r); err != nil { - t.Fatalf("unable to connect harnesses: %v", err) - } - nodeSlice := []*Harness{r, harness} - if err := JoinNodes(ctx, nodeSlice, Blocks); err != nil { - t.Fatalf("unable to join node on blocks: %v", err) - } - - // The original wallet should now have a balance of 0 Coin as its entire - // chain should have been decimated in favor of the main harness' - // chain. 
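The balance figures asserted in these tests all come from the same arithmetic: the number of mature coinbases times the 300-coin per-block figure used by the assertions in this file, expressed in atoms. A hypothetical helper making that explicit (the 300 value is taken from the checks above and below, not from chaincfg):

    package rpctest

    import "github.com/decred/dcrd/dcrutil/v4"

    // expectedMatureBalance expresses the balance checks used in these tests:
    // every mature coinbase is expected to add rewardCoins whole coins to the
    // harness wallet. rewardCoins is 300 in the assertions in this file.
    func expectedMatureBalance(matureOutputs, rewardCoins int64) dcrutil.Amount {
        return dcrutil.Amount(matureOutputs * rewardCoins * dcrutil.AtomsPerCoin)
    }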
- expectedBalance = dcrutil.Amount(0) - walletBalance = harness.ConfirmedBalance() - if expectedBalance != walletBalance { - t.Fatalf("wallet balance incorrect: expected %v, got %v", - expectedBalance, walletBalance) - } -} - -func testMemWalletLockedOutputs(ctx context.Context, r *Harness, t *testing.T) { - tracef(t, "testMemWalletLockedOutputs start") - defer tracef(t, "testMemWalletLockedOutputs end") - - // Obtain the initial balance of the wallet at this point. - startingBalance := r.ConfirmedBalance() - - // First, create a signed transaction spending some outputs. - addr, err := r.NewAddress(ctx) - if err != nil { - t.Fatalf("unable to generate new address: %v", err) - } - pkScriptVer, pkScript := addr.PaymentScript() - outputAmt := dcrutil.Amount(50 * dcrutil.AtomsPerCoin) - output := newTxOut(int64(outputAmt), pkScriptVer, pkScript) - tx, err := r.CreateTransaction(ctx, []*wire.TxOut{output}, 10) - if err != nil { - t.Fatalf("unable to create transaction: %v", err) - } - - // The current wallet balance should now be at least 50 Coin less - // (accounting for fees) than the period balance - currentBalance := r.ConfirmedBalance() - if !(currentBalance <= startingBalance-outputAmt) { - t.Fatalf("spent outputs not locked: previous balance %v, "+ - "current balance %v", startingBalance, currentBalance) - } - - // Now unlocked all the spent inputs within the unbroadcast signed - // transaction. The current balance should now be exactly that of the - // starting balance. - r.UnlockOutputs(tx.TxIn) - currentBalance = r.ConfirmedBalance() - if currentBalance != startingBalance { - t.Fatalf("current and starting balance should now match: "+ - "expected %v, got %v", startingBalance, currentBalance) - } -} - -func TestHarness(t *testing.T) { - var err error - mainHarness, err := New(t, chaincfg.RegNetParams(), nil, nil) - if err != nil { - t.Fatalf("unable to create main harness: %v", err) - } - - // Initialize the main mining node with a chain of length 42, providing - // 25 mature coinbases to allow spending from for testing purposes. - ctx := context.Background() - if err = mainHarness.SetUp(ctx, true, numMatureOutputs); err != nil { - // Even though the harness was not fully setup, it still needs - // to be torn down to ensure all resources such as temp - // directories are cleaned up. The error is intentionally - // ignored since this is already an error path and nothing else - // could be done about it anyways. - _ = mainHarness.TearDown() - t.Fatalf("unable to setup test chain: %v", err) - } - - // Cleanup when we exit. - defer func() { - // Clean up any active harnesses that are still currently - // running. - if len(ActiveHarnesses()) > 0 { - if err := TearDownAll(); err != nil { - t.Fatalf("unable to tear down chain: %v", err) - } - } - }() - - // We should have the expected amount of mature unspent outputs. - expectedBalance := dcrutil.Amount(numMatureOutputs * 300 * dcrutil.AtomsPerCoin) - harnessBalance := mainHarness.ConfirmedBalance() - if harnessBalance != expectedBalance { - t.Fatalf("expected wallet balance of %v instead have %v", - expectedBalance, harnessBalance) - } - - // Current tip should be at a height of numMatureOutputs plus the - // required number of blocks for coinbase maturity plus an additional - // block for the premine block. 
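testMemWalletLockedOutputs above depends on the in-memory wallet reserving the inputs of an unbroadcast transaction until UnlockOutputs is called. The sketch below models only that reservation bookkeeping with a map keyed by outpoint; it is an illustration, not the memwallet's actual implementation:

    package rpctest

    import "github.com/decred/dcrd/wire"

    // lockedOutpoints tracks outputs reserved by transactions that have been
    // created but not yet broadcast. Reserved outputs are excluded from the
    // confirmed balance until they are unlocked again.
    type lockedOutpoints map[wire.OutPoint]struct{}

    func (l lockedOutpoints) lock(op wire.OutPoint)   { l[op] = struct{}{} }
    func (l lockedOutpoints) unlock(op wire.OutPoint) { delete(l, op) }

    func (l lockedOutpoints) isLocked(op wire.OutPoint) bool {
        _, ok := l[op]
        return ok
    }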
- nodeInfo, err := mainHarness.Node.GetInfo(ctx) - if err != nil { - t.Fatalf("unable to execute getinfo on node: %v", err) - } - coinbaseMaturity := uint32(mainHarness.ActiveNet.CoinbaseMaturity) - expectedChainHeight := numMatureOutputs + coinbaseMaturity + 1 - if uint32(nodeInfo.Blocks) != expectedChainHeight { - t.Errorf("Chain height is %v, should be %v", - nodeInfo.Blocks, expectedChainHeight) - } - - // Skip tests when running with -short - if !testing.Short() { - tests := []struct { - name string - f func(context.Context, *Harness, *testing.T) - }{ - { - f: testSendOutputs, - name: "testSendOutputs", - }, - { - f: testConnectNode, - name: "testConnectNode", - }, - { - f: testDisconnectNode, - name: "testDisconnectNode", - }, - { - f: testNodesConnected, - name: "testNodesConnected", - }, - { - f: testActiveHarnesses, - name: "testActiveHarnesses", - }, - { - f: testJoinBlocks, - name: "testJoinBlocks", - }, - { - f: testJoinMempools, // Depends on results of testJoinBlocks - name: "testJoinMempools", - }, - { - f: testMemWalletReorg, - name: "testMemWalletReorg", - }, - { - f: testMemWalletLockedOutputs, - name: "testMemWalletLockedOutputs", - }, - } - - for _, testCase := range tests { - t.Logf("=== Running test: %v ===", testCase.name) - - c := make(chan struct{}) - go func() { - testCase.f(ctx, mainHarness, t) - c <- struct{}{} - }() - - // Go wait for 10 seconds - select { - case <-c: - case <-time.After(10 * time.Second): - t.Logf("Test timeout, aborting running nodes") - PanicAll(t) - os.Exit(1) - } - } - } - - testTearDownAll(t) -} diff --git a/rpctest/simnet_miner.go b/rpctest/simnet_miner.go deleted file mode 100644 index 21f77692b6..0000000000 --- a/rpctest/simnet_miner.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright (c) 2020-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpctest - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "math" - "runtime" - "time" - - "github.com/decred/dcrd/blockchain/standalone/v2" - "github.com/decred/dcrd/chaincfg/chainhash" - dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v4" - "github.com/decred/dcrd/rpcclient/v8" - "github.com/decred/dcrd/wire" -) - -// solveBlock attempts to find a nonce which makes the passed block header hash -// to a value less than the target difficulty. When a successful solution is -// found, true is returned and the nonce field of the passed header is updated -// with the solution. False is returned if no solution exists. -func solveBlock(header *wire.BlockHeader) bool { - // sbResult is used by the solver goroutines to send results. - type sbResult struct { - found bool - nonce uint32 - } - - // solver accepts a block header and a nonce range to test. It is - // intended to be run as a goroutine. - targetDifficulty := standalone.CompactToBig(header.Bits) - quit := make(chan bool) - results := make(chan sbResult) - solver := func(hdr wire.BlockHeader, startNonce, stopNonce uint32) { - // We need to modify the nonce field of the header, so make sure - // we work with a copy of the original header. 
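solveBlock fans the nonce search out across runtime.NumCPU() goroutines and stops the rest as soon as one of them succeeds. The standalone sketch below (firstHit and try are illustrative names) shows the same fan-out/first-result shape with a quit channel; unlike the loop a few lines down it does not guard against nonce wraparound, and losing workers simply return instead of reporting a miss:

    package main

    import (
        "fmt"
        "sync"
    )

    // firstHit searches each nonce range on its own goroutine and returns the
    // first nonce for which try reports success. Closing quit tells the
    // remaining workers to stop early once a solution has been found.
    func firstHit(ranges [][2]uint32, try func(uint32) bool) (uint32, bool) {
        type result struct {
            nonce uint32
            found bool
        }
        quit := make(chan struct{})
        results := make(chan result, len(ranges))
        var wg sync.WaitGroup
        for _, r := range ranges {
            wg.Add(1)
            go func(start, stop uint32) {
                defer wg.Done()
                for n := start; n <= stop; n++ {
                    select {
                    case <-quit:
                        return
                    default:
                    }
                    if try(n) {
                        results <- result{nonce: n, found: true}
                        return
                    }
                }
            }(r[0], r[1])
        }
        go func() { wg.Wait(); close(results) }()

        for res := range results {
            if res.found {
                close(quit)
                return res.nonce, true
            }
        }
        return 0, false
    }

    func main() {
        nonce, ok := firstHit([][2]uint32{{0, 1 << 16}, {1<<16 + 1, 1 << 17}},
            func(n uint32) bool { return n == 70000 })
        fmt.Println(nonce, ok) // 70000 true
    }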
- for i := startNonce; i >= startNonce && i <= stopNonce; i++ { - select { - case <-quit: - results <- sbResult{false, 0} - return - default: - hdr.Nonce = i - hash := hdr.BlockHash() - if standalone.HashToBig(&hash).Cmp( - targetDifficulty) <= 0 { - - results <- sbResult{true, i} - return - } - } - } - results <- sbResult{false, 0} - } - - startNonce := uint32(1) - stopNonce := uint32(math.MaxUint32) - numCores := uint32(runtime.NumCPU()) - noncesPerCore := (stopNonce - startNonce) / numCores - for i := uint32(0); i < numCores; i++ { - rangeStart := startNonce + (noncesPerCore * i) - rangeStop := startNonce + (noncesPerCore * (i + 1)) - 1 - if i == numCores-1 { - rangeStop = stopNonce - } - go solver(*header, rangeStart, rangeStop) - } - var foundResult bool - for i := uint32(0); i < numCores; i++ { - result := <-results - if !foundResult && result.found { - close(quit) - header.Nonce = result.nonce - foundResult = true - } - } - - return foundResult -} - -func waitPredicate(pred func() bool, timeout time.Duration) error { - const pollInterval = 20 * time.Millisecond - - exitTimer := time.After(timeout) - for { - <-time.After(pollInterval) - - select { - case <-exitTimer: - return fmt.Errorf("predicate not satisfied after time out") - default: - } - - if pred() { - return nil - } - } -} - -// AdjustedSimnetMiner is an alternative miner function that instead of relying -// on the backing node to mine a block, fetches the work required for the next -// block and mines the block itself while adjusting the timestamp so that (on -// simnet) no difficulty increase is trigered. After finding a block, it -// automatically publishes it to the underlying node. -// -// This is only applicable for tests that run on simnet or other networks that -// have a target block per count of 1 second. -func AdjustedSimnetMiner(ctx context.Context, client *rpcclient.Client, nb uint32) ([]*chainhash.Hash, error) { - // Fetch the current template. This might fail if there aren't enough - // tickets in the mempool yet, so perform a few tries. - var prevWork *dcrdtypes.GetWorkResult - err := waitPredicate(func() bool { - var err error - prevWork, err = client.GetWork(ctx) - return err == nil - }, time.Second*10) - if err != nil { - return nil, err - } - - // Force regeneration of the block template prior to generating this - // set of blocks so that it's current, then wait for a bit for it to be - // updated. - err = client.RegenTemplate(ctx) - if err != nil { - return nil, fmt.Errorf("unable to regenerate block template: %v", err) - } - - // Wait until the template changes or some time has passed. - waitPredicate(func() bool { - work, err := client.GetWork(ctx) - if err != nil { - return false - } - return work.Data != prevWork.Data - }, time.Second) - - hashes := make([]*chainhash.Hash, nb) - for i := uint32(0); i < nb; i++ { - work, err := client.GetWork(ctx) - if err != nil { - return nil, err - } - - workBytes, err := hex.DecodeString(work.Data) - if err != nil { - return nil, err - } - - var header wire.BlockHeader - err = header.FromBytes(workBytes) - if err != nil { - return nil, err - } - - // For block heights other then the premine, register header as - // one second after the previous block to ensure difficulty - // does not increase. 
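waitPredicate above is the harness' generic poll-until-true helper. A context-aware variant with the same fixed-interval behaviour could look like the sketch below (waitFor is an illustrative name, not part of this package); the only behavioural difference is that it checks the predicate once before the first sleep:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // waitFor polls pred on a fixed interval until it returns true, the
    // timeout elapses, or the context is cancelled.
    func waitFor(ctx context.Context, pred func() bool, timeout, interval time.Duration) error {
        ctx, cancel := context.WithTimeout(ctx, timeout)
        defer cancel()

        ticker := time.NewTicker(interval)
        defer ticker.Stop()

        for {
            if pred() {
                return nil
            }
            select {
            case <-ctx.Done():
                return errors.New("predicate not satisfied before timeout")
            case <-ticker.C:
            }
        }
    }

    func main() {
        start := time.Now()
        err := waitFor(context.Background(),
            func() bool { return time.Since(start) > 40*time.Millisecond },
            time.Second, 10*time.Millisecond)
        fmt.Println(err) // <nil>
    }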
- if header.Height > 1 { - prevBlock, err := client.GetBlock(ctx, &header.PrevBlock) - if err != nil { - return nil, err - } - - header.Timestamp = prevBlock.Header.Timestamp.Add(time.Second) - } - solved := solveBlock(&header) - if !solved { - return nil, errors.New("unable to solve block") - } - - var extraBytes [12]byte - workBytes, err = header.Bytes() - if err != nil { - return nil, err - } - workBytes = append(workBytes, extraBytes[:]...) - workData := hex.EncodeToString(workBytes) - accepted, err := client.GetWorkSubmit(ctx, workData) - if err != nil { - return nil, err - } - - if !accepted { - return nil, errors.New("solved block was not accepted") - } - - bh := header.BlockHash() - hashes[i] = &bh - } - - return hashes, nil -} diff --git a/rpctest/utils.go b/rpctest/utils.go deleted file mode 100644 index bb1c347071..0000000000 --- a/rpctest/utils.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright (c) 2016 The btcsuite developers -// Copyright (c) 2017-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpctest - -import ( - "context" - "reflect" - "runtime" - "syscall" - "testing" - "time" - - dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v4" - "github.com/decred/dcrd/rpcclient/v8" -) - -// JoinType is an enum representing a particular type of "node join". A node -// join is a synchronization tool used to wait until a subset of nodes have a -// consistent state with respect to an attribute. -type JoinType uint8 - -const ( - // Blocks is a JoinType which waits until all nodes share the same - // block height. - Blocks JoinType = iota - - // Mempools is a JoinType which blocks until all nodes have identical - // mempool. - Mempools -) - -// JoinNodes is a synchronization tool used to block until all passed nodes are -// fully synced with respect to an attribute. This function will block for a -// period of time, finally returning once all nodes are synced according to the -// passed JoinType. This function be used to ensure all active test -// harnesses are at a consistent state before proceeding to an assertion or -// check within rpc tests. -func JoinNodes(ctx context.Context, nodes []*Harness, joinType JoinType) error { - switch joinType { - case Blocks: - return syncBlocks(ctx, nodes) - case Mempools: - return syncMempools(ctx, nodes) - } - return nil -} - -// syncMempools blocks until all nodes have identical mempools. -func syncMempools(ctx context.Context, nodes []*Harness) error { - poolsMatch := false - - for !poolsMatch { - retry: - firstPool, err := nodes[0].Node.GetRawMempool(ctx, dcrdtypes.GRMAll) - if err != nil { - return err - } - - // If all nodes have an identical mempool with respect to the - // first node, then we're done. Otherwise, drop back to the top - // of the loop and retry after a short wait period. - for _, node := range nodes[1:] { - nodePool, err := node.Node.GetRawMempool(ctx, dcrdtypes.GRMAll) - if err != nil { - return err - } - - if !reflect.DeepEqual(firstPool, nodePool) { - time.Sleep(time.Millisecond * 100) - goto retry - } - } - - poolsMatch = true - } - - return nil -} - -// syncBlocks blocks until all nodes report the same block height. 
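syncMempools above compares getrawmempool results with reflect.DeepEqual, which also requires both nodes to report the hashes in the same order. If the ordering ever differed, a multiset comparison would be the order-insensitive alternative; the sketch below assumes the []*chainhash.Hash result type returned by GetRawMempool and uses a hypothetical sameMempool name:

    package rpctest

    import "github.com/decred/dcrd/chaincfg/chainhash"

    // sameMempool reports whether two getrawmempool results contain the same
    // set of transaction hashes regardless of order.
    func sameMempool(a, b []*chainhash.Hash) bool {
        if len(a) != len(b) {
            return false
        }
        seen := make(map[chainhash.Hash]int, len(a))
        for _, h := range a {
            seen[*h]++
        }
        for _, h := range b {
            seen[*h]--
            if seen[*h] < 0 {
                return false
            }
        }
        return true
    }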
-func syncBlocks(ctx context.Context, nodes []*Harness) error { - blocksMatch := false - - for !blocksMatch { - retry: - blockHeights := make(map[int64]struct{}) - - for _, node := range nodes { - blockHeight, err := node.Node.GetBlockCount(ctx) - if err != nil { - return err - } - - blockHeights[blockHeight] = struct{}{} - if len(blockHeights) > 1 { - time.Sleep(time.Millisecond * 100) - goto retry - } - } - - blocksMatch = true - } - - return nil -} - -// ConnectNode establishes a new peer-to-peer connection between the "from" -// harness and the "to" harness. The connection made is flagged as persistent, -// therefore in the case of disconnects, "from" will attempt to reestablish a -// connection to the "to" harness. -func ConnectNode(ctx context.Context, from *Harness, to *Harness) error { - tracef(from.t, "ConnectNode start") - defer tracef(from.t, "ConnectNode end") - - peerInfo, err := from.Node.GetPeerInfo(ctx) - if err != nil { - return err - } - numPeers := len(peerInfo) - tracef(from.t, "ConnectNode numPeers: %v", numPeers) - - targetAddr := to.node.config.listen - if err := from.Node.AddNode(ctx, targetAddr, rpcclient.ANAdd); err != nil { - return err - } - tracef(from.t, "ConnectNode targetAddr: %v", targetAddr) - - // Block until a new connection has been established. - peerInfo, err = from.Node.GetPeerInfo(ctx) - if err != nil { - return err - } - tracef(from.t, "ConnectNode peerInfo: %v", peerInfo) - for len(peerInfo) <= numPeers { - peerInfo, err = from.Node.GetPeerInfo(ctx) - if err != nil { - return err - } - } - tracef(from.t, "ConnectNode len(peerInfo): %v", len(peerInfo)) - - return nil -} - -// RemoveNode removes the peer-to-peer connection between the "from" harness and -// the "to" harness. The connection is only removed in this direction, therefore -// if the reverse connection exists, the nodes may still be connected. -// -// This function returns an error if the nodes were not previously connected. -func RemoveNode(ctx context.Context, from *Harness, to *Harness) error { - targetAddr := to.node.config.listen - if err := from.Node.AddNode(ctx, targetAddr, rpcclient.ANRemove); err != nil { - // AddNode(..., ANRemove) returns an error if the peer is not found - return err - } - - // Block until this particular connection has been dropped. - for { - peerInfo, err := from.Node.GetPeerInfo(ctx) - if err != nil { - return err - } - for _, p := range peerInfo { - if p.Addr == targetAddr { - // Nodes still connected. Skip and re-fetch the list of nodes. - continue - } - } - - // If this point is reached, then the nodes are not connected anymore. - break - } - - return nil -} - -// NodesConnected verifies whether there is a connection via the p2p interface -// between the specified nodes. If allowReverse is true, connectivity is also -// checked in the reverse direction (to->from). -func NodesConnected(ctx context.Context, from, to *Harness, allowReverse bool) (bool, error) { - peerInfo, err := from.Node.GetPeerInfo(ctx) - if err != nil { - return false, err - } - - targetAddr := to.node.config.listen - for _, p := range peerInfo { - if p.Addr == targetAddr { - return true, nil - } - } - - if !allowReverse { - return false, nil - } - - // Check in the reverse direction. - peerInfo, err = to.Node.GetPeerInfo(ctx) - if err != nil { - return false, err - } - - targetAddr = from.node.config.listen - for _, p := range peerInfo { - if p.Addr == targetAddr { - return true, nil - } - } - - return false, nil -} - -// TearDownAll tears down all active test harnesses. 
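One note on RemoveNode above: the continue inside its range loop only advances that inner loop, so the outer for always breaks after a single GetPeerInfo call whether or not the peer is still listed. A sketch of a loop that actually blocks until the target address disappears, using a labelled continue, might look like this (waitRemoved is a hypothetical name; the GetPeerInfo call and Addr field are the ones used above):

    package rpctest

    import (
        "context"
        "time"
    )

    // waitRemoved keeps polling the peer list until targetAddr is no longer
    // present. The labelled continue restarts the outer loop, which is what
    // the comment in RemoveNode intends.
    func waitRemoved(ctx context.Context, from *Harness, targetAddr string) error {
    poll:
        for {
            peerInfo, err := from.Node.GetPeerInfo(ctx)
            if err != nil {
                return err
            }
            for _, p := range peerInfo {
                if p.Addr == targetAddr {
                    time.Sleep(100 * time.Millisecond)
                    continue poll
                }
            }
            return nil
        }
    }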
-// XXX harness.TearDown() can hang with mutex held. -func TearDownAll() error { - harnessStateMtx.Lock() - defer harnessStateMtx.Unlock() - - for _, harness := range testInstances { - if err := harness.TearDown(); err != nil { - return err - } - } - - return nil -} - -// ActiveHarnesses returns a slice of all currently active test harnesses. A -// test harness if considered "active" if it has been created, but not yet torn -// down. -// XXX this is dumb because whatever happens after this call is racing over the -// Harness pointers. -func ActiveHarnesses() []*Harness { - harnessStateMtx.RLock() - defer harnessStateMtx.RUnlock() - - activeNodes := make([]*Harness, 0, len(testInstances)) - for _, harness := range testInstances { - activeNodes = append(activeNodes, harness) - } - - return activeNodes -} - -// PanicAll tears down all active test harnesses. -// XXX We ignore the mutex because it is *hopefully* locked when this is -// called. -func PanicAll(t *testing.T) { - if runtime.GOOS == "windows" { - t.Logf("sigabort not supported") - return - } - - for _, harness := range testInstances { - // This is a little wonky but works. - t.Logf("========================================================") - t.Logf("Aborting: %v", harness.node.pid) - err := harness.node.cmd.Process.Signal(syscall.SIGABRT) - if err != nil { - t.Logf("abort: %v", err) - } - - // Allows for process to dump - time.Sleep(2 * time.Second) - } -} diff --git a/rpctest/votingwallet.go b/rpctest/votingwallet.go deleted file mode 100644 index 7b19544e20..0000000000 --- a/rpctest/votingwallet.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright (c) 2019-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -package rpctest - -import ( - "context" - "fmt" - "math" - "strings" - "time" - - "github.com/decred/dcrd/blockchain/stake/v5" - "github.com/decred/dcrd/blockchain/standalone/v2" - "github.com/decred/dcrd/chaincfg/chainhash" - "github.com/decred/dcrd/chaincfg/v3" - "github.com/decred/dcrd/dcrec" - "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/decred/dcrd/dcrutil/v4" - dcrdtypes "github.com/decred/dcrd/rpc/jsonrpc/types/v4" - "github.com/decred/dcrd/rpcclient/v8" - "github.com/decred/dcrd/txscript/v4" - "github.com/decred/dcrd/txscript/v4/sign" - "github.com/decred/dcrd/txscript/v4/stdaddr" - "github.com/decred/dcrd/wire" -) - -var ( - // feeRate used when sending voting wallet transactions. - feeRate = dcrutil.Amount(1e4) - - // hardcodedPrivateKey used for all signing operations. - hardcodedPrivateKey = []byte{ - 0x79, 0xa6, 0x1a, 0xdb, 0xc6, 0xe5, 0xa2, 0xe1, - 0x39, 0xd2, 0x71, 0x3a, 0x54, 0x6e, 0xc7, 0xc8, - 0x75, 0x63, 0x2e, 0x75, 0xf1, 0xdf, 0x9c, 0x3f, - 0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - } - - // nullPay2SSTXChange is the pkscript used on sstxchange outputs of the - // tickets purchased by the voting wallet. This sends all change into a - // null address, effectively discarding it. - nullPay2SSTXChange = []byte{ - 0xbd, 0xa9, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, - } - - // stakebaseOutPoint is the outpoint that needs to be used in stakebase - // inputs of vote transactions. - stakebaseOutPoint = wire.OutPoint{Index: math.MaxUint32} - - // commitAmountMultiplier is a multiplier for the minimum stake difficulty, - // used to fund inputs used in purchasing tickets. 
This needs to be high - // enough that (minimumStakeDifficulty*commitAmountMultiplier) - - // minimumStakeDifficulty is greater than the dust limit and will allow the - // ticket to be relayed on the network. - commitAmountMultiplier = int64(4) -) - -type blockConnectedNtfn struct { - blockHeader []byte - transactions [][]byte -} - -type winningTicketsNtfn struct { - blockHash *chainhash.Hash - blockHeight int64 - winningTickets []*chainhash.Hash -} - -type ticketInfo struct { - ticketPrice int64 -} - -type utxoInfo struct { - outpoint wire.OutPoint - amount int64 -} - -// VotingWallet stores the state for a simulated voting wallet. Once it is -// started, it will receive notifications from the associated harness, purchase -// tickets and vote on blocks as necessary to keep the chain going. -// -// This currently only implements the bare minimum requirements for maintaining -// a functioning voting wallet and does not handle reorgs, multiple voting and -// ticket buying wallets, setting vote bits, expired/missed votes, etc. -// -// All operations (after initial funding) are done solely via stake -// transactions, so no additional regular transactions are published. This is -// ideal for use in test suites that require a large (greater than SVH) number -// of blocks. -type VotingWallet struct { - hn *Harness - privateKey []byte - address stdaddr.Address - c *rpcclient.Client - - blockConnectedNtfnChan chan blockConnectedNtfn - winningTicketsNtfnChan chan winningTicketsNtfn - - p2sstxVer uint16 - p2sstx []byte - commitScriptVer uint16 - commitScript []byte - p2pkh []byte - p2pkhVer uint16 - voteScriptVer uint16 - voteScript []byte - voteRetScriptVer uint16 - voteRetScript []byte - - errorReporter func(error) - - // miner is a function responsible for generating new blocks. If - // specified, then this function is used instead of directly calling - // the underlying harness' Generate(). - miner func(context.Context, uint32) ([]*chainhash.Hash, error) - - subsidyCache *standalone.SubsidyCache - - // utxos are the unspent outpoints not yet locked into a ticket. - utxos []utxoInfo - - // tickets map the outstanding unspent tickets - tickets map[chainhash.Hash]ticketInfo - - // maturingVotes tracks the votes maturing at each (future) block height, - // which will be available for purchasing new tickets. - maturingVotes map[int64][]utxoInfo - - // tspends to vote for when generating votes. - tspendVotes []*stake.TreasuryVoteTuple - - // Limit the total number of votes to that. - limitNbVotes int -} - -// NewVotingWallet creates a new minimal voting wallet for the given harness. -// This wallet should be able to maintain the chain generated by the miner node -// of the harness working after it has passed SVH (Stake Validation Height) by -// continuously buying tickets and voting on them. 
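The utxos, tickets, and maturingVotes fields described above implement a simple height-indexed pipeline: vote outputs are parked under the height at which they mature and only rejoin the spendable utxo set once a block at that height connects. The sketch below models just that bookkeeping with stand-in types (tracker and utxo are illustrative, not the wallet's real types):

    package main

    import "fmt"

    // utxo is a stand-in for the wallet's utxoInfo.
    type utxo struct {
        txid  string
        value int64
    }

    type tracker struct {
        utxos         []utxo
        maturingVotes map[int64][]utxo
    }

    // addVote parks a vote output until maturityHeight.
    func (t *tracker) addVote(maturityHeight int64, u utxo) {
        t.maturingVotes[maturityHeight] = append(t.maturingVotes[maturityHeight], u)
    }

    // connectBlock releases every output that matures at exactly this height.
    func (t *tracker) connectBlock(height int64) {
        if mature, ok := t.maturingVotes[height]; ok {
            t.utxos = append(t.utxos, mature...)
            delete(t.maturingVotes, height)
        }
    }

    func main() {
        t := &tracker{maturingVotes: make(map[int64][]utxo)}
        t.addVote(160, utxo{txid: "vote-1", value: 100})
        t.connectBlock(159)
        fmt.Println(len(t.utxos)) // 0: not mature yet
        t.connectBlock(160)
        fmt.Println(len(t.utxos)) // 1: released at maturity height
    }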
-func NewVotingWallet(ctx context.Context, hn *Harness) (*VotingWallet, error) { - privKey := secp256k1.PrivKeyFromBytes(hardcodedPrivateKey) - serPub := privKey.PubKey().SerializeCompressed() - h160 := stdaddr.Hash160(serPub) - addr, err := stdaddr.NewAddressPubKeyHashEcdsaSecp256k1V0(h160, hn.ActiveNet) - if err != nil { - return nil, fmt.Errorf("unable to generate address for pubkey: %v", err) - } - - p2sstxVer, p2sstx := addr.VotingRightsScript() - p2pkhVer, p2pkh := addr.PaymentScript() - - commitAmount := hn.ActiveNet.MinimumStakeDiff * commitAmountMultiplier - const voteFeeLimit = 0 - const revokeFeeLimit = 16777216 - commitScriptVer, commitScript := addr.RewardCommitmentScript(commitAmount, - voteFeeLimit, revokeFeeLimit) - - voteScriptVer := uint16(0) - voteScript, err := txscript.GenerateSSGenVotes(0x0001) - if err != nil { - return nil, fmt.Errorf("unable to prepare vote script: %v", err) - } - voteReturnScriptVer, voteReturnScript := addr.PayVoteCommitmentScript() - - // Hints for the initial sizing of the tickets and maturing votes maps. - // Given we have a deterministic purchase process, this should allow us to - // size these maps only once at setup time. - hintTicketsCap := requiredTicketCount(hn.ActiveNet) - hintMaturingVotesCap := int(hn.ActiveNet.CoinbaseMaturity) - - // Buffer length for notification channels. As long as we don't get - // notifications faster than this, we should be fine. - bufferLen := 20 - - w := &VotingWallet{ - hn: hn, - privateKey: hardcodedPrivateKey, - address: addr, - p2sstxVer: p2sstxVer, - p2sstx: p2sstx, - p2pkhVer: p2pkhVer, - p2pkh: p2pkh, - commitScriptVer: commitScriptVer, - commitScript: commitScript, - voteScriptVer: voteScriptVer, - voteScript: voteScript, - voteRetScriptVer: voteReturnScriptVer, - voteRetScript: voteReturnScript, - subsidyCache: standalone.NewSubsidyCache(hn.ActiveNet), - limitNbVotes: int(hn.ActiveNet.TicketsPerBlock), - tickets: make(map[chainhash.Hash]ticketInfo, hintTicketsCap), - maturingVotes: make(map[int64][]utxoInfo, hintMaturingVotesCap), - blockConnectedNtfnChan: make(chan blockConnectedNtfn, bufferLen), - winningTicketsNtfnChan: make(chan winningTicketsNtfn, bufferLen), - } - - handlers := &rpcclient.NotificationHandlers{ - OnBlockConnected: w.onBlockConnected, - OnWinningTickets: w.onWinningTickets, - } - - rpcConf := hn.RPCConfig() - for i := 0; i < 20; i++ { - if w.c, err = rpcclient.New(&rpcConf, handlers); err != nil { - time.Sleep(time.Duration(i) * 50 * time.Millisecond) - continue - } - break - } - if w.c == nil { - return nil, fmt.Errorf("unable to connect to miner node") - } - - if err = w.c.NotifyBlocks(ctx); err != nil { - return nil, fmt.Errorf("unable to subscribe to block notifications: %v", err) - } - if err = w.c.NotifyWinningTickets(ctx); err != nil { - return nil, fmt.Errorf("unable to subscribe to winning tickets notification: %v", err) - } - - return w, nil -} - -// Start stars the goroutines necessary for this voting wallet to function. -func (w *VotingWallet) Start(ctx context.Context) error { - value := w.hn.ActiveNet.MinimumStakeDiff * commitAmountMultiplier - - // Create enough outputs to perform the voting, each with twice the amount - // of the minimum ticket price. - // - // The number of required outputs is twice the coinbase maturity, since - // we buy TicketsPerBlock tickets per block, starting at SVH-TM. At SVH, - // TicketsPerBlock tickets will mature and be selected to vote (given they - // are the only ones in the live ticket pool). 
- // - // Every following block we purchase the same amount of tickets, such that - // TicketsPerBlock are maturing. - nbOutputs := requiredTicketCount(w.hn.ActiveNet) - outputs := make([]*wire.TxOut, nbOutputs) - - for i := 0; i < nbOutputs; i++ { - outputs[i] = wire.NewTxOut(value, w.p2pkh) - } - - txid, err := w.hn.SendOutputs(ctx, outputs, feeRate) - if err != nil { - return fmt.Errorf("unable to fund voting wallet: %v", err) - } - - // Build the outstanding utxos for ticket buying. These will be the first - // nbOutputs outputs from txid (assuming the SendOutputs() from above always - // sends the change last). - utxos := make([]utxoInfo, nbOutputs) - for i := 0; i < nbOutputs; i++ { - utxos[i] = utxoInfo{ - outpoint: wire.OutPoint{Hash: *txid, Index: uint32(i), Tree: wire.TxTreeRegular}, - amount: value, - } - } - w.utxos = utxos - - go w.handleNotifications(ctx) - - return nil -} - -// SetErrorReporting allows users of the voting wallet to specify a function -// that will be called whenever an error happens while purchasing tickets or -// generating votes. -func (w *VotingWallet) SetErrorReporting(f func(err error)) { - w.errorReporter = f -} - -// SetMiner allows users of the voting wallet to specify a function that will -// be used to mine new blocks instead of using the regular Generate function of -// the configured rpcclient. -// -// This allows callers to use a custom function to generate blocks, such as one -// that allows faster mining in simnet. -func (w *VotingWallet) SetMiner(f func(context.Context, uint32) ([]*chainhash.Hash, error)) { - w.miner = f -} - -// LimitNbVotes limits the number of votes issued by the voting wallet to the -// given amount, which is useful for testing scenarios where less than the -// total number of votes per block are cast in the network. -// -// Note that due to limitations in the current implementation of the voting -// wallet, you can only reduce this amount (never increase it) and simnet -// voting will stop once CoinbaseMaturity blocks have passed (so this needs to -// be used only at the end of a test run). -func (w *VotingWallet) LimitNbVotes(newLimit int) error { - if newLimit < 0 { - return fmt.Errorf("cannot use negative number of votes") - } - - if newLimit > w.limitNbVotes { - return fmt.Errorf("cannot increase number of votes") - } - - w.limitNbVotes = newLimit - return nil -} - -// GenerateBlocks generates blocks while ensuring the chain will continue past -// SVH indefinitely. This will generate a block then wait for the votes from -// this wallet to be sent and tickets to be purchased before either generating -// the next block or returning. -// -// This function will either return the hashes of the generated blocks or an -// error if, after generating a candidate block, votes and tickets aren't -// submitted in a timely fashion. -func (w *VotingWallet) GenerateBlocks(ctx context.Context, nb uint32) ([]*chainhash.Hash, error) { - _, startHeight, err := w.c.GetBestBlock(ctx) - if err != nil { - return nil, err - } - - nbVotes := w.limitNbVotes - nbTickets := int(w.hn.ActiveNet.TicketsPerBlock) - hashes := make([]*chainhash.Hash, nb) - - miner := w.c.Generate - if w.miner != nil { - miner = w.miner - } - - for i := uint32(0); i < nb; i++ { - // genHeight is the height of the _next_ block (the one that will be - // generated once we call generate()). 
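For concreteness, the sizing performed in Start above comes from the two helpers defined at the bottom of this file, ticketPurchaseStartHeight and requiredTicketCount. A worked example of those formulas with illustrative parameter values (the concrete numbers below are stand-ins, not taken from chaincfg):

    package main

    import "fmt"

    func main() {
        var (
            stakeValidationHeight = int64(144)
            ticketMaturity        = int64(16)
            coinbaseMaturity      = int64(16)
            ticketsPerBlock       = int64(5)
        )

        // Ticket buying must start early enough that TicketsPerBlock tickets
        // are already live when stake validation begins.
        purchaseStart := stakeValidationHeight - ticketMaturity - 2
        fmt.Println("start buying at height:", purchaseStart) // 126

        // One batch of tickets is bought per block, so the wallet needs enough
        // funded outputs to cover every block from the purchase start until
        // vote outputs begin maturing back into spendable funds.
        required := (coinbaseMaturity + ticketMaturity + 2) * ticketsPerBlock
        fmt.Println("funded outputs required:", required) // 170
    }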
- genHeight := startHeight + int64(i) + 1 - - h, err := miner(ctx, 1) - if err != nil { - return nil, fmt.Errorf("unable to generate block at height %d: %v", - genHeight, err) - } - hashes[i] = h[0] - - needsVotes := genHeight >= (w.hn.ActiveNet.StakeValidationHeight - 1) - needsTickets := genHeight >= ticketPurchaseStartHeight(w.hn.ActiveNet) - - timeout := time.After(time.Second * 5) - testTimeout := time.After(time.Millisecond * 2) - gotAllReqs := !needsVotes && !needsTickets - for !gotAllReqs { - select { - case <-timeout: - mempoolTickets, _ := w.c.GetRawMempool(ctx, dcrdtypes.GRMTickets) - mempoolVotes, _ := w.c.GetRawMempool(ctx, dcrdtypes.GRMVotes) - var notGot []string - if len(mempoolVotes) != nbVotes { - notGot = append(notGot, "votes") - } - if len(mempoolTickets) != nbTickets { - notGot = append(notGot, "tickets") - } - - return nil, fmt.Errorf("timeout waiting for %s "+ - "at height %d", strings.Join(notGot, ","), genHeight) - case <-ctx.Done(): - return nil, fmt.Errorf("wallet is stopping") - case <-testTimeout: - mempoolTickets, _ := w.c.GetRawMempool(ctx, dcrdtypes.GRMTickets) - mempoolVotes, _ := w.c.GetRawMempool(ctx, dcrdtypes.GRMVotes) - - gotAllReqs = (!needsTickets || (len(mempoolTickets) >= nbVotes)) && - (!needsVotes || (len(mempoolVotes) >= nbVotes)) - testTimeout = time.After(time.Millisecond * 2) - } - } - } - - return hashes, nil -} - -func (w *VotingWallet) logError(err error) { - if w.errorReporter != nil { - w.errorReporter(err) - } -} - -func (w *VotingWallet) onBlockConnected(blockHeader []byte, transactions [][]byte) { - w.blockConnectedNtfnChan <- blockConnectedNtfn{ - blockHeader: blockHeader, - transactions: transactions, - } -} - -// newTxOut returns a new transaction output with the given parameters. -func newTxOut(amount int64, pkScriptVer uint16, pkScript []byte) *wire.TxOut { - return &wire.TxOut{ - Value: amount, - Version: pkScriptVer, - PkScript: pkScript, - } -} - -func (w *VotingWallet) handleBlockConnectedNtfn(ctx context.Context, ntfn *blockConnectedNtfn) { - var header wire.BlockHeader - err := header.FromBytes(ntfn.blockHeader) - if err != nil { - w.logError(err) - return - } - - blockHeight := int64(header.Height) - purchaseHeight := ticketPurchaseStartHeight(w.hn.ActiveNet) - if blockHeight < purchaseHeight { - // No need to purchase tickets yet. - return - } - - // Purchase TicketsPerBlock tickets. - nbTickets := int(w.hn.ActiveNet.TicketsPerBlock) - if len(w.utxos) < nbTickets { - w.logError(fmt.Errorf("number of available utxos (%d) less than "+ - "number of tickets to purchase (%d)", len(w.utxos), nbTickets)) - return - } - - // Use a slightly higher ticket price than the current minimum, to allow us - // to ignore stakediff changes at exactly the next block (where purchasing - // at the current value would cause our tickets to be rejected). - ticketPrice := header.SBits + (header.SBits / 6) - commitAmount := w.hn.ActiveNet.MinimumStakeDiff * commitAmountMultiplier - - // Select utxos to use and mark them used. 
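The reservation step just below pops the last nbTickets entries off w.utxos, copying them into a fresh slice first. The copy matters: it keeps the reserved entries from sharing a backing array with the remaining slice, which is appended to again later when maturing votes are released. A generic sketch of that take-last-N idiom (takeLast is an illustrative name):

    package main

    import "fmt"

    // takeLast removes the last n elements of s and returns both pieces. The
    // taken elements are copied into a fresh slice so that later appends to
    // rest cannot overwrite them through a shared backing array.
    func takeLast[T any](s []T, n int) (rest, taken []T) {
        taken = make([]T, n)
        copy(taken, s[len(s)-n:])
        return s[:len(s)-n], taken
    }

    func main() {
        funding := []int{10, 20, 30, 40, 50}
        rest, batch := takeLast(funding, 2)
        fmt.Println(rest)  // [10 20 30]
        fmt.Println(batch) // [40 50]
    }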
- utxos := make([]utxoInfo, nbTickets) - copy(utxos, w.utxos[len(w.utxos)-nbTickets:]) - w.utxos = w.utxos[:len(w.utxos)-nbTickets] - - tickets := make([]wire.MsgTx, nbTickets) - for i := 0; i < nbTickets; i++ { - changeAmount := utxos[i].amount - commitAmount - - t := &tickets[i] - t.Version = wire.TxVersion - t.AddTxIn(wire.NewTxIn(&utxos[i].outpoint, wire.NullValueIn, nil)) - t.AddTxOut(newTxOut(ticketPrice, w.p2sstxVer, w.p2sstx)) - t.AddTxOut(newTxOut(0, w.commitScriptVer, w.commitScript)) - t.AddTxOut(wire.NewTxOut(changeAmount, nullPay2SSTXChange)) - - prevScript := w.p2pkh - if utxos[i].outpoint.Tree == wire.TxTreeStake { - prevScript = w.voteRetScript - } - - sig, err := sign.SignatureScript(t, 0, prevScript, txscript.SigHashAll, - w.privateKey, dcrec.STEcdsaSecp256k1, true) - if err != nil { - w.logError(fmt.Errorf("failed to sign ticket tx: %v", err)) - return - } - t.TxIn[0].SignatureScript = sig - } - - // Submit all tickets to the network. - promises := make([]*rpcclient.FutureSendRawTransactionResult, nbTickets) - for i := 0; i < nbTickets; i++ { - promises[i] = w.c.SendRawTransactionAsync(ctx, &tickets[i], true) - } - - for i := 0; i < nbTickets; i++ { - h, err := promises[i].Receive() - if err != nil { - w.logError(fmt.Errorf("unable to send ticket tx: %v", err)) - return - } - - w.tickets[*h] = ticketInfo{ - ticketPrice: ticketPrice, - } - } - - // Mark all maturing votes (if any) as available for spending. - if maturingVotes, has := w.maturingVotes[blockHeight]; has { - w.utxos = append(w.utxos, maturingVotes...) - delete(w.maturingVotes, blockHeight) - } -} - -func (w *VotingWallet) onWinningTickets(blockHash *chainhash.Hash, blockHeight int64, - winningTickets []*chainhash.Hash) { - - w.winningTicketsNtfnChan <- winningTicketsNtfn{ - blockHash: blockHash, - blockHeight: blockHeight, - winningTickets: winningTickets, - } -} - -func (w *VotingWallet) handleWinningTicketsNtfn(ctx context.Context, ntfn *winningTicketsNtfn) { - blockRefScript, err := txscript.GenerateSSGenBlockRef(*ntfn.blockHash, - uint32(ntfn.blockHeight)) - if err != nil { - w.logError(fmt.Errorf("unable to generate ssgen block ref: %v", err)) - return - } - - // Always consider the subsidy split enabled since the test voting wallet - // is only used with simnet where the agenda is always active. - const isSubsidySplitEnabled = true - stakebaseValue := w.subsidyCache.CalcStakeVoteSubsidyV2(ntfn.blockHeight, - isSubsidySplitEnabled) - - // Create the votes. nbVotes is the number of tickets from the wallet that - // voted. - votes := make([]wire.MsgTx, w.limitNbVotes) - nbVotes := 0 - - var ( - ticket ticketInfo - myTicket bool - ) - - for _, wt := range ntfn.winningTickets { - if ticket, myTicket = w.tickets[*wt]; !myTicket { - continue - } - - voteRetValue := ticket.ticketPrice + stakebaseValue - - // Create a corresponding vote transaction. - vote := &votes[nbVotes] - nbVotes++ - vote.Version = wire.TxVersion - vote.AddTxIn(wire.NewTxIn( - &stakebaseOutPoint, stakebaseValue, w.hn.ActiveNet.StakeBaseSigScript, - )) - vote.AddTxIn(wire.NewTxIn( - wire.NewOutPoint(wt, 0, wire.TxTreeStake), - wire.NullValueIn, nil, - )) - vote.AddTxOut(wire.NewTxOut(0, blockRefScript)) - vote.AddTxOut(newTxOut(0, w.voteScriptVer, w.voteScript)) - vote.AddTxOut(newTxOut(voteRetValue, w.voteRetScriptVer, w.voteRetScript)) - - // If there are tspends to vote for, create an additional - // output. 
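Both the ticket submission above and the vote submission below use the same future-based pattern: queue every SendRawTransactionAsync call first so the RPCs are in flight concurrently, then collect the results in order. A sketch of that pattern factored into a helper (sendAll is a hypothetical name; the rpcclient calls and types are the same ones used in this file):

    package rpctest

    import (
        "context"

        "github.com/decred/dcrd/chaincfg/chainhash"
        "github.com/decred/dcrd/rpcclient/v8"
        "github.com/decred/dcrd/wire"
    )

    // sendAll submits every transaction asynchronously and only then waits on
    // the futures, returning the accepted hashes in the same order as txs.
    func sendAll(ctx context.Context, c *rpcclient.Client, txs []*wire.MsgTx) ([]*chainhash.Hash, error) {
        futures := make([]*rpcclient.FutureSendRawTransactionResult, len(txs))
        for i := range txs {
            futures[i] = c.SendRawTransactionAsync(ctx, txs[i], true)
        }

        hashes := make([]*chainhash.Hash, len(txs))
        for i, f := range futures {
            h, err := f.Receive()
            if err != nil {
                return nil, err
            }
            hashes[i] = h
        }
        return hashes, nil
    }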
- if len(w.tspendVotes) > 0 { - n := len(w.tspendVotes) - opReturnLen := 2 + chainhash.HashSize*n + n - opReturnData := make([]byte, 0, opReturnLen) - opReturnData = append(opReturnData, 'T', 'V') - for _, v := range w.tspendVotes { - opReturnData = append(opReturnData, v.Hash[:]...) - opReturnData = append(opReturnData, byte(v.Vote)) - } - var bldr txscript.ScriptBuilder - bldr.AddOp(txscript.OP_RETURN) - bldr.AddData(opReturnData) - voteScript, err := bldr.Script() - if err != nil { - w.logError(fmt.Errorf("unable to construct vote script: %v", err)) - return - } - vote.AddTxOut(wire.NewTxOut(0, voteScript)) - vote.Version = wire.TxVersionTreasury - } - - sig, err := sign.SignatureScript(vote, 1, w.p2sstx, txscript.SigHashAll, - w.privateKey, dcrec.STEcdsaSecp256k1, true) - if err != nil { - w.logError(fmt.Errorf("failed to sign ticket tx: %v", err)) - return - } - vote.TxIn[1].SignatureScript = sig - - err = stake.CheckSSGen(vote) - if err != nil { - w.logError(fmt.Errorf("transaction is not a valid vote: %v", err)) - return - } - - // Limit the total number of issued votes if requested. - if nbVotes >= w.limitNbVotes { - break - } - } - - newUtxos := make([]utxoInfo, nbVotes) - - // Publish the votes. - promises := make([]*rpcclient.FutureSendRawTransactionResult, nbVotes) - for i := 0; i < nbVotes; i++ { - promises[i] = w.c.SendRawTransactionAsync(ctx, &votes[i], true) - } - for i := 0; i < nbVotes; i++ { - h, err := promises[i].Receive() - if err != nil { - w.logError(fmt.Errorf("unable to send vote tx: %v", err)) - return - } - newUtxos[i] = utxoInfo{ - outpoint: wire.OutPoint{Hash: *h, Index: 2, Tree: wire.TxTreeStake}, - amount: votes[i].TxOut[2].Value, - } - } - - maturingHeight := ntfn.blockHeight + int64(w.hn.ActiveNet.CoinbaseMaturity) - w.maturingVotes[maturingHeight] = newUtxos -} - -// handleNotifications handles all notifications. This blocks until the passed -// context is cancelled and MUST be run on a separate goroutine. -func (w *VotingWallet) handleNotifications(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case ntfn := <-w.blockConnectedNtfnChan: - w.handleBlockConnectedNtfn(ctx, &ntfn) - case ntfn := <-w.winningTicketsNtfnChan: - w.handleWinningTicketsNtfn(ctx, &ntfn) - } - } -} - -// VoteForTSpends sets the wallet to vote for the provided tspends when -// creating vote transactions. -func (w *VotingWallet) VoteForTSpends(votes []*stake.TreasuryVoteTuple) { - w.tspendVotes = votes -} - -// ticketPurchaseStartHeight returns the block height where ticket buying -// needs to start so that there will be enough mature tickets for voting -// once SVH is reached. -func ticketPurchaseStartHeight(net *chaincfg.Params) int64 { - return net.StakeValidationHeight - int64(net.TicketMaturity) - 2 -} - -// requiredTicketCount returns the number of tickets required to maintain the -// network functioning past SVH, assuming only as many tickets as votes will -// be purchased at every block. -func requiredTicketCount(net *chaincfg.Params) int { - return int((net.CoinbaseMaturity + net.TicketMaturity + 2) * net.TicketsPerBlock) -} diff --git a/rpctest/votingwallet_test.go b/rpctest/votingwallet_test.go deleted file mode 100644 index b8cd68b9d2..0000000000 --- a/rpctest/votingwallet_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) 2019-2022 The Decred developers -// Use of this source code is governed by an ISC -// license that can be found in the LICENSE file. - -// This file is ignored during the regular tests due to the following build tag. 
-//go:build rpctest -// +build rpctest - -package rpctest - -import ( - "context" - "os" - "testing" - - "github.com/decred/dcrd/chaincfg/v3" - "github.com/decred/dcrd/rpcclient/v8" -) - -// testCanPassSVH tests whether the wallet can maintain the chain going past SVH -// (stake validation height). -func testCanPassSVH(ctx context.Context, t *testing.T, vw *VotingWallet) { - - // Store the current (starting) height. - _, startHeight, err := vw.hn.Node.GetBestBlock(ctx) - if err != nil { - t.Fatalf("unable to obtain best block: %v", err) - } - - // Generate enough blocks to get us past SVH. - targetHeight := vw.hn.ActiveNet.StakeValidationHeight * 2 - if targetHeight < startHeight { - targetHeight = startHeight + 10 - } - - for h := startHeight + 1; h <= targetHeight; h++ { - // Try and generate a block at this height. - _, err := vw.GenerateBlocks(ctx, 1) - if err != nil { - t.Fatal(err) - } - - // Verify whether a block was actually generated (after SVH, this will - // imply the wallet was successfully voting on blocks). - _, actualHeight, err := vw.hn.Node.GetBestBlock(ctx) - if err != nil { - t.Fatalf("unable to obtain best block: %v", err) - } - if actualHeight != h { - t.Fatalf("block was not mined at height %d (got %d as best height)", - h, actualHeight) - } - } - - t.Logf("Generated up to block %d\n", targetHeight) -} - -func TestMinimalVotingWallet(t *testing.T) { - // Skip tests when running with -short - if testing.Short() { - t.Skip("Skipping minimal voting wallet in short mode") - } - - var handlers *rpcclient.NotificationHandlers - net := chaincfg.SimNetParams() - - logDir := "./dcrdlogs" - extraArgs := []string{ - "--debuglevel=debug", - } - - info, err := os.Stat(logDir) - if err != nil && !os.IsNotExist(err) { - t.Fatalf("error stating log dir: %v", err) - } - if info != nil { - if !info.IsDir() { - t.Fatalf("logdir (%s) is not a dir", logDir) - } - err = os.RemoveAll(logDir) - if err != nil { - t.Fatalf("error removing logdir: %v", err) - } - } - - hn, err := New(t, net, handlers, extraArgs) - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - err = hn.SetUp(ctx, true, 8) - if err != nil { - t.Fatal(err) - } - defer hn.TearDown() - - type testCase struct { - name string - f func(ctx context.Context, t *testing.T, vw *VotingWallet) - } - - testCases := []testCase{ - { - name: "can get past SVH", - f: testCanPassSVH, - }, - } - - for _, tc := range testCases { - var vw *VotingWallet - success := t.Run(tc.name, func(t1 *testing.T) { - vw, err = NewVotingWallet(ctx, hn) - if err != nil { - t1.Fatalf("unable to create voting wallet for test: %v", err) - } - - err = vw.Start(ctx) - if err != nil { - t1.Fatalf("unable to setup voting wallet: %v", err) - } - - vw.SetErrorReporting(func(vwerr error) { - t.Fatalf("voting wallet errored: %v", vwerr) - }) - - tc.f(ctx, t1, vw) - }) - - if vw != nil { - vw.SetErrorReporting(nil) - cancel() - } - - if !success { - break - } - } - - err = hn.TearDown() - if err != nil { - t.Fatalf("errored while tearing down test harness: %v", err) - } -}