diff --git a/.gitignore b/.gitignore index 5c29ccd9b5..1e5a40ca27 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,8 @@ btcutil/psbt/coverage.txt # vim *.swp +*.swo +/.vim # Binaries produced by "make build" /addblock diff --git a/CHANGES b/CHANGES index fd59a88672..4e359222b8 100644 --- a/CHANGES +++ b/CHANGES @@ -782,7 +782,7 @@ Changes in 0.8.0-beta (Sun May 25 2014) recent reference client changes (https://github.com/conformal/btcd/issues/100) - Raise the maximum signature script size to support standard 15-of-15 - multi-signature pay-to-sript-hash transactions with compressed pubkeys + multi-signature pay-to-script-hash transactions with compressed pubkeys to remain compatible with the reference client (https://github.com/conformal/btcd/issues/128) - Reduce max bytes allowed for a standard nulldata transaction to 40 for diff --git a/Dockerfile b/Dockerfile index 5ed3e63c10..a715e89ba6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # # docker build . -t yourregistry/btcd # -# You can use the following command to buid an arm64v8 container: +# You can use the following command to build an arm64v8 container: # # docker build . -t yourregistry/btcd --build-arg ARCH=arm64v8 # diff --git a/Makefile b/Makefile index 5bfb1aa6a5..e17e6446d7 100644 --- a/Makefile +++ b/Makefile @@ -39,8 +39,10 @@ define print echo $(GREEN)$1$(NC) endef +#? default: Run `make build` default: build +#? all: Run `make build` and `make check` all: build check # ============ @@ -55,6 +57,7 @@ $(GOACC_BIN): @$(call print, "Fetching go-acc") $(DEPGET) $(GOACC_PKG)@$(GOACC_COMMIT) +#? goimports: Install goimports goimports: @$(call print, "Installing goimports.") $(DEPGET) $(GOIMPORTS_PKG) @@ -63,6 +66,7 @@ goimports: # INSTALLATION # ============ +#? build: Build all binaries, place them in project directory build: @$(call print, "Building all binaries") $(GOBUILD) $(PKG) @@ -71,6 +75,7 @@ build: $(GOBUILD) $(PKG)/cmd/findcheckpoint $(GOBUILD) $(PKG)/cmd/addblock +#? 
install: Install all binaries, place them in $GOPATH/bin install: @$(call print, "Installing all binaries") $(GOINSTALL) $(PKG) @@ -79,6 +84,7 @@ install: $(GOINSTALL) $(PKG)/cmd/findcheckpoint $(GOINSTALL) $(PKG)/cmd/addblock +#? release-install: Install btcd and btcctl release binaries, place them in $GOPATH/bin release-install: @$(call print, "Installing btcd and btcctl release binaries") env CGO_ENABLED=0 $(GOINSTALL) -trimpath -ldflags="-s -w -buildid=" $(PKG) @@ -88,8 +94,10 @@ release-install: # TESTING # ======= +#? check: Run `make unit` check: unit +#? unit: Run unit tests unit: @$(call print, "Running unit tests.") $(GOTEST_DEV) ./... -test.timeout=20m @@ -97,6 +105,7 @@ unit: cd btcutil; $(GOTEST_DEV) ./... -test.timeout=20m cd btcutil/psbt; $(GOTEST_DEV) ./... -test.timeout=20m +#? unit-cover: Run unit coverage tests unit-cover: $(GOACC_BIN) @$(call print, "Running unit coverage tests.") $(GOACC_BIN) ./... @@ -109,6 +118,7 @@ unit-cover: $(GOACC_BIN) cd btcutil/psbt; $(GOACC_BIN) ./... +#? unit-race: Run unit race tests unit-race: @$(call print, "Running unit race tests.") env CGO_ENABLED=1 GORACE="history_size=7 halt_on_errors=1" $(GOTEST) -race -test.timeout=20m ./... @@ -120,19 +130,27 @@ unit-race: # UTILITIES # ========= +#? fmt: Fix imports and formatting source fmt: goimports @$(call print, "Fixing imports.") goimports -w $(GOFILES_NOVENDOR) @$(call print, "Formatting source.") gofmt -l -w -s $(GOFILES_NOVENDOR) +#? lint: Lint source lint: $(LINT_BIN) @$(call print, "Linting source.") $(LINT) +#? clean: Clean source clean: @$(call print, "Cleaning source.$(NC)") $(RM) coverage.txt btcec/coverage.txt btcutil/coverage.txt btcutil/psbt/coverage.txt + +#? tidy-module: Run 'go mod tidy' for all modules +tidy-module: + echo "Running 'go mod tidy' for all modules" + scripts/tidy_modules.sh .PHONY: all \ default \ @@ -144,3 +162,10 @@ clean: fmt \ lint \ clean + +#? 
help: Get more info on make commands +help: Makefile + @echo " Choose a command run in btcd:" + @sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /' + +.PHONY: help diff --git a/addrmgr/addrmanager_internal_test.go b/addrmgr/addrmanager_internal_test.go index 1d13f78e6e..38218b15f7 100644 --- a/addrmgr/addrmanager_internal_test.go +++ b/addrmgr/addrmanager_internal_test.go @@ -1,7 +1,6 @@ package addrmgr import ( - "io/ioutil" "math/rand" "net" "os" @@ -12,7 +11,7 @@ import ( ) // randAddr generates a *wire.NetAddressV2 backed by a random IPv4/IPv6 -// address. +// address. Some of the returned addresses may not be routable. func randAddr(t *testing.T) *wire.NetAddressV2 { t.Helper() @@ -40,6 +39,23 @@ func randAddr(t *testing.T) *wire.NetAddressV2 { ) } +// routableRandAddr generates a *wire.NetAddressV2 backed by a random IPv4/IPv6 +// address that is always routable. +func routableRandAddr(t *testing.T) *wire.NetAddressV2 { + t.Helper() + + var addr *wire.NetAddressV2 + + // If the address is not routable, try again. + routable := false + for !routable { + addr = randAddr(t) + routable = IsRoutable(addr) + } + + return addr +} + // assertAddr ensures that the two addresses match. The timestamp is not // checked as it does not affect uniquely identifying a specific address. func assertAddr(t *testing.T, got, expected *wire.NetAddressV2) { @@ -91,7 +107,7 @@ func TestAddrManagerSerialization(t *testing.T) { // We'll start by creating our address manager backed by a temporary // directory. 
- tempDir, err := ioutil.TempDir("", "addrmgr") + tempDir, err := os.MkdirTemp("", "addrmgr") if err != nil { t.Fatalf("unable to create temp dir: %v", err) } @@ -104,9 +120,9 @@ func TestAddrManagerSerialization(t *testing.T) { expectedAddrs := make(map[string]*wire.NetAddressV2, numAddrs) for i := 0; i < numAddrs; i++ { - addr := randAddr(t) + addr := routableRandAddr(t) expectedAddrs[NetAddressKey(addr)] = addr - addrMgr.AddAddress(addr, randAddr(t)) + addrMgr.AddAddress(addr, routableRandAddr(t)) } // Now that the addresses have been added, we should be able to retrieve @@ -131,7 +147,7 @@ func TestAddrManagerV1ToV2(t *testing.T) { // We'll start by creating our address manager backed by a temporary // directory. - tempDir, err := ioutil.TempDir("", "addrmgr") + tempDir, err := os.MkdirTemp("", "addrmgr") if err != nil { t.Fatalf("unable to create temp dir: %v", err) } @@ -149,9 +165,9 @@ func TestAddrManagerV1ToV2(t *testing.T) { expectedAddrs := make(map[string]*wire.NetAddressV2, numAddrs) for i := 0; i < numAddrs; i++ { - addr := randAddr(t) + addr := routableRandAddr(t) expectedAddrs[NetAddressKey(addr)] = addr - addrMgr.AddAddress(addr, randAddr(t)) + addrMgr.AddAddress(addr, routableRandAddr(t)) } // Then, we'll persist these addresses to disk and restart the address @@ -168,7 +184,7 @@ func TestAddrManagerV1ToV2(t *testing.T) { addrMgr.loadPeers() addrs := addrMgr.getAddresses() if len(addrs) != len(expectedAddrs) { - t.Fatalf("expected to find %d adddresses, found %d", + t.Fatalf("expected to find %d addresses, found %d", len(expectedAddrs), len(addrs)) } for _, addr := range addrs { diff --git a/blockchain/accept.go b/blockchain/accept.go index 935963148f..4adc2f6127 100644 --- a/blockchain/accept.go +++ b/blockchain/accept.go @@ -84,9 +84,11 @@ func (b *BlockChain) maybeAcceptBlock(block *btcutil.Block, flags BehaviorFlags) // Notify the caller that the new block was accepted into the block // chain. 
The caller would typically want to react by relaying the // inventory to other peers. - b.chainLock.Unlock() - b.sendNotification(NTBlockAccepted, block) - b.chainLock.Lock() + func() { + b.chainLock.Unlock() + defer b.chainLock.Lock() + b.sendNotification(NTBlockAccepted, block) + }() return isMainChain, nil } diff --git a/blockchain/blockindex.go b/blockchain/blockindex.go index ca3235f79f..5273cb488b 100644 --- a/blockchain/blockindex.go +++ b/blockchain/blockindex.go @@ -135,6 +135,20 @@ func newBlockNode(blockHeader *wire.BlockHeader, parent *blockNode) *blockNode { return &node } +// Equals compares all the fields of the block node except for the parent and +// ancestor and returns true if they're equal. +func (node *blockNode) Equals(other *blockNode) bool { + return node.hash == other.hash && + node.workSum.Cmp(other.workSum) == 0 && + node.height == other.height && + node.version == other.version && + node.bits == other.bits && + node.nonce == other.nonce && + node.timestamp == other.timestamp && + node.merkleRoot == other.merkleRoot && + node.status == other.status +} + // Header constructs a block header from the node and returns it. // // This function is safe for concurrent access. @@ -260,6 +274,28 @@ func (node *blockNode) RelativeAncestorCtx(distance int32) HeaderCtx { return ancestor } +// IsAncestor returns if the other node is an ancestor of this block node. +func (node *blockNode) IsAncestor(otherNode *blockNode) bool { + // Return early as false if the otherNode is nil. + if otherNode == nil { + return false + } + + ancestor := node.Ancestor(otherNode.height) + if ancestor == nil { + return false + } + + // If the otherNode has the same height as me, then the returned + // ancestor will be me. Return false since I'm not an ancestor of me. + if node.height == ancestor.height { + return false + } + + // Return true if the fetched ancestor is other node. 
+ return ancestor.Equals(otherNode) +} + // RelativeAncestor returns the ancestor block node a relative 'distance' blocks // before this node. This is equivalent to calling Ancestor with the node's // height minus provided distance. diff --git a/blockchain/chain.go b/blockchain/chain.go index 60420022ac..952d0bc279 100644 --- a/blockchain/chain.go +++ b/blockchain/chain.go @@ -569,7 +569,7 @@ func (b *BlockChain) getReorganizeNodes(node *blockNode) (*list.List, *list.List // // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, - view *UtxoViewpoint, stxos []SpentTxOut) error { + stxos []SpentTxOut) error { // Make sure it's extending the end of the best chain. prevHash := &block.MsgBlock().Header.PrevBlock @@ -611,18 +611,6 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, curTotalTxns+numTxns, CalcPastMedianTime(node), ) - // If a utxoviewpoint was passed in, we'll be writing that viewpoint - // directly to the database on disk. In order for the database to be - // consistent, we must flush the cache before writing the viewpoint. - if view != nil { - err = b.db.Update(func(dbTx database.Tx) error { - return b.utxoCache.flush(dbTx, FlushRequired, state) - }) - if err != nil { - return err - } - } - // Atomically insert info into the database. err = b.db.Update(func(dbTx database.Tx) error { // If the pruneTarget isn't 0, we should attempt to delete older blocks @@ -676,16 +664,6 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, return err } - // Update the utxo set using the state of the utxo view. This - // entails removing all of the utxos spent and adding the new - // ones created by the block. - // - // A nil viewpoint is a no-op. - err = dbPutUtxoView(dbTx, view) - if err != nil { - return err - } - // Update the transaction spend journal by adding a record for // the block that contains all txos spent by it. 
err = dbPutSpendJournalEntry(dbTx, block.Hash(), stxos) @@ -709,12 +687,6 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, return err } - // Prune fully spent entries and mark all entries in the view unmodified - // now that the modifications have been committed to the database. - if view != nil { - view.commit() - } - // This node is now the end of the best chain. b.bestChain.SetTip(node) @@ -730,9 +702,11 @@ func (b *BlockChain) connectBlock(node *blockNode, block *btcutil.Block, // Notify the caller that the block was connected to the main chain. // The caller would typically want to react with actions such as // updating wallets. - b.chainLock.Unlock() - b.sendNotification(NTBlockConnected, block) - b.chainLock.Lock() + func() { + b.chainLock.Unlock() + defer b.chainLock.Lock() + b.sendNotification(NTBlockConnected, block) + }() // Since we may have changed the UTXO cache, we make sure it didn't exceed its // maximum size. If we're pruned and have flushed already, this will be a no-op. @@ -796,6 +770,15 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view return err } + // Flush the cache on every disconnect. Since the code for + // reorganization modifies the database directly, the cache + // will be left in an inconsistent state if we don't flush it + // prior to the dbPutUtxoView that happens below. + err = b.utxoCache.flush(dbTx, FlushRequired, state) + if err != nil { + return err + } + // Update the utxo set using the state of the utxo view. This // entails restoring all of the utxos spent and removing the new // ones created by the block. @@ -853,9 +836,11 @@ func (b *BlockChain) disconnectBlock(node *blockNode, block *btcutil.Block, view // Notify the caller that the block was disconnected from the main // chain. The caller would typically want to react with actions such as // updating wallets. 
- b.chainLock.Unlock() - b.sendNotification(NTBlockDisconnected, block) - b.chainLock.Lock() + func() { + b.chainLock.Unlock() + defer b.chainLock.Lock() + b.sendNotification(NTBlockDisconnected, block) + }() return nil } @@ -880,30 +865,134 @@ func countSpentOutputs(block *btcutil.Block) int { // // This function may modify node statuses in the block index without flushing. // +// This function never leaves the utxo set in an inconsistent state for block +// disconnects. +// // This function MUST be called with the chain state lock held (for writes). func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error { - // Nothing to do if no reorganize nodes were provided. - if detachNodes.Len() == 0 && attachNodes.Len() == 0 { - return nil - } - - // The rest of the reorg depends on all STXOs already being in the database - // so we flush before reorg. - err := b.db.Update(func(dbTx database.Tx) error { - return b.utxoCache.flush(dbTx, FlushRequired, b.BestSnapshot()) - }) + // Check first that the detach and the attach nodes are valid and they + // pass verification. + detachBlocks, attachBlocks, detachSpentTxOuts, + err := b.verifyReorganizationValidity(detachNodes, attachNodes) if err != nil { return err } + // Track the old and new best chains heads. + tip := b.bestChain.Tip() + oldBest := tip + newBest := tip + + // Reset the view for the actual connection code below. This is + // required because the view was previously modified when checking if + // the reorg would be successful and the connection code requires the + // view to be valid from the viewpoint of each block being disconnected. + view := NewUtxoViewpoint() + view.SetBestHash(&b.bestChain.Tip().hash) + + // Disconnect blocks from the main chain. + for i, e := 0, detachNodes.Front(); e != nil; i, e = i+1, e.Next() { + n := e.Value.(*blockNode) + block := detachBlocks[i] + + // Load all of the utxos referenced by the block that aren't + // already in the view. 
+ err := view.fetchInputUtxos(b.utxoCache, block) + if err != nil { + return err + } + + // Update the view to unspend all of the spent txos and remove + // the utxos created by the block. + err = view.disconnectTransactions( + b.db, block, detachSpentTxOuts[i], + ) + if err != nil { + return err + } + + // Update the database and chain state. The cache will be flushed + // here before the utxoview modifications happen to the database. + err = b.disconnectBlock(n, block, view) + if err != nil { + return err + } + + newBest = n.parent + } + + // Set the fork point only if there are nodes to attach since otherwise + // blocks are only being disconnected and thus there is no fork point. + var forkNode *blockNode + if attachNodes.Len() > 0 { + forkNode = newBest + } + + // Connect the new best chain blocks using the utxocache directly. It's more + // efficient and since we already checked that the blocks are correct and that + // the transactions connect properly, it's ok to access the cache. If we suddenly + // crash here, we are able to recover as well. + for i, e := 0, attachNodes.Front(); e != nil; i, e = i+1, e.Next() { + n := e.Value.(*blockNode) + block := attachBlocks[i] + + // Update the cache to mark all utxos referenced by the block + // as spent and add all transactions being created by this block + // to it. Also, provide an stxo slice so the spent txout + // details are generated. + stxos := make([]SpentTxOut, 0, countSpentOutputs(block)) + err = b.utxoCache.connectTransactions(block, &stxos) + if err != nil { + return err + } + + // Update the database and chain state. + err = b.connectBlock(n, block, stxos) + if err != nil { + return err + } + + newBest = n + } + + // Log the point where the chain forked and old and new best chain + // heads. 
+ if forkNode != nil { + log.Infof("REORGANIZE: Chain forks at %v (height %v)", forkNode.hash, + forkNode.height) + } + log.Infof("REORGANIZE: Old best chain head was %v (height %v)", + &oldBest.hash, oldBest.height) + log.Infof("REORGANIZE: New best chain head is %v (height %v)", + newBest.hash, newBest.height) + + return nil +} + +// verifyReorganizationValidity will verify that the disconnects and the connects +// that are in the list are able to be processed without mutating the chain. +// +// For the attach nodes, it'll check that each of the blocks are valid and will +// change the status of the block node in the list to invalid if the block fails +// to pass verification. For the detach nodes, it'll check that the blocks being +// detached and their spend journals are present on the database. +func (b *BlockChain) verifyReorganizationValidity(detachNodes, attachNodes *list.List) ( + []*btcutil.Block, []*btcutil.Block, [][]SpentTxOut, error) { + + // Nothing to do if no reorganize nodes were provided. + if detachNodes.Len() == 0 && attachNodes.Len() == 0 { + return nil, nil, nil, nil + } + // Ensure the provided nodes match the current best chain. 
tip := b.bestChain.Tip() if detachNodes.Len() != 0 { firstDetachNode := detachNodes.Front().Value.(*blockNode) if firstDetachNode.hash != tip.hash { - return AssertError(fmt.Sprintf("reorganize nodes to detach are "+ - "not for the current best chain -- first detach node %v, "+ - "current chain %v", &firstDetachNode.hash, &tip.hash)) + return nil, nil, nil, + AssertError(fmt.Sprintf("reorganize nodes to detach are "+ + "not for the current best chain -- first detach node %v, "+ + "current chain %v", &firstDetachNode.hash, &tip.hash)) } } @@ -912,17 +1001,14 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error firstAttachNode := attachNodes.Front().Value.(*blockNode) lastDetachNode := detachNodes.Back().Value.(*blockNode) if firstAttachNode.parent.hash != lastDetachNode.parent.hash { - return AssertError(fmt.Sprintf("reorganize nodes do not have the "+ - "same fork point -- first attach parent %v, last detach "+ - "parent %v", &firstAttachNode.parent.hash, - &lastDetachNode.parent.hash)) + return nil, nil, nil, + AssertError(fmt.Sprintf("reorganize nodes do not have the "+ + "same fork point -- first attach parent %v, last detach "+ + "parent %v", &firstAttachNode.parent.hash, + &lastDetachNode.parent.hash)) } } - // Track the old and new best chains heads. - oldBest := tip - newBest := tip - // All of the blocks to detach and related spend journal entries needed // to unspend transaction outputs in the blocks being disconnected must // be loaded from the database during the reorg check phase below and @@ -937,7 +1023,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // database and using that information to unspend all of the spent txos // and remove the utxos created by the blocks. 
view := NewUtxoViewpoint() - view.SetBestHash(&oldBest.hash) + view.SetBestHash(&tip.hash) for e := detachNodes.Front(); e != nil; e = e.Next() { n := e.Value.(*blockNode) var block *btcutil.Block @@ -947,19 +1033,20 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error return err }) if err != nil { - return err + return nil, nil, nil, err } if n.hash != *block.Hash() { - return AssertError(fmt.Sprintf("detach block node hash %v (height "+ - "%v) does not match previous parent block hash %v", &n.hash, - n.height, block.Hash())) + return nil, nil, nil, AssertError( + fmt.Sprintf("detach block node hash %v (height "+ + "%v) does not match previous parent block hash %v", + &n.hash, n.height, block.Hash())) } // Load all of the utxos referenced by the block that aren't // already in the view. - err = view.fetchInputUtxos(b.db, nil, block) + err = view.fetchInputUtxos(b.utxoCache, block) if err != nil { - return err + return nil, nil, nil, err } // Load all of the spent txos for the block from the spend @@ -970,7 +1057,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error return err }) if err != nil { - return err + return nil, nil, nil, err } // Store the loaded block and spend journal entry for later. @@ -979,17 +1066,8 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error err = view.disconnectTransactions(b.db, block, stxos) if err != nil { - return err + return nil, nil, nil, err } - - newBest = n.parent - } - - // Set the fork point only if there are nodes to attach since otherwise - // blocks are only being disconnected and thus there is no fork point. 
- var forkNode *blockNode - if attachNodes.Len() > 0 { - forkNode = newBest } // Perform several checks to verify each block that needs to be attached @@ -1014,7 +1092,7 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error return err }) if err != nil { - return err + return nil, nil, nil, err } // Store the loaded block for later. @@ -1024,16 +1102,15 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error // checkConnectBlock gets skipped, we still need to update the UTXO // view. if b.index.NodeStatus(n).KnownValid() { - err = view.fetchInputUtxos(b.db, nil, block) + err = view.fetchInputUtxos(b.utxoCache, block) if err != nil { - return err + return nil, nil, nil, err } err = view.connectTransactions(block, nil) if err != nil { - return err + return nil, nil, nil, err } - newBest = n continue } @@ -1054,98 +1131,12 @@ func (b *BlockChain) reorganizeChain(detachNodes, attachNodes *list.List) error b.index.SetStatusFlags(dn, statusInvalidAncestor) } } - return err + return nil, nil, nil, err } b.index.SetStatusFlags(n, statusValid) - - newBest = n } - // Reset the view for the actual connection code below. This is - // required because the view was previously modified when checking if - // the reorg would be successful and the connection code requires the - // view to be valid from the viewpoint of each block being connected or - // disconnected. - view = NewUtxoViewpoint() - view.SetBestHash(&b.bestChain.Tip().hash) - - // Disconnect blocks from the main chain. - for i, e := 0, detachNodes.Front(); e != nil; i, e = i+1, e.Next() { - n := e.Value.(*blockNode) - block := detachBlocks[i] - - // Load all of the utxos referenced by the block that aren't - // already in the view. - err := view.fetchInputUtxos(b.db, nil, block) - if err != nil { - return err - } - - // Update the view to unspend all of the spent txos and remove - // the utxos created by the block. 
- err = view.disconnectTransactions(b.db, block, - detachSpentTxOuts[i]) - if err != nil { - return err - } - - // Update the database and chain state. - err = b.disconnectBlock(n, block, view) - if err != nil { - return err - } - } - - // Connect the new best chain blocks. - for i, e := 0, attachNodes.Front(); e != nil; i, e = i+1, e.Next() { - n := e.Value.(*blockNode) - block := attachBlocks[i] - - // Load all of the utxos referenced by the block that aren't - // already in the view. - err := view.fetchInputUtxos(b.db, nil, block) - if err != nil { - return err - } - - // Update the view to mark all utxos referenced by the block - // as spent and add all transactions being created by this block - // to it. Also, provide an stxo slice so the spent txout - // details are generated. - stxos := make([]SpentTxOut, 0, countSpentOutputs(block)) - err = view.connectTransactions(block, &stxos) - if err != nil { - return err - } - - // Update the database and chain state. - err = b.connectBlock(n, block, view, stxos) - if err != nil { - return err - } - } - - // We call the flush at the end to update the last flush hash to the new - // best tip. - err = b.db.Update(func(dbTx database.Tx) error { - return b.utxoCache.flush(dbTx, FlushRequired, b.BestSnapshot()) - }) - if err != nil { - return err - } - - // Log the point where the chain forked and old and new best chain - // heads. 
- if forkNode != nil { - log.Infof("REORGANIZE: Chain forks at %v (height %v)", forkNode.hash, - forkNode.height) - } - log.Infof("REORGANIZE: Old best chain head was %v (height %v)", - &oldBest.hash, oldBest.height) - log.Infof("REORGANIZE: New best chain head is %v (height %v)", - newBest.hash, newBest.height) - - return nil + return detachBlocks, attachBlocks, detachSpentTxOuts, nil } // connectBestChain handles connecting the passed block to the chain while @@ -1225,7 +1216,7 @@ func (b *BlockChain) connectBestChain(node *blockNode, block *btcutil.Block, fla } // Connect the block to the main chain. - err = b.connectBlock(node, block, nil, stxos) + err = b.connectBlock(node, block, stxos) if err != nil { // If we got hit with a rule error, then we'll mark // that status of the block as invalid and flush the @@ -1821,6 +1812,234 @@ func (b *BlockChain) LocateHeaders(locator BlockLocator, hashStop *chainhash.Has return headers } +// InvalidateBlock invalidates the requested block and all its descedents. If a block +// in the best chain is invalidated, the active chain tip will be the parent of the +// invalidated block. +// +// This function is safe for concurrent access. +func (b *BlockChain) InvalidateBlock(hash *chainhash.Hash) error { + b.chainLock.Lock() + defer b.chainLock.Unlock() + + node := b.index.LookupNode(hash) + if node == nil { + // Return an error if the block doesn't exist. + return fmt.Errorf("Requested block hash of %s is not found "+ + "and thus cannot be invalidated.", hash) + } + if node.height == 0 { + return fmt.Errorf("Requested block hash of %s is a at height 0 "+ + "and is thus a genesis block and cannot be invalidated.", + node.hash) + } + + // Nothing to do if the given block is already invalid. + if node.status.KnownInvalid() { + return nil + } + + // Set the status of the block being invalidated. 
+ b.index.SetStatusFlags(node, statusValidateFailed) + b.index.UnsetStatusFlags(node, statusValid) + + // If the block we're invalidating is not on the best chain, we simply + // mark the block and all its descendants as invalid and return. + if !b.bestChain.Contains(node) { + // Grab all the tips excluding the active tip. + tips := b.index.InactiveTips(b.bestChain) + for _, tip := range tips { + // Continue if the given inactive tip is not a descendant of the block + // being invalidated. + if !tip.IsAncestor(node) { + continue + } + + // Keep going back until we get to the block being invalidated. + // For each of the parent, we'll unset valid status and set invalid + // ancestor status. + for n := tip; n != nil && n != node; n = n.parent { + // Continue if it's already invalid. + if n.status.KnownInvalid() { + continue + } + b.index.SetStatusFlags(n, statusInvalidAncestor) + b.index.UnsetStatusFlags(n, statusValid) + } + } + + if writeErr := b.index.flushToDB(); writeErr != nil { + return fmt.Errorf("Error flushing block index "+ + "changes to disk: %v", writeErr) + } + + // Return since the block being invalidated is on a side branch. + // Nothing else left to do. + return nil + } + + // If we're here, it means a block from the active chain tip is getting + // invalidated. + // + // Grab all the nodes to detach from the active chain. + detachNodes := list.New() + for n := b.bestChain.Tip(); n != nil && n != node; n = n.parent { + // Continue if it's already invalid. + if n.status.KnownInvalid() { + continue + } + + // Change the status of the block node. + b.index.SetStatusFlags(n, statusInvalidAncestor) + b.index.UnsetStatusFlags(n, statusValid) + detachNodes.PushBack(n) + } + + // Push back the block node being invalidated. + detachNodes.PushBack(node) + + // Reorg back to the parent of the block being invalidated. + // Nothing to attach so just pass an empty list. 
+ err := b.reorganizeChain(detachNodes, list.New()) + if err != nil { + return err + } + + if writeErr := b.index.flushToDB(); writeErr != nil { + log.Warnf("Error flushing block index changes to disk: %v", writeErr) + } + + // Grab all the tips. + tips := b.index.InactiveTips(b.bestChain) + tips = append(tips, b.bestChain.Tip()) + + // Here we'll check if the invalidation of the block in the active tip + // changes the status of the chain tips. If a side branch now has more + // worksum, it becomes the active chain tip. + var bestTip *blockNode + for _, tip := range tips { + // Skip invalid tips as they cannot become the active tip. + if tip.status.KnownInvalid() { + continue + } + + // If we have no best tips, then set this tip as the best tip. + if bestTip == nil { + bestTip = tip + } else { + // If there is an existing best tip, then compare it + // against the current tip. + if tip.workSum.Cmp(bestTip.workSum) == 1 { + bestTip = tip + } + } + } + + // Return if the best tip is the current tip. + if bestTip == b.bestChain.Tip() { + return nil + } + + // Reorganize to the best tip if a side branch is now the most work tip. + detachNodes, attachNodes := b.getReorganizeNodes(bestTip) + err = b.reorganizeChain(detachNodes, attachNodes) + + if writeErr := b.index.flushToDB(); writeErr != nil { + log.Warnf("Error flushing block index changes to disk: %v", writeErr) + } + + return err +} + +// ReconsiderBlock reconsiders the validity of the block with the given hash. +// +// This function is safe for concurrent access. +func (b *BlockChain) ReconsiderBlock(hash *chainhash.Hash) error { + b.chainLock.Lock() + defer b.chainLock.Unlock() + + log.Infof("Reconsidering block_hash=%v", hash[:]) + + reconsiderNode := b.index.LookupNode(hash) + if reconsiderNode == nil { + // Return an error if the block doesn't exist. 
+		return fmt.Errorf("requested block hash of %s is not found "+
+			"and thus cannot be reconsidered", hash)
+	}
+
+	// Nothing to do if the given block is already valid.
+	if reconsiderNode.status.KnownValid() {
+		log.Infof("block_hash=%x is valid, nothing to reconsider", hash[:])
+		return nil
+	}
+
+	// Clear the status of the block being reconsidered.
+	b.index.UnsetStatusFlags(reconsiderNode, statusInvalidAncestor)
+	b.index.UnsetStatusFlags(reconsiderNode, statusValidateFailed)
+
+	// Grab all the tips.
+	tips := b.index.InactiveTips(b.bestChain)
+	tips = append(tips, b.bestChain.Tip())
+
+	log.Debugf("Examining %v inactive chain tips for reconsideration", len(tips))
+
+	// Go through all the tips and unset the status for all the descendants of the
+	// block being reconsidered.
+	var reconsiderTip *blockNode
+	for _, tip := range tips {
+		// Continue if the given inactive tip is not a descendant of the block
+		// being reconsidered.
+		if !tip.IsAncestor(reconsiderNode) {
+			// Set as the reconsider tip if the block node being reconsidered
+			// is a tip.
+			if tip == reconsiderNode {
+				reconsiderTip = reconsiderNode
+			}
+			continue
+		}
+
+		// Mark the current tip as the tip being reconsidered.
+		reconsiderTip = tip
+
+		// Unset the status of all the parents up until it reaches the block
+		// being reconsidered.
+		for n := tip; n != nil && n != reconsiderNode; n = n.parent {
+			b.index.UnsetStatusFlags(n, statusInvalidAncestor)
+		}
+	}
+
+	// Compare the cumulative work for the branch being reconsidered.
+	bestTipWork := b.bestChain.Tip().workSum
+	if reconsiderTip.workSum.Cmp(bestTipWork) <= 0 {
+		log.Debugf("Tip to reconsider has less cumulative work than current "+
+			"chain tip: %v vs %v", reconsiderTip.workSum, bestTipWork)
+		return nil
+	}
+
+	// If the reconsider tip has a higher cumulative work, then reorganize
+	// to it after checking the validity of the nodes.
+ detachNodes, attachNodes := b.getReorganizeNodes(reconsiderTip) + + // We're checking if the reorganization that'll happen is actually valid. + // While this is called in reorganizeChain, we call it beforehand as the error + // returned from reorganizeChain doesn't differentiate between actual disconnect/ + // connect errors or whether the branch we're trying to fork to is invalid. + // + // The block status changes here without being flushed so we immediately flush + // the blockindex after we call this function. + _, _, _, err := b.verifyReorganizationValidity(detachNodes, attachNodes) + if writeErr := b.index.flushToDB(); writeErr != nil { + log.Warnf("Error flushing block index changes to disk: %v", writeErr) + } + if err != nil { + // If we errored out during the verification of the reorg branch, + // it's ok to return nil as we reconsidered the block and determined + // that it's invalid. + return nil + } + + return b.reorganizeChain(detachNodes, attachNodes) +} + // IndexManager provides a generic interface that the is called when blocks are // connected and disconnected to and from the tip of the main chain for the // purpose of supporting optional indexes. 
diff --git a/blockchain/chain_test.go b/blockchain/chain_test.go index 1ac08f9a76..b3bccf56f7 100644 --- a/blockchain/chain_test.go +++ b/blockchain/chain_test.go @@ -6,10 +6,12 @@ package blockchain import ( "fmt" + "math/rand" "reflect" "testing" "time" + "github.com/btcsuite/btcd/blockchain/internal/testhelper" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -782,7 +784,7 @@ func TestLocateInventory(t *testing.T) { &test.hashStop) } if !reflect.DeepEqual(headers, test.headers) { - t.Errorf("%s: unxpected headers -- got %v, want %v", + t.Errorf("%s: unexpected headers -- got %v, want %v", test.name, headers, test.headers) continue } @@ -795,7 +797,7 @@ func TestLocateInventory(t *testing.T) { hashes := chain.LocateBlocks(test.locator, &test.hashStop, maxAllowed) if !reflect.DeepEqual(hashes, test.hashes) { - t.Errorf("%s: unxpected hashes -- got %v, want %v", + t.Errorf("%s: unexpected hashes -- got %v, want %v", test.name, hashes, test.hashes) continue } @@ -888,7 +890,7 @@ func TestHeightToHashRange(t *testing.T) { } if !reflect.DeepEqual(hashes, test.hashes) { - t.Errorf("%s: unxpected hashes -- got %v, want %v", + t.Errorf("%s: unexpected hashes -- got %v, want %v", test.name, hashes, test.hashes) } } @@ -960,7 +962,7 @@ func TestIntervalBlockHashes(t *testing.T) { } if !reflect.DeepEqual(hashes, test.hashes) { - t.Errorf("%s: unxpected hashes -- got %v, want %v", + t.Errorf("%s: unexpected hashes -- got %v, want %v", test.name, hashes, test.hashes) } } @@ -1155,3 +1157,694 @@ func TestChainTips(t *testing.T) { } } } + +func TestIsAncestor(t *testing.T) { + // Construct a synthetic block chain with a block index consisting of + // the following structure. 
+ // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + tip := tstTip + chain := newFakeChain(&chaincfg.MainNetParams) + branch0Nodes := chainedNodes(chain.bestChain.Genesis(), 3) + for _, node := range branch0Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + chain.bestChain.SetTip(tip(branch0Nodes)) + + branch1Nodes := chainedNodes(chain.bestChain.Genesis(), 1) + for _, node := range branch1Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValid) + chain.index.AddNode(node) + } + + branch2Nodes := chainedNodes(chain.bestChain.Genesis(), 1) + for _, node := range branch2Nodes { + chain.index.SetStatusFlags(node, statusDataStored) + chain.index.SetStatusFlags(node, statusValidateFailed) + chain.index.AddNode(node) + } + + // Is 1 an ancestor of 3? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeTrue := branch0Nodes[2].IsAncestor(branch0Nodes[0]) + if !shouldBeTrue { + t.Errorf("TestIsAncestor fail. Node %s is an ancestor of node %s but got false", + branch0Nodes[0].hash.String(), branch0Nodes[2].hash.String()) + } + + // Is 1 an ancestor of 2? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeTrue = branch0Nodes[1].IsAncestor(branch0Nodes[0]) + if !shouldBeTrue { + t.Errorf("TestIsAncestor fail. Node %s is an ancestor of node %s but got false", + branch0Nodes[0].hash.String(), branch0Nodes[1].hash.String()) + } + + // Is the genesis an ancestor of 1? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeTrue = branch0Nodes[0].IsAncestor(chain.bestChain.Genesis()) + if !shouldBeTrue { + t.Errorf("TestIsAncestor fail. 
The genesis block is an ancestor of all blocks "+ + "but got false for node %s", + branch0Nodes[0].hash.String()) + } + + // Is the genesis an ancestor of 1a? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeTrue = branch1Nodes[0].IsAncestor(chain.bestChain.Genesis()) + if !shouldBeTrue { + t.Errorf("TestIsAncestor fail. The genesis block is an ancestor of all blocks "+ + "but got false for node %s", + branch1Nodes[0].hash.String()) + } + + // Is the genesis an ancestor of 1b? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeTrue = branch2Nodes[0].IsAncestor(chain.bestChain.Genesis()) + if !shouldBeTrue { + t.Errorf("TestIsAncestor fail. The genesis block is an ancestor of all blocks "+ + "but got false for node %s", + branch2Nodes[0].hash.String()) + } + + // Is 1 an ancestor of 1a? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeFalse := branch1Nodes[0].IsAncestor(branch0Nodes[0]) + if shouldBeFalse { + t.Errorf("TestIsAncestor fail. Node %s is in a different branch than "+ + "node %s but got true", branch1Nodes[0].hash.String(), + branch0Nodes[0].hash.String()) + } + + // Is 1 an ancestor of 1b? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeFalse = branch2Nodes[0].IsAncestor(branch0Nodes[0]) + if shouldBeFalse { + t.Errorf("TestIsAncestor fail. Node %s is in a different branch than "+ + "node %s but got true", branch2Nodes[0].hash.String(), + branch0Nodes[0].hash.String()) + } + + // Is 1a an ancestor of 1b? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeFalse = branch2Nodes[0].IsAncestor(branch1Nodes[0]) + if shouldBeFalse { + t.Errorf("TestIsAncestor fail. 
Node %s is in a different branch than "+ + "node %s but got true", branch2Nodes[0].hash.String(), + branch1Nodes[0].hash.String()) + } + + // Is 1 an ancestor of 1? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeFalse = branch0Nodes[0].IsAncestor(branch0Nodes[0]) + if shouldBeFalse { + t.Errorf("TestIsAncestor fail. Node is not an ancestor of itself but got true for node %s", + branch0Nodes[0].hash.String()) + } + + // Is the geneis an ancestor of genesis? + // + // genesis -> 1 -> 2 -> 3 (active) + // \ -> 1a (valid-fork) + // \ -> 1b (invalid) + shouldBeFalse = chain.bestChain.Genesis().IsAncestor(chain.bestChain.Genesis()) + if shouldBeFalse { + t.Errorf("TestIsAncestor fail. Node is not an ancestor of itself but got true for node %s", + chain.bestChain.Genesis().hash.String()) + } + + // Is a block from another chain an ancestor of 1b? + fakeChain := newFakeChain(&chaincfg.TestNet3Params) + shouldBeFalse = branch2Nodes[0].IsAncestor(fakeChain.bestChain.Genesis()) + if shouldBeFalse { + t.Errorf("TestIsAncestor fail. Node %s is in a different chain than "+ + "node %s but got true", fakeChain.bestChain.Genesis().hash.String(), + branch2Nodes[0].hash.String()) + } +} + +// randomSelect selects random amount of random elements from a slice and returns a +// new slice. The selected elements are removed. +func randomSelect(input []*testhelper.SpendableOut) ( + []*testhelper.SpendableOut, []*testhelper.SpendableOut) { + + selected := []*testhelper.SpendableOut{} + + // Select random elements from the input slice + amount := rand.Intn(len(input)) + for i := 0; i < amount; i++ { + // Generate a random index + randIdx := rand.Intn(len(input)) + + // Append the selected element to the new slice + selected = append(selected, input[randIdx]) + + // Remove the selected element from the input slice. + // This ensures that each selected element is unique. + input = append(input[:randIdx], input[randIdx+1:]...) 
+ } + + return input, selected +} + +// addBlocks generates new blocks and adds them to the chain. The newly generated +// blocks will spend from the spendable outputs passed in. The returned hases are +// the hashes of the newly generated blocks. +func addBlocks(count int, chain *BlockChain, prevBlock *btcutil.Block, + allSpendableOutputs []*testhelper.SpendableOut) ( + []*chainhash.Hash, [][]*testhelper.SpendableOut, error) { + + blockHashes := make([]*chainhash.Hash, 0, count) + spendablesOuts := make([][]*testhelper.SpendableOut, 0, count) + + // Always spend everything on the first block. This ensures we get unique blocks + // every time. The random select may choose not to spend any and that results + // in getting the same block. + nextSpends := allSpendableOutputs + allSpendableOutputs = allSpendableOutputs[:0] + for b := 0; b < count; b++ { + newBlock, newSpendableOuts, err := addBlock(chain, prevBlock, nextSpends) + if err != nil { + return nil, nil, err + } + prevBlock = newBlock + + blockHashes = append(blockHashes, newBlock.Hash()) + spendablesOuts = append(spendablesOuts, newSpendableOuts) + allSpendableOutputs = append(allSpendableOutputs, newSpendableOuts...) + + // Grab utxos to be spent in the next block. + allSpendableOutputs, nextSpends = randomSelect(allSpendableOutputs) + } + + return blockHashes, spendablesOuts, nil +} + +func TestInvalidateBlock(t *testing.T) { + tests := []struct { + name string + chainGen func() (*BlockChain, []*chainhash.Hash, func()) + }{ + { + name: "one branch, invalidate once", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain( + "TestInvalidateBlock-one-branch-" + + "invalidate-once") + // Grab the tip of the chain. + tip := btcutil.NewBlock(params.GenesisBlock) + + // Create a chain with 11 blocks. + _, _, err := addBlocks(11, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Invalidate block 5. 
+ block, err := chain.BlockByHeight(5) + if err != nil { + t.Fatal(err) + } + invalidateHash := block.Hash() + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + { + name: "invalidate twice", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestInvalidateBlock-invalidate-twice") + // Grab the tip of the chain. + tip := btcutil.NewBlock(params.GenesisBlock) + + // Create a chain with 11 blocks. + _, spendableOuts, err := addBlocks(11, chain, tip, []*testhelper.SpendableOut{}) + //_, _, err := addBlocks(11, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Set invalidateHash as block 5. + block, err := chain.BlockByHeight(5) + if err != nil { + t.Fatal(err) + } + invalidateHash := block.Hash() + + // Create a side chain with 7 blocks that builds on block 1. + b1, err := chain.BlockByHeight(1) + if err != nil { + t.Fatal(err) + } + altBlockHashes, _, err := addBlocks(6, chain, b1, spendableOuts[0]) + if err != nil { + t.Fatal(err) + } + + // Grab block at height 5: + // + // b2, b3, b4, b5 + // 0, 1, 2, 3 + invalidateHash2 := altBlockHashes[3] + + // Sanity checking that we grabbed the correct hash. + node := chain.index.LookupNode(invalidateHash2) + if node == nil || node.height != 5 { + t.Fatalf("wanted to grab block at height 5 but got height %v", + node.height) + } + + return chain, []*chainhash.Hash{invalidateHash, invalidateHash2}, tearDown + }, + }, + { + name: "invalidate a side branch", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestInvalidateBlock-invalidate-side-branch") + tip := btcutil.NewBlock(params.GenesisBlock) + + // Grab the tip of the chain. + tip, err := chain.BlockByHash(&chain.bestChain.Tip().hash) + if err != nil { + t.Fatal(err) + } + + // Create a chain with 11 blocks. 
+ _, spendableOuts, err := addBlocks(11, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Create a side chain with 7 blocks that builds on block 1. + b1, err := chain.BlockByHeight(1) + if err != nil { + t.Fatal(err) + } + altBlockHashes, _, err := addBlocks(6, chain, b1, spendableOuts[0]) + if err != nil { + t.Fatal(err) + } + + // Grab block at height 4: + // + // b2, b3, b4 + // 0, 1, 2 + invalidateHash := altBlockHashes[2] + + // Sanity checking that we grabbed the correct hash. + node := chain.index.LookupNode(invalidateHash) + if node == nil || node.height != 4 { + t.Fatalf("wanted to grab block at height 4 but got height %v", + node.height) + } + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + } + + for _, test := range tests { + chain, invalidateHashes, tearDown := test.chainGen() + func() { + defer tearDown() + for _, invalidateHash := range invalidateHashes { + chainTipsBefore := chain.ChainTips() + + // Mark if we're invalidating a block that's a part of the best chain. + var bestChainBlock bool + node := chain.index.LookupNode(invalidateHash) + if chain.bestChain.Contains(node) { + bestChainBlock = true + } + + // Actual invalidation. + err := chain.InvalidateBlock(invalidateHash) + if err != nil { + t.Fatal(err) + } + + chainTipsAfter := chain.ChainTips() + + // Create a map for easy lookup. + chainTipMap := make(map[chainhash.Hash]ChainTip, len(chainTipsAfter)) + activeTipCount := 0 + for _, chainTip := range chainTipsAfter { + chainTipMap[chainTip.BlockHash] = chainTip + + if chainTip.Status == StatusActive { + activeTipCount++ + } + } + if activeTipCount != 1 { + t.Fatalf("TestInvalidateBlock fail. Expected "+ + "1 active chain tip but got %d", activeTipCount) + } + + bestTip := chain.bestChain.Tip() + + validForkCount := 0 + for _, tip := range chainTipsBefore { + // If the chaintip was an active tip and we invalidated a block + // in the active tip, assert that it's invalid now. 
+ if bestChainBlock && tip.Status == StatusActive { + gotTip, found := chainTipMap[tip.BlockHash] + if !found { + t.Fatalf("TestInvalidateBlock fail. Expected "+ + "block %s not found in chaintips after "+ + "invalidateblock", tip.BlockHash.String()) + } + + if gotTip.Status != StatusInvalid { + t.Fatalf("TestInvalidateBlock fail. "+ + "Expected block %s to be invalid, got status: %s", + gotTip.BlockHash.String(), gotTip.Status) + } + } + + if !bestChainBlock && tip.Status != StatusActive { + gotTip, found := chainTipMap[tip.BlockHash] + if !found { + t.Fatalf("TestInvalidateBlock fail. Expected "+ + "block %s not found in chaintips after "+ + "invalidateblock", tip.BlockHash.String()) + } + + if gotTip.BlockHash == *invalidateHash && gotTip.Status != StatusInvalid { + t.Fatalf("TestInvalidateBlock fail. "+ + "Expected block %s to be invalid, got status: %s", + gotTip.BlockHash.String(), gotTip.Status) + } + } + + // If we're not invalidating the branch with an active tip, + // we expect the active tip to remain the same. + if !bestChainBlock && tip.Status == StatusActive && tip.BlockHash != bestTip.hash { + t.Fatalf("TestInvalidateBlock fail. Expected block %s as the tip but got %s", + tip.BlockHash.String(), bestTip.hash.String()) + } + + // If this tip is not invalid and not active, it should be + // lighter than the current best tip. + if tip.Status != StatusActive && tip.Status != StatusInvalid && + tip.Height > bestTip.height { + + tipNode := chain.index.LookupNode(&tip.BlockHash) + if bestTip.workSum.Cmp(tipNode.workSum) == -1 { + t.Fatalf("TestInvalidateBlock fail. Expected "+ + "block %s to be the active tip but block %s "+ + "was", tipNode.hash.String(), bestTip.hash.String()) + } + } + + if tip.Status == StatusValidFork { + validForkCount++ + } + } + + // If there are no other valid chain tips besides the active chaintip, + // we expect to have one more chain tip after the invalidate. 
+ if validForkCount == 0 && len(chainTipsAfter) != len(chainTipsBefore)+1 { + t.Fatalf("TestInvalidateBlock fail. Expected %d chaintips but got %d", + len(chainTipsBefore)+1, len(chainTipsAfter)) + } + } + + // Try to invalidate the already invalidated hash. + err := chain.InvalidateBlock(invalidateHashes[0]) + if err != nil { + t.Fatal(err) + } + + // Try to invalidate a genesis block + err = chain.InvalidateBlock(chain.chainParams.GenesisHash) + if err == nil { + t.Fatalf("TestInvalidateBlock fail. Expected to err when trying to " + + "invalidate a genesis block.") + } + + // Try to invalidate a block that doesn't exist. + err = chain.InvalidateBlock(chaincfg.MainNetParams.GenesisHash) + if err == nil { + t.Fatalf("TestInvalidateBlock fail. Expected to err when trying to " + + "invalidate a block that doesn't exist.") + } + }() + } +} + +func TestReconsiderBlock(t *testing.T) { + tests := []struct { + name string + chainGen func() (*BlockChain, []*chainhash.Hash, func()) + }{ + { + name: "one branch, invalidate once and revalidate", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-one-branch-invalidate-once") + + // Create a chain with 101 blocks. + tip := btcutil.NewBlock(params.GenesisBlock) + _, _, err := addBlocks(101, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Invalidate block 5. + block, err := chain.BlockByHeight(5) + if err != nil { + t.Fatal(err) + } + invalidateHash := block.Hash() + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + { + name: "invalidate the active branch with a side branch present and revalidate", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-invalidate-with-side-branch") + + // Create a chain with 101 blocks. 
+ tip := btcutil.NewBlock(params.GenesisBlock) + _, spendableOuts, err := addBlocks(101, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Invalidate block 5. + block, err := chain.BlockByHeight(5) + if err != nil { + t.Fatal(err) + } + invalidateHash := block.Hash() + + // Create a side chain with 7 blocks that builds on block 1. + b1, err := chain.BlockByHeight(1) + if err != nil { + t.Fatal(err) + } + _, _, err = addBlocks(6, chain, b1, spendableOuts[0]) + if err != nil { + t.Fatal(err) + } + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + { + name: "invalidate a side branch and revalidate it", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-invalidate-a-side-branch") + + // Create a chain with 101 blocks. + tip := btcutil.NewBlock(params.GenesisBlock) + _, spendableOuts, err := addBlocks(101, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Create a side chain with 7 blocks that builds on block 1. + b1, err := chain.BlockByHeight(1) + if err != nil { + t.Fatal(err) + } + altBlockHashes, _, err := addBlocks(6, chain, b1, spendableOuts[0]) + if err != nil { + t.Fatal(err) + } + // Grab block at height 4: + // + // b2, b3, b4, b5 + // 0, 1, 2, 3 + invalidateHash := altBlockHashes[2] + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + { + name: "reconsider an invalid side branch with a higher work", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-reconsider-an-invalid-side-branch-higher") + + tip := btcutil.NewBlock(params.GenesisBlock) + _, spendableOuts, err := addBlocks(6, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Select utxos to be spent from the best block and + // modify the amount so that the block will be invalid. 
+ nextSpends, _ := randomSelect(spendableOuts[len(spendableOuts)-1]) + nextSpends[0].Amount += testhelper.LowFee + + // Make an invalid block that best on top of the current tip. + bestBlock, err := chain.BlockByHash(&chain.BestSnapshot().Hash) + if err != nil { + t.Fatal(err) + } + invalidBlock, _, _ := newBlock(chain, bestBlock, nextSpends) + invalidateHash := invalidBlock.Hash() + + // The block validation will fail here and we'll mark the + // block as invalid in the block index. + chain.ProcessBlock(invalidBlock, BFNone) + + // Modify the amount again so it's valid. + nextSpends[0].Amount -= testhelper.LowFee + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + { + name: "reconsider an invalid side branch with a lower work", + chainGen: func() (*BlockChain, []*chainhash.Hash, func()) { + chain, params, tearDown := utxoCacheTestChain("TestReconsiderBlock-reconsider-an-invalid-side-branch-lower") + + tip := btcutil.NewBlock(params.GenesisBlock) + _, spendableOuts, err := addBlocks(6, chain, tip, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + // Select utxos to be spent from the best block and + // modify the amount so that the block will be invalid. + nextSpends, _ := randomSelect(spendableOuts[len(spendableOuts)-1]) + nextSpends[0].Amount += testhelper.LowFee + + // Make an invalid block that best on top of the current tip. + bestBlock, err := chain.BlockByHash(&chain.BestSnapshot().Hash) + if err != nil { + t.Fatal(err) + } + invalidBlock, _, _ := newBlock(chain, bestBlock, nextSpends) + invalidateHash := invalidBlock.Hash() + + // The block validation will fail here and we'll mark the + // block as invalid in the block index. + chain.ProcessBlock(invalidBlock, BFNone) + + // Modify the amount again so it's valid. + nextSpends[0].Amount -= testhelper.LowFee + + // Add more blocks to make the invalid block a + // side chain and not the most pow. 
+ _, _, err = addBlocks(3, chain, bestBlock, []*testhelper.SpendableOut{}) + if err != nil { + t.Fatal(err) + } + + return chain, []*chainhash.Hash{invalidateHash}, tearDown + }, + }, + } + + for _, test := range tests { + chain, invalidateHashes, tearDown := test.chainGen() + func() { + defer tearDown() + for _, invalidateHash := range invalidateHashes { + // Cache the chain tips before the invalidate. Since we'll reconsider + // the invalidated block, we should come back to these tips in the end. + tips := chain.ChainTips() + expectedChainTips := make(map[chainhash.Hash]ChainTip, len(tips)) + for _, tip := range tips { + expectedChainTips[tip.BlockHash] = tip + } + + // Invalidation. + err := chain.InvalidateBlock(invalidateHash) + if err != nil { + t.Fatal(err) + } + + // Reconsideration. + err = chain.ReconsiderBlock(invalidateHash) + if err != nil { + t.Fatal(err) + } + + // Compare the tips against the tips we've cached. + gotChainTips := chain.ChainTips() + for _, gotChainTip := range gotChainTips { + testChainTip, found := expectedChainTips[gotChainTip.BlockHash] + if !found { + t.Errorf("TestReconsiderBlock Failed test \"%s\". Couldn't find an expected "+ + "chain tip with height %d, hash %s, branchlen %d, status \"%s\"", + test.name, gotChainTip.Height, gotChainTip.BlockHash.String(), + gotChainTip.BranchLen, gotChainTip.Status.String()) + } + + // If the invalid side branch is a lower work, we'll never + // actually process the block again until the branch becomes + // a greater work chain so it'll show up as valid-fork. + if test.name == "reconsider an invalid side branch with a lower work" && + testChainTip.BlockHash == *invalidateHash { + + testChainTip.Status = StatusValidFork + } + + if !reflect.DeepEqual(testChainTip, gotChainTip) { + t.Errorf("TestReconsiderBlock Failed test \"%s\". 
Expected chain tip with "+ + "height %d, hash %s, branchlen %d, status \"%s\" but got "+ + "height %d, hash %s, branchlen %d, status \"%s\"", test.name, + testChainTip.Height, testChainTip.BlockHash.String(), + testChainTip.BranchLen, testChainTip.Status.String(), + gotChainTip.Height, gotChainTip.BlockHash.String(), + gotChainTip.BranchLen, gotChainTip.Status.String()) + } + } + } + }() + } +} diff --git a/blockchain/chainio.go b/blockchain/chainio.go index 75474021f8..3340dd14a0 100644 --- a/blockchain/chainio.go +++ b/blockchain/chainio.go @@ -247,7 +247,7 @@ type SpentTxOut struct { // Amount is the amount of the output. Amount int64 - // PkScipt is the public key script for the output. + // PkScript is the public key script for the output. PkScript []byte // Height is the height of the block containing the creating tx. diff --git a/blockchain/chainio_test.go b/blockchain/chainio_test.go index 630af14e1c..e9e2c0b616 100644 --- a/blockchain/chainio_test.go +++ b/blockchain/chainio_test.go @@ -23,7 +23,7 @@ func TestErrNotInMainChain(t *testing.T) { // Ensure the stringized output for the error is as expected. if err.Error() != errStr { - t.Fatalf("errNotInMainChain retuned unexpected error string - "+ + t.Fatalf("errNotInMainChain returned unexpected error string - "+ "got %q, want %q", err.Error(), errStr) } @@ -403,7 +403,7 @@ func TestSpendJournalErrors(t *testing.T) { } // TestUtxoSerialization ensures serializing and deserializing unspent -// trasaction output entries works as expected. +// transaction output entries works as expected. 
func TestUtxoSerialization(t *testing.T) { t.Parallel() diff --git a/blockchain/common_test.go b/blockchain/common_test.go index 1973689ea1..12badd3ec0 100644 --- a/blockchain/common_test.go +++ b/blockchain/common_test.go @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/btcsuite/btcd/blockchain/internal/testhelper" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -343,7 +344,7 @@ func (b *BlockChain) TstSetCoinbaseMaturity(maturity uint16) { b.chainParams.CoinbaseMaturity = maturity } -// newFakeChain returns a chain that is usable for syntetic tests. It is +// newFakeChain returns a chain that is usable for synthetic tests. It is // important to note that this chain has no database associated with it, so // it is not usable with all functions and the tests must take care when making // use of it. @@ -396,3 +397,96 @@ func newFakeNode(parent *blockNode, blockVersion int32, bits uint32, timestamp t } return newBlockNode(header, parent) } + +// addBlock adds a block to the blockchain that succeeds the previous block. +// The blocks spends all the provided spendable outputs. The new block and +// the new spendable outputs created in the block are returned. +func addBlock(chain *BlockChain, prev *btcutil.Block, spends []*testhelper.SpendableOut) ( + *btcutil.Block, []*testhelper.SpendableOut, error) { + + block, outs, err := newBlock(chain, prev, spends) + if err != nil { + return nil, nil, err + } + + _, _, err = chain.ProcessBlock(block, BFNone) + if err != nil { + return nil, nil, err + } + + return block, outs, nil +} + +// calcMerkleRoot creates a merkle tree from the slice of transactions and +// returns the root of the tree. 
+func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash { + if len(txns) == 0 { + return chainhash.Hash{} + } + + utilTxns := make([]*btcutil.Tx, 0, len(txns)) + for _, tx := range txns { + utilTxns = append(utilTxns, btcutil.NewTx(tx)) + } + return CalcMerkleRoot(utilTxns, false) +} + +// newBlock creates a block to the blockchain that succeeds the previous block. +// The blocks spends all the provided spendable outputs. The new block and the +// newly spendable outputs created in the block are returned. +func newBlock(chain *BlockChain, prev *btcutil.Block, + spends []*testhelper.SpendableOut) (*btcutil.Block, []*testhelper.SpendableOut, error) { + + blockHeight := prev.Height() + 1 + txns := make([]*wire.MsgTx, 0, 1+len(spends)) + + // Create and add coinbase tx. + cb := testhelper.CreateCoinbaseTx(blockHeight, CalcBlockSubsidy(blockHeight, chain.chainParams)) + txns = append(txns, cb) + + // Spend all txs to be spent. + for _, spend := range spends { + cb.TxOut[0].Value += int64(testhelper.LowFee) + + spendTx := testhelper.CreateSpendTx(spend, testhelper.LowFee) + txns = append(txns, spendTx) + } + + // Use a timestamp that is one second after the previous block unless + // this is the first block in which case the current time is used. + var ts time.Time + if blockHeight == 1 { + ts = time.Unix(time.Now().Unix(), 0) + } else { + ts = prev.MsgBlock().Header.Timestamp.Add(time.Second) + } + + // Create the block. The nonce will be solved in the below code in + // SolveBlock. + block := btcutil.NewBlock(&wire.MsgBlock{ + Header: wire.BlockHeader{ + Version: 1, + PrevBlock: *prev.Hash(), + MerkleRoot: calcMerkleRoot(txns), + Bits: chain.chainParams.PowLimitBits, + Timestamp: ts, + Nonce: 0, // To be solved. + }, + Transactions: txns, + }) + block.SetHeight(blockHeight) + + // Solve the block. 
+ if !testhelper.SolveBlock(&block.MsgBlock().Header) { + return nil, nil, fmt.Errorf("Unable to solve block at height %d", blockHeight) + } + + // Create spendable outs to return. + outs := make([]*testhelper.SpendableOut, len(txns)) + for i, tx := range txns { + out := testhelper.MakeSpendableOutForTx(tx, 0) + outs[i] = &out + } + + return block, outs, nil +} diff --git a/blockchain/difficulty.go b/blockchain/difficulty.go index 1fa850cc37..b1e39b9d62 100644 --- a/blockchain/difficulty.go +++ b/blockchain/difficulty.go @@ -8,31 +8,14 @@ import ( "math/big" "time" + "github.com/btcsuite/btcd/blockchain/internal/workmath" "github.com/btcsuite/btcd/chaincfg/chainhash" ) -var ( - // bigOne is 1 represented as a big.Int. It is defined here to avoid - // the overhead of creating it multiple times. - bigOne = big.NewInt(1) - - // oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid - // the overhead of creating it multiple times. - oneLsh256 = new(big.Int).Lsh(bigOne, 256) -) - // HashToBig converts a chainhash.Hash into a big.Int that can be used to // perform math comparisons. func HashToBig(hash *chainhash.Hash) *big.Int { - // A Hash is in little-endian, but the big package wants the bytes in - // big-endian, so reverse them. - buf := *hash - blen := len(buf) - for i := 0; i < blen/2; i++ { - buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i] - } - - return new(big.Int).SetBytes(buf[:]) + return workmath.HashToBig(hash) } // CompactToBig converts a compact representation of a whole number N to an @@ -60,31 +43,7 @@ func HashToBig(hash *chainhash.Hash) *big.Int { // which represent difficulty targets, thus there really is not a need for a // sign bit, but it is implemented here to stay consistent with bitcoind. func CompactToBig(compact uint32) *big.Int { - // Extract the mantissa, sign bit, and exponent. 
- mantissa := compact & 0x007fffff - isNegative := compact&0x00800000 != 0 - exponent := uint(compact >> 24) - - // Since the base for the exponent is 256, the exponent can be treated - // as the number of bytes to represent the full 256-bit number. So, - // treat the exponent as the number of bytes and shift the mantissa - // right or left accordingly. This is equivalent to: - // N = mantissa * 256^(exponent-3) - var bn *big.Int - if exponent <= 3 { - mantissa >>= 8 * (3 - exponent) - bn = big.NewInt(int64(mantissa)) - } else { - bn = big.NewInt(int64(mantissa)) - bn.Lsh(bn, 8*(exponent-3)) - } - - // Make it negative if the sign bit is set. - if isNegative { - bn = bn.Neg(bn) - } - - return bn + return workmath.CompactToBig(compact) } // BigToCompact converts a whole number N to a compact representation using @@ -92,41 +51,7 @@ func CompactToBig(compact uint32) *big.Int { // of precision, so values larger than (2^23 - 1) only encode the most // significant digits of the number. See CompactToBig for details. func BigToCompact(n *big.Int) uint32 { - // No need to do any work if it's zero. - if n.Sign() == 0 { - return 0 - } - - // Since the base for the exponent is 256, the exponent can be treated - // as the number of bytes. So, shift the number right or left - // accordingly. This is equivalent to: - // mantissa = mantissa / 256^(exponent-3) - var mantissa uint32 - exponent := uint(len(n.Bytes())) - if exponent <= 3 { - mantissa = uint32(n.Bits()[0]) - mantissa <<= 8 * (3 - exponent) - } else { - // Use a copy to avoid modifying the caller's original number. - tn := new(big.Int).Set(n) - mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0]) - } - - // When the mantissa already has the sign bit set, the number is too - // large to fit into the available 23-bits, so divide the number by 256 - // and increment the exponent accordingly. 
- if mantissa&0x00800000 != 0 { - mantissa >>= 8 - exponent++ - } - - // Pack the exponent, sign bit, and mantissa into an unsigned 32-bit - // int and return it. - compact := uint32(exponent<<24) | mantissa - if n.Sign() < 0 { - compact |= 0x00800000 - } - return compact + return workmath.BigToCompact(n) } // CalcWork calculates a work value from difficulty bits. Bitcoin increases @@ -140,17 +65,7 @@ func BigToCompact(n *big.Int) uint32 { // potential division by zero and really small floating point numbers, the // result adds 1 to the denominator and multiplies the numerator by 2^256. func CalcWork(bits uint32) *big.Int { - // Return a work value of zero if the passed difficulty bits represent - // a negative number. Note this should not happen in practice with valid - // blocks, but an invalid block could trigger it. - difficultyNum := CompactToBig(bits) - if difficultyNum.Sign() <= 0 { - return big.NewInt(0) - } - - // (1 << 256) / (difficultyNum + 1) - denominator := new(big.Int).Add(difficultyNum, bigOne) - return new(big.Int).Div(oneLsh256, denominator) + return workmath.CalcWork(bits) } // calcEasiestDifficulty calculates the easiest possible difficulty that a block diff --git a/blockchain/error.go b/blockchain/error.go index 1e7c879ba0..dc40222235 100644 --- a/blockchain/error.go +++ b/blockchain/error.go @@ -70,7 +70,7 @@ const ( // ErrUnexpectedDifficulty indicates specified bits do not align with // the expected value either because it doesn't match the calculated - // valued based on difficulty regarted rules or it is out of the valid + // valued based on difficulty regarded rules or it is out of the valid // range. 
ErrUnexpectedDifficulty diff --git a/blockchain/fullblocks_test.go b/blockchain/fullblocks_test.go index d6bcf799af..591414d1d0 100644 --- a/blockchain/fullblocks_test.go +++ b/blockchain/fullblocks_test.go @@ -146,6 +146,70 @@ func TestFullBlocks(t *testing.T) { } defer teardownFunc() + testBlockDisconnectExpectUTXO := func(item fullblocktests.BlockDisconnectExpectUTXO) { + expectedCallBack := func(notification *blockchain.Notification) { + switch notification.Type { + + case blockchain.NTBlockDisconnected: + block, ok := notification.Data.(*btcutil.Block) + if !ok { + t.Fatalf("expected a block") + } + + // Return early if the block we get isn't the relevant + // block. + if !block.Hash().IsEqual(&item.BlockHash) { + return + } + + entry, err := chain.FetchUtxoEntry(item.OutPoint) + if err != nil { + t.Fatal(err) + } + + if entry == nil || entry.IsSpent() { + t.Logf("expected utxo %v to exist but it's "+ + "nil or spent\n", item.OutPoint.String()) + t.Fatalf("expected utxo %v to exist but it's "+ + "nil or spent", item.OutPoint.String()) + } + } + } + unexpectedCallBack := func(notification *blockchain.Notification) { + switch notification.Type { + case blockchain.NTBlockDisconnected: + block, ok := notification.Data.(*btcutil.Block) + if !ok { + t.Fatalf("expected a block") + } + + // Return early if the block we get isn't the relevant + // block. 
+ if !block.Hash().IsEqual(&item.BlockHash) { + return + } + + entry, err := chain.FetchUtxoEntry(item.OutPoint) + if err != nil { + t.Fatal(err) + } + + if entry != nil && !entry.IsSpent() { + t.Logf("unexpected utxo %v to exist but it's "+ + "not nil and not spent", item.OutPoint.String()) + t.Fatalf("unexpected utxo %v exists but it's "+ + "not nil and not spent\n", item.OutPoint.String()) + } + } + } + + if item.Expected { + chain.Subscribe(expectedCallBack) + } else { + chain.Subscribe(unexpectedCallBack) + } + } + // testAcceptedBlock attempts to process the block in the provided test // instance and ensures that it was accepted according to the flags // specified in the test. @@ -300,6 +364,8 @@ func TestFullBlocks(t *testing.T) { testOrphanOrRejectedBlock(item) case fullblocktests.ExpectedTip: testExpectedTip(item) + case fullblocktests.BlockDisconnectExpectUTXO: + testBlockDisconnectExpectUTXO(item) default: t.Fatalf("test #%d, item #%d is not one of "+ "the supported test instance types -- "+ diff --git a/blockchain/fullblocktests/generate.go b/blockchain/fullblocktests/generate.go index 4c551c05e0..2b499c4e8c 100644 --- a/blockchain/fullblocktests/generate.go +++ b/blockchain/fullblocktests/generate.go @@ -14,11 +14,11 @@ import ( "encoding/binary" "errors" "fmt" - "math" "runtime" "time" "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/blockchain/internal/testhelper" "github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg" @@ -43,16 +43,6 @@ const ( numLargeReorgBlocks = 1088 ) -var ( - // opTrueScript is simply a public key script that contains the OP_TRUE - // opcode. It is defined here to reduce garbage creation. - opTrueScript = []byte{txscript.OP_TRUE} - - // lowFee is a single satoshi and exists to make the test code more - // readable. 
- lowFee = btcutil.Amount(1) -) - // TestInstance is an interface that describes a specific test instance returned // by the tests generated in this package. It should be type asserted to one // of the concrete test instance types in order to test accordingly. @@ -150,30 +140,20 @@ type RejectedNonCanonicalBlock struct { // This implements the TestInstance interface. func (b RejectedNonCanonicalBlock) FullBlockTestInstance() {} -// spendableOut represents a transaction output that is spendable along with -// additional metadata such as the block its in and how much it pays. -type spendableOut struct { - prevOut wire.OutPoint - amount btcutil.Amount -} - -// makeSpendableOutForTx returns a spendable output for the given transaction -// and transaction output index within the transaction. -func makeSpendableOutForTx(tx *wire.MsgTx, txOutIndex uint32) spendableOut { - return spendableOut{ - prevOut: wire.OutPoint{ - Hash: tx.TxHash(), - Index: txOutIndex, - }, - amount: btcutil.Amount(tx.TxOut[txOutIndex].Value), - } +// BlockDisconnectExpectUTXO defines a test instance that tests an utxo to exist or not +// exist after a specified block has been disconnected. +type BlockDisconnectExpectUTXO struct { + Name string + Expected bool + BlockHash chainhash.Hash + OutPoint wire.OutPoint } -// makeSpendableOut returns a spendable output for the given block, transaction -// index within the block, and transaction output index within the transaction. -func makeSpendableOut(block *wire.MsgBlock, txIndex, txOutIndex uint32) spendableOut { - return makeSpendableOutForTx(block.Transactions[txIndex], txOutIndex) -} +// FullBlockTestInstance only exists to allow BlockDisconnectExpectUTXO to be treated as +// a TestInstance. +// +// This implements the TestInstance interface. 
+func (b BlockDisconnectExpectUTXO) FullBlockTestInstance() {} // testGenerator houses state used to easy the process of generating test blocks // that build from one another along with housing other useful things such as @@ -188,7 +168,7 @@ type testGenerator struct { blockHeights map[string]int32 // Used for tracking spendable coinbase outputs. - spendableOuts []spendableOut + spendableOuts []testhelper.SpendableOut prevCollectedHash chainhash.Hash // Common key for any tests which require signed transactions. @@ -240,62 +220,12 @@ func pushDataScript(items ...[]byte) []byte { return script } -// standardCoinbaseScript returns a standard script suitable for use as the -// signature script of the coinbase transaction of a new block. In particular, -// it starts with the block height that is required by version 2 blocks. -func standardCoinbaseScript(blockHeight int32, extraNonce uint64) ([]byte, error) { - return txscript.NewScriptBuilder().AddInt64(int64(blockHeight)). - AddInt64(int64(extraNonce)).Script() -} - -// opReturnScript returns a provably-pruneable OP_RETURN script with the -// provided data. -func opReturnScript(data []byte) []byte { - builder := txscript.NewScriptBuilder() - script, err := builder.AddOp(txscript.OP_RETURN).AddData(data).Script() - if err != nil { - panic(err) - } - return script -} - -// uniqueOpReturnScript returns a standard provably-pruneable OP_RETURN script -// with a random uint64 encoded as the data. -func uniqueOpReturnScript() []byte { - rand, err := wire.RandomUint64() - if err != nil { - panic(err) - } - - data := make([]byte, 8) - binary.LittleEndian.PutUint64(data[0:8], rand) - return opReturnScript(data) -} - // createCoinbaseTx returns a coinbase transaction paying an appropriate // subsidy based on the passed block height. The coinbase signature script // conforms to the requirements of version 2 blocks. 
func (g *testGenerator) createCoinbaseTx(blockHeight int32) *wire.MsgTx { - extraNonce := uint64(0) - coinbaseScript, err := standardCoinbaseScript(blockHeight, extraNonce) - if err != nil { - panic(err) - } - - tx := wire.NewMsgTx(1) - tx.AddTxIn(&wire.TxIn{ - // Coinbase transactions have no inputs, so previous outpoint is - // zero hash and max index. - PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, - wire.MaxPrevOutIndex), - Sequence: wire.MaxTxInSequenceNum, - SignatureScript: coinbaseScript, - }) - tx.AddTxOut(&wire.TxOut{ - Value: blockchain.CalcBlockSubsidy(blockHeight, g.params), - PkScript: opTrueScript, - }) - return tx + return testhelper.CreateCoinbaseTx( + blockHeight, blockchain.CalcBlockSubsidy(blockHeight, g.params)) } // calcMerkleRoot creates a merkle tree from the slice of transactions and @@ -312,71 +242,6 @@ func calcMerkleRoot(txns []*wire.MsgTx) chainhash.Hash { return blockchain.CalcMerkleRoot(utilTxns, false) } -// solveBlock attempts to find a nonce which makes the passed block header hash -// to a value less than the target difficulty. When a successful solution is -// found true is returned and the nonce field of the passed header is updated -// with the solution. False is returned if no solution exists. -// -// NOTE: This function will never solve blocks with a nonce of 0. This is done -// so the 'nextBlock' function can properly detect when a nonce was modified by -// a munge function. -func solveBlock(header *wire.BlockHeader) bool { - // sbResult is used by the solver goroutines to send results. - type sbResult struct { - found bool - nonce uint32 - } - - // solver accepts a block header and a nonce range to test. It is - // intended to be run as a goroutine. 
- targetDifficulty := blockchain.CompactToBig(header.Bits) - quit := make(chan bool) - results := make(chan sbResult) - solver := func(hdr wire.BlockHeader, startNonce, stopNonce uint32) { - // We need to modify the nonce field of the header, so make sure - // we work with a copy of the original header. - for i := startNonce; i >= startNonce && i <= stopNonce; i++ { - select { - case <-quit: - return - default: - hdr.Nonce = i - hash := hdr.BlockHash() - if blockchain.HashToBig(&hash).Cmp( - targetDifficulty) <= 0 { - - results <- sbResult{true, i} - return - } - } - } - results <- sbResult{false, 0} - } - - startNonce := uint32(1) - stopNonce := uint32(math.MaxUint32) - numCores := uint32(runtime.NumCPU()) - noncesPerCore := (stopNonce - startNonce) / numCores - for i := uint32(0); i < numCores; i++ { - rangeStart := startNonce + (noncesPerCore * i) - rangeStop := startNonce + (noncesPerCore * (i + 1)) - 1 - if i == numCores-1 { - rangeStop = stopNonce - } - go solver(*header, rangeStart, rangeStop) - } - for i := uint32(0); i < numCores; i++ { - result := <-results - if result.found { - close(quit) - header.Nonce = result.nonce - return true - } - } - - return false -} - // additionalCoinbase returns a function that itself takes a block and // modifies it by adding the provided amount to coinbase subsidy. func additionalCoinbase(amount btcutil.Amount) func(*wire.MsgBlock) { @@ -429,33 +294,14 @@ func additionalTx(tx *wire.MsgTx) func(*wire.MsgBlock) { } } -// createSpendTx creates a transaction that spends from the provided spendable -// output and includes an additional unique OP_RETURN output to ensure the -// transaction ends up with a unique hash. The script is a simple OP_TRUE -// script which avoids the need to track addresses and signature scripts in the -// tests. 
-func createSpendTx(spend *spendableOut, fee btcutil.Amount) *wire.MsgTx { - spendTx := wire.NewMsgTx(1) - spendTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: spend.prevOut, - Sequence: wire.MaxTxInSequenceNum, - SignatureScript: nil, - }) - spendTx.AddTxOut(wire.NewTxOut(int64(spend.amount-fee), - opTrueScript)) - spendTx.AddTxOut(wire.NewTxOut(0, uniqueOpReturnScript())) - - return spendTx -} - // createSpendTxForTx creates a transaction that spends from the first output of // the provided transaction and includes an additional unique OP_RETURN output // to ensure the transaction ends up with a unique hash. The public key script // is a simple OP_TRUE script which avoids the need to track addresses and // signature scripts in the tests. The signature script is nil. func createSpendTxForTx(tx *wire.MsgTx, fee btcutil.Amount) *wire.MsgTx { - spend := makeSpendableOutForTx(tx, 0) - return createSpendTx(&spend, fee) + spend := testhelper.MakeSpendableOutForTx(tx, 0) + return testhelper.CreateSpendTx(&spend, fee) } // nextBlock builds a new block that extends the current tip associated with the @@ -477,7 +323,7 @@ func createSpendTxForTx(tx *wire.MsgTx, fee btcutil.Amount) *wire.MsgTx { // applied after all munge functions have been invoked: // - The merkle root will be recalculated unless it was manually changed // - The block will be solved unless the nonce was changed -func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers ...func(*wire.MsgBlock)) *wire.MsgBlock { +func (g *testGenerator) nextBlock(blockName string, spend *testhelper.SpendableOut, mungers ...func(*wire.MsgBlock)) *wire.MsgBlock { // Create coinbase transaction for the block using any additional // subsidy if specified. nextHeight := g.tipHeight + 1 @@ -495,7 +341,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers // add it to the list of transactions to include in the block. 
// The script is a simple OP_TRUE script in order to avoid the // need to track addresses and signature scripts in the tests. - txns = append(txns, createSpendTx(spend, fee)) + txns = append(txns, testhelper.CreateSpendTx(spend, fee)) } // Use a timestamp that is one second after the previous block unless @@ -532,7 +378,7 @@ func (g *testGenerator) nextBlock(blockName string, spend *spendableOut, mungers // Only solve the block if the nonce wasn't manually changed by a munge // function. - if block.Header.Nonce == curNonce && !solveBlock(&block.Header) { + if block.Header.Nonce == curNonce && !testhelper.SolveBlock(&block.Header) { panic(fmt.Sprintf("Unable to solve block at height %d", nextHeight)) } @@ -579,7 +425,7 @@ func (g *testGenerator) setTip(blockName string) { // oldestCoinbaseOuts removes the oldest coinbase output that was previously // saved to the generator and returns the set as a slice. -func (g *testGenerator) oldestCoinbaseOut() spendableOut { +func (g *testGenerator) oldestCoinbaseOut() testhelper.SpendableOut { op := g.spendableOuts[0] g.spendableOuts = g.spendableOuts[1:] return op @@ -588,12 +434,12 @@ func (g *testGenerator) oldestCoinbaseOut() spendableOut { // saveTipCoinbaseOut adds the coinbase tx output in the current tip block to // the list of spendable outputs. func (g *testGenerator) saveTipCoinbaseOut() { - g.spendableOuts = append(g.spendableOuts, makeSpendableOut(g.tip, 0, 0)) + g.spendableOuts = append(g.spendableOuts, testhelper.MakeSpendableOut(g.tip, 0, 0)) g.prevCollectedHash = g.tip.BlockHash() } // saveSpendableCoinbaseOuts adds all coinbase outputs from the last block that -// had its coinbase tx output colleted to the current tip. This is useful to +// had its coinbase tx output collected to the current tip. This is useful to // batch the collection of coinbase outputs once the tests reach a stable point // so they don't have to manually add them for the right tests which will // ultimately end up being the best chain. 
@@ -878,6 +724,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // // orphanedOrRejected creates and appends a single orphanOrRejectBlock // test instance for the current tip. + // + // blockDisconnectExpectUTXO creates and appends a BlockDisconnectExpectUTXO test + // instance with the passed in values. accepted := func() { tests = append(tests, []TestInstance{ acceptBlock(g.tipName, g.tip, true, false), @@ -904,6 +753,12 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { orphanOrRejectBlock(g.tipName, g.tip), }) } + blockDisconnectExpectUTXO := func(name string, expected bool, op wire.OutPoint, + hash chainhash.Hash) { + tests = append(tests, []TestInstance{ + BlockDisconnectExpectUTXO{name, expected, hash, op}, + }) + } // --------------------------------------------------------------------- // Generate enough blocks to have mature coinbase outputs to work with. @@ -923,7 +778,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { tests = append(tests, testInstances) // Collect spendable outputs. This simplifies the code below. - var outs []*spendableOut + var outs []*testhelper.SpendableOut for i := uint16(0); i < coinbaseMaturity; i++ { op := g.oldestCoinbaseOut() outs = append(outs, &op) @@ -936,7 +791,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // --------------------------------------------------------------------- // The comments below identify the structure of the chain being built. // - // The values in parenthesis repesent which outputs are being spent. + // The values in parenthesis represent which outputs are being spent. 
// // For example, b1(0) indicates the first collected spendable output // which, due to the code above to create the correct number of blocks, @@ -961,7 +816,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b3(1) g.setTip("b1") g.nextBlock("b3", outs[1]) - b3Tx1Out := makeSpendableOut(g.tip, 1, 0) + b3Tx1Out := testhelper.MakeSpendableOut(g.tip, 1, 0) acceptedToSideChainWithExpectedTip("b2") // Extend b3 fork to make the alternative chain longer and force reorg. @@ -1194,7 +1049,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { accepted() // --------------------------------------------------------------------- - // Multisig[Verify]/ChecksigVerifiy signature operation count tests. + // Multisig[Verify]/ChecksigVerify signature operation count tests. // --------------------------------------------------------------------- // Create block with max signature operations as OP_CHECKMULTISIG. @@ -1274,9 +1129,9 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b38(b37.tx[1]) // g.setTip("b35") - doubleSpendTx := createSpendTx(outs[11], lowFee) + doubleSpendTx := testhelper.CreateSpendTx(outs[11], testhelper.LowFee) g.nextBlock("b37", outs[11], additionalTx(doubleSpendTx)) - b37Tx1Out := makeSpendableOut(g.tip, 1, 0) + b37Tx1Out := testhelper.MakeSpendableOut(g.tip, 1, 0) rejected(blockchain.ErrMissingTxOut) g.setTip("b35") @@ -1312,7 +1167,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { txnsNeeded := (maxBlockSigOps / redeemScriptSigOps) + 1 prevTx := b.Transactions[1] for i := 0; i < txnsNeeded; i++ { - prevTx = createSpendTxForTx(prevTx, lowFee) + prevTx = createSpendTxForTx(prevTx, testhelper.LowFee) prevTx.TxOut[0].Value -= 2 prevTx.AddTxOut(wire.NewTxOut(2, p2shScript)) b.AddTransaction(prevTx) @@ -1332,8 +1187,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { for i := 0; i < txnsNeeded; i++ { // 
Create a signed transaction that spends from the // associated p2sh output in b39. - spend := makeSpendableOutForTx(b39.Transactions[i+2], 2) - tx := createSpendTx(&spend, lowFee) + spend := testhelper.MakeSpendableOutForTx(b39.Transactions[i+2], 2) + tx := testhelper.CreateSpendTx(&spend, testhelper.LowFee) sig, err := txscript.RawTxInSignature(tx, 0, redeemScript, txscript.SigHashAll, g.privKey) if err != nil { @@ -1349,7 +1204,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // the block one over the max allowed. fill := maxBlockSigOps - (txnsNeeded * redeemScriptSigOps) + 1 finalTx := b.Transactions[len(b.Transactions)-1] - tx := createSpendTxForTx(finalTx, lowFee) + tx := createSpendTxForTx(finalTx, testhelper.LowFee) tx.TxOut[0].PkScript = repeatOpcode(txscript.OP_CHECKSIG, fill) b.AddTransaction(tx) }) @@ -1363,8 +1218,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.nextBlock("b41", outs[12], func(b *wire.MsgBlock) { txnsNeeded := (maxBlockSigOps / redeemScriptSigOps) for i := 0; i < txnsNeeded; i++ { - spend := makeSpendableOutForTx(b39.Transactions[i+2], 2) - tx := createSpendTx(&spend, lowFee) + spend := testhelper.MakeSpendableOutForTx(b39.Transactions[i+2], 2) + tx := testhelper.CreateSpendTx(&spend, testhelper.LowFee) sig, err := txscript.RawTxInSignature(tx, 0, redeemScript, txscript.SigHashAll, g.privKey) if err != nil { @@ -1383,7 +1238,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { return } finalTx := b.Transactions[len(b.Transactions)-1] - tx := createSpendTxForTx(finalTx, lowFee) + tx := createSpendTxForTx(finalTx, testhelper.LowFee) tx.TxOut[0].PkScript = repeatOpcode(txscript.OP_CHECKSIG, fill) b.AddTransaction(tx) }) @@ -1413,7 +1268,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // ... 
-> b43(13) // \-> b44(14) g.nextBlock("b44", nil, func(b *wire.MsgBlock) { - nonCoinbaseTx := createSpendTx(outs[14], lowFee) + nonCoinbaseTx := testhelper.CreateSpendTx(outs[14], testhelper.LowFee) b.Transactions[0] = nonCoinbaseTx }) rejected(blockchain.ErrFirstTxNotCoinbase) @@ -1618,7 +1473,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.setTip("b55") b57 := g.nextBlock("b57", outs[16], func(b *wire.MsgBlock) { tx2 := b.Transactions[1] - tx3 := createSpendTxForTx(tx2, lowFee) + tx3 := createSpendTxForTx(tx2, testhelper.LowFee) b.AddTransaction(tx3) }) g.assertTipBlockNumTxns(3) @@ -1663,7 +1518,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // in the block. spendTx := b.Transactions[1] for i := 0; i < 4; i++ { - spendTx = createSpendTxForTx(spendTx, lowFee) + spendTx = createSpendTxForTx(spendTx, testhelper.LowFee) b.AddTransaction(spendTx) } @@ -1695,7 +1550,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // \-> b59(17) g.setTip("b57") g.nextBlock("b59", outs[17], func(b *wire.MsgBlock) { - b.Transactions[1].TxOut[0].Value = int64(outs[17].amount) + 1 + b.Transactions[1].TxOut[0].Value = int64(outs[17].Amount) + 1 }) rejected(blockchain.ErrSpendTooHigh) @@ -1800,7 +1655,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // ... b64(18) -> b65(19) g.setTip("b64") g.nextBlock("b65", outs[19], func(b *wire.MsgBlock) { - tx3 := createSpendTxForTx(b.Transactions[1], lowFee) + tx3 := createSpendTxForTx(b.Transactions[1], testhelper.LowFee) b.AddTransaction(tx3) }) accepted() @@ -1810,8 +1665,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // ... 
-> b65(19) // \-> b66(20) g.nextBlock("b66", nil, func(b *wire.MsgBlock) { - tx2 := createSpendTx(outs[20], lowFee) - tx3 := createSpendTxForTx(tx2, lowFee) + tx2 := testhelper.CreateSpendTx(outs[20], testhelper.LowFee) + tx3 := createSpendTxForTx(tx2, testhelper.LowFee) b.AddTransaction(tx3) b.AddTransaction(tx2) }) @@ -1825,8 +1680,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { g.setTip("b65") g.nextBlock("b67", outs[20], func(b *wire.MsgBlock) { tx2 := b.Transactions[1] - tx3 := createSpendTxForTx(tx2, lowFee) - tx4 := createSpendTxForTx(tx2, lowFee) + tx3 := createSpendTxForTx(tx2, testhelper.LowFee) + tx4 := createSpendTxForTx(tx2, testhelper.LowFee) b.AddTransaction(tx3) b.AddTransaction(tx4) }) @@ -1949,7 +1804,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { txscript.OP_ELSE, txscript.OP_TRUE, txscript.OP_ENDIF} g.nextBlock("b74", outs[23], replaceSpendScript(script), func(b *wire.MsgBlock) { tx2 := b.Transactions[1] - tx3 := createSpendTxForTx(tx2, lowFee) + tx3 := createSpendTxForTx(tx2, testhelper.LowFee) tx3.TxIn[0].SignatureScript = []byte{txscript.OP_FALSE} b.AddTransaction(tx3) }) @@ -1970,7 +1825,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { const zeroCoin = int64(0) spendTx := b.Transactions[1] for i := 0; i < numAdditionalOutputs; i++ { - spendTx.AddTxOut(wire.NewTxOut(zeroCoin, opTrueScript)) + spendTx.AddTxOut(wire.NewTxOut(zeroCoin, testhelper.OpTrueScript)) } // Add transactions spending from the outputs added above that @@ -1979,14 +1834,14 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // NOTE: The createSpendTx func adds the OP_RETURN output. 
zeroFee := btcutil.Amount(0) for i := uint32(0); i < numAdditionalOutputs; i++ { - spend := makeSpendableOut(b, 1, i+2) - tx := createSpendTx(&spend, zeroFee) + spend := testhelper.MakeSpendableOut(b, 1, i+2) + tx := testhelper.CreateSpendTx(&spend, zeroFee) b.AddTransaction(tx) } }) g.assertTipBlockNumTxns(6) g.assertTipBlockTxOutOpReturn(5, 1) - b75OpReturnOut := makeSpendableOut(g.tip, 5, 1) + b75OpReturnOut := testhelper.MakeSpendableOut(g.tip, 5, 1) accepted() // Reorg to a side chain that does not contain the OP_RETURNs. @@ -2019,7 +1874,7 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { // An OP_RETURN output doesn't have any value and the default behavior // of nextBlock is to assign a fee of one, so increment the amount here // to effective negate that behavior. - b75OpReturnOut.amount++ + b75OpReturnOut.Amount++ g.nextBlock("b80", &b75OpReturnOut) rejected(blockchain.ErrMissingTxOut) @@ -2035,7 +1890,10 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { const zeroCoin = int64(0) spendTx := b.Transactions[1] for i := 0; i < numAdditionalOutputs; i++ { - opRetScript := uniqueOpReturnScript() + opRetScript, err := testhelper.UniqueOpReturnScript() + if err != nil { + panic(err) + } spendTx.AddTxOut(wire.NewTxOut(zeroCoin, opRetScript)) } }) @@ -2044,6 +1902,55 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) { } accepted() + // Create a chain where the utxo created in b82a is spent in b83a. + // + // b81() -> b82a(28) -> b83a(b82.tx[1].out[0]) + // + g.nextBlock("b82a", outs[28]) + accepted() + + b82aTx1Out0 := testhelper.MakeSpendableOut(g.tip, 1, 0) + g.nextBlock("b83a", &b82aTx1Out0) + accepted() + + // Now we'll build a side-chain where we don't spend any of the outputs. 
+	//
+	// b81() -> b82a(28) -> b83a(b82a.tx[1].out[0])
+	//      \-> b82() -> b83()
+	//
+	g.setTip("b81")
+	g.nextBlock("b82", nil)
+	acceptedToSideChainWithExpectedTip("b83a")
+
+	g.nextBlock("b83", nil)
+	acceptedToSideChainWithExpectedTip("b83a")
+
+	// At this point b83a is still the tip. When we add block 84, the tip
+	// will change. Pre-load the expected utxo tests before the reorganization.
+	//
+	// We expect b82a's output to now be a utxo since b83a was spending it and it was
+	// removed from the main chain.
+	blockDisconnectExpectUTXO("b82aTx1Out0",
+		true, b82aTx1Out0.PrevOut, g.blocksByName["b83a"].BlockHash())
+
+	// We expect the output from b82a to not exist once b82a itself has been removed
+	// from the main chain.
+	blockDisconnectExpectUTXO("b82aTx1Out0",
+		false, b82aTx1Out0.PrevOut, g.blocksByName["b82a"].BlockHash())
+
+	// The output that was being spent in b82a should exist after the removal of
+	// b82a.
+	blockDisconnectExpectUTXO("outs[28]",
+		true, outs[28].PrevOut, g.blocksByName["b82a"].BlockHash())
+
+	// Create block 84 and reorg out the sidechain with b83a as the tip.
+	//
+	// b81() -> b82a(28) -> b83a(b82a.tx[1].out[0])
+	//      \-> b82() -> b83() -> b84()
+	//
+	g.nextBlock("b84", nil)
+	accepted()
+
 	// ---------------------------------------------------------------------
 	// Large block re-org test.
 	// ---------------------------------------------------------------------
@@ -2054,8 +1961,8 @@ func Generate(includeLargeReorg bool) (tests [][]TestInstance, err error) {
 
 	// Ensure the tip the re-org test builds on is the best chain tip.
 	//
-	// ... -> b81(27) -> ...
-	g.setTip("b81")
+	// ... -> b84() -> ...
+	g.setTip("b84")
 
 	// Collect all of the spendable coinbase outputs from the previous
 	// collection point up to the current tip.
diff --git a/blockchain/indexers/addrindex.go b/blockchain/indexers/addrindex.go index 7eaaab06b7..2a56574acd 100644 --- a/blockchain/indexers/addrindex.go +++ b/blockchain/indexers/addrindex.go @@ -36,7 +36,7 @@ const ( // consumes. It consists of the address key + 1 byte for the level. levelKeySize = addrKeySize + 1 - // levelOffset is the offset in the level key which identifes the level. + // levelOffset is the offset in the level key which identifies the level. levelOffset = levelKeySize - 1 // addrKeyTypePubKeyHash is the address type in an address key which @@ -64,7 +64,7 @@ const ( addrKeyTypeWitnessScriptHash = 3 // addrKeyTypeTaprootPubKey is the address type in an address key that - // represnts a pay-to-taproot address. We use this to denote addresses + // represents a pay-to-taproot address. We use this to denote addresses // related to the segwit v1 that are encoded in the bech32m format. addrKeyTypeTaprootPubKey = 4 @@ -158,7 +158,7 @@ func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc) []byte { // deserializeAddrIndexEntry decodes the passed serialized byte slice into the // provided region struct according to the format described in detail above and -// uses the passed block hash fetching function in order to conver the block ID +// uses the passed block hash fetching function in order to convert the block ID // to the associated block hash. func deserializeAddrIndexEntry(serialized []byte, region *database.BlockRegion, fetchBlockHash fetchBlockHashFunc) error { @@ -734,7 +734,7 @@ func (idx *AddrIndex) indexBlock(data writeIndexData, block *btcutil.Block, idx.indexPkScript(data, pkScript, txIdx) // With an input indexed, we'll advance the - // stxo coutner. + // stxo counter. 
stxoIndex++ } } diff --git a/blockchain/internal/testhelper/README.md b/blockchain/internal/testhelper/README.md new file mode 100644 index 0000000000..40b339bf79 --- /dev/null +++ b/blockchain/internal/testhelper/README.md @@ -0,0 +1,16 @@ +testhelper +========== + +[![Build Status](https://github.com/btcsuite/btcd/workflows/Build%20and%20Test/badge.svg)](https://github.com/btcsuite/btcd/actions) +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/btcsuite/btcd/blockchain/testhelper) + +Package testhelper provides functions that are used internally in the +btcd/blockchain and btcd/blockchain/fullblocktests package to test consensus +validation rules. Mainly provided to avoid dependency cycles internally among +the different packages in btcd. + +## License + +Package testhelper is licensed under the [copyfree](http://copyfree.org) ISC +License. diff --git a/blockchain/internal/testhelper/common.go b/blockchain/internal/testhelper/common.go new file mode 100644 index 0000000000..681097480c --- /dev/null +++ b/blockchain/internal/testhelper/common.go @@ -0,0 +1,194 @@ +package testhelper + +import ( + "encoding/binary" + "math" + "runtime" + + "github.com/btcsuite/btcd/blockchain/internal/workmath" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" +) + +var ( + // OpTrueScript is simply a public key script that contains the OP_TRUE + // opcode. It is defined here to reduce garbage creation. + OpTrueScript = []byte{txscript.OP_TRUE} + + // LowFee is a single satoshi and exists to make the test code more + // readable. 
+ LowFee = btcutil.Amount(1) +) + +// CreateSpendTx creates a transaction that spends from the provided spendable +// output and includes an additional unique OP_RETURN output to ensure the +// transaction ends up with a unique hash. The script is a simple OP_TRUE +// script which avoids the need to track addresses and signature scripts in the +// tests. +func CreateSpendTx(spend *SpendableOut, fee btcutil.Amount) *wire.MsgTx { + spendTx := wire.NewMsgTx(1) + spendTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: spend.PrevOut, + Sequence: wire.MaxTxInSequenceNum, + SignatureScript: nil, + }) + spendTx.AddTxOut(wire.NewTxOut(int64(spend.Amount-fee), + OpTrueScript)) + opRetScript, err := UniqueOpReturnScript() + if err != nil { + panic(err) + } + spendTx.AddTxOut(wire.NewTxOut(0, opRetScript)) + + return spendTx +} + +// CreateCoinbaseTx returns a coinbase transaction paying an appropriate +// subsidy based on the passed block height and the block subsidy. The +// coinbase signature script conforms to the requirements of version 2 blocks. +func CreateCoinbaseTx(blockHeight int32, blockSubsidy int64) *wire.MsgTx { + extraNonce := uint64(0) + coinbaseScript, err := StandardCoinbaseScript(blockHeight, extraNonce) + if err != nil { + panic(err) + } + + tx := wire.NewMsgTx(1) + tx.AddTxIn(&wire.TxIn{ + // Coinbase transactions have no inputs, so previous outpoint is + // zero hash and max index. + PreviousOutPoint: *wire.NewOutPoint(&chainhash.Hash{}, + wire.MaxPrevOutIndex), + Sequence: wire.MaxTxInSequenceNum, + SignatureScript: coinbaseScript, + }) + tx.AddTxOut(&wire.TxOut{ + Value: blockSubsidy, + PkScript: OpTrueScript, + }) + return tx +} + +// StandardCoinbaseScript returns a standard script suitable for use as the +// signature script of the coinbase transaction of a new block. In particular, +// it starts with the block height that is required by version 2 blocks. 
+func StandardCoinbaseScript(blockHeight int32, extraNonce uint64) ([]byte, error) { + return txscript.NewScriptBuilder().AddInt64(int64(blockHeight)). + AddInt64(int64(extraNonce)).Script() +} + +// OpReturnScript returns a provably-pruneable OP_RETURN script with the +// provided data. +func OpReturnScript(data []byte) ([]byte, error) { + builder := txscript.NewScriptBuilder() + script, err := builder.AddOp(txscript.OP_RETURN).AddData(data).Script() + if err != nil { + return nil, err + } + return script, nil +} + +// UniqueOpReturnScript returns a standard provably-pruneable OP_RETURN script +// with a random uint64 encoded as the data. +func UniqueOpReturnScript() ([]byte, error) { + rand, err := wire.RandomUint64() + if err != nil { + return nil, err + } + + data := make([]byte, 8) + binary.LittleEndian.PutUint64(data[0:8], rand) + return OpReturnScript(data) +} + +// SpendableOut represents a transaction output that is spendable along with +// additional metadata such as the block its in and how much it pays. +type SpendableOut struct { + PrevOut wire.OutPoint + Amount btcutil.Amount +} + +// MakeSpendableOutForTx returns a spendable output for the given transaction +// and transaction output index within the transaction. +func MakeSpendableOutForTx(tx *wire.MsgTx, txOutIndex uint32) SpendableOut { + return SpendableOut{ + PrevOut: wire.OutPoint{ + Hash: tx.TxHash(), + Index: txOutIndex, + }, + Amount: btcutil.Amount(tx.TxOut[txOutIndex].Value), + } +} + +// MakeSpendableOut returns a spendable output for the given block, transaction +// index within the block, and transaction output index within the transaction. +func MakeSpendableOut(block *wire.MsgBlock, txIndex, txOutIndex uint32) SpendableOut { + return MakeSpendableOutForTx(block.Transactions[txIndex], txOutIndex) +} + +// SolveBlock attempts to find a nonce which makes the passed block header hash +// to a value less than the target difficulty. 
When a successful solution is +// found true is returned and the nonce field of the passed header is updated +// with the solution. False is returned if no solution exists. +// +// NOTE: This function will never solve blocks with a nonce of 0. This is done +// so the 'nextBlock' function can properly detect when a nonce was modified by +// a munge function. +func SolveBlock(header *wire.BlockHeader) bool { + // sbResult is used by the solver goroutines to send results. + type sbResult struct { + found bool + nonce uint32 + } + + // solver accepts a block header and a nonce range to test. It is + // intended to be run as a goroutine. + targetDifficulty := workmath.CompactToBig(header.Bits) + quit := make(chan bool) + results := make(chan sbResult) + solver := func(hdr wire.BlockHeader, startNonce, stopNonce uint32) { + // We need to modify the nonce field of the header, so make sure + // we work with a copy of the original header. + for i := startNonce; i >= startNonce && i <= stopNonce; i++ { + select { + case <-quit: + return + default: + hdr.Nonce = i + hash := hdr.BlockHash() + if workmath.HashToBig(&hash).Cmp( + targetDifficulty) <= 0 { + + results <- sbResult{true, i} + return + } + } + } + results <- sbResult{false, 0} + } + + startNonce := uint32(1) + stopNonce := uint32(math.MaxUint32) + numCores := uint32(runtime.NumCPU()) + noncesPerCore := (stopNonce - startNonce) / numCores + for i := uint32(0); i < numCores; i++ { + rangeStart := startNonce + (noncesPerCore * i) + rangeStop := startNonce + (noncesPerCore * (i + 1)) - 1 + if i == numCores-1 { + rangeStop = stopNonce + } + go solver(*header, rangeStart, rangeStop) + } + for i := uint32(0); i < numCores; i++ { + result := <-results + if result.found { + close(quit) + header.Nonce = result.nonce + return true + } + } + + return false +} diff --git a/blockchain/internal/workmath/README.md b/blockchain/internal/workmath/README.md new file mode 100644 index 0000000000..879b2dcfd7 --- /dev/null +++ 
b/blockchain/internal/workmath/README.md @@ -0,0 +1,15 @@ +workmath +========== + +[![Build Status](https://github.com/btcsuite/btcd/workflows/Build%20and%20Test/badge.svg)](https://github.com/btcsuite/btcd/actions) +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/btcsuite/btcd/blockchain/internal/workmath) + +Package workmath provides utility functions that are related to calculating +the work from difficulty bits. This package was introduced to avoid import +cycles in btcd. + +## License + +Package workmath is licensed under the [copyfree](http://copyfree.org) ISC +License. diff --git a/blockchain/internal/workmath/difficulty.go b/blockchain/internal/workmath/difficulty.go new file mode 100644 index 0000000000..8ff7adad1c --- /dev/null +++ b/blockchain/internal/workmath/difficulty.go @@ -0,0 +1,153 @@ +// Copyright (c) 2013-2017 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package workmath + +import ( + "math/big" + + "github.com/btcsuite/btcd/chaincfg/chainhash" +) + +var ( + // bigOne is 1 represented as a big.Int. It is defined here to avoid + // the overhead of creating it multiple times. + bigOne = big.NewInt(1) + + // oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid + // the overhead of creating it multiple times. + oneLsh256 = new(big.Int).Lsh(bigOne, 256) +) + +// HashToBig converts a chainhash.Hash into a big.Int that can be used to +// perform math comparisons. +func HashToBig(hash *chainhash.Hash) *big.Int { + // A Hash is in little-endian, but the big package wants the bytes in + // big-endian, so reverse them.
+ buf := *hash + blen := len(buf) + for i := 0; i < blen/2; i++ { + buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i] + } + + return new(big.Int).SetBytes(buf[:]) +} + +// CompactToBig converts a compact representation of a whole number N to an +// unsigned 32-bit number. The representation is similar to IEEE754 floating +// point numbers. +// +// Like IEEE754 floating point, there are three basic components: the sign, +// the exponent, and the mantissa. They are broken out as follows: +// +// - the most significant 8 bits represent the unsigned base 256 exponent +// - bit 23 (the 24th bit) represents the sign bit +// - the least significant 23 bits represent the mantissa +// +// ------------------------------------------------- +// | Exponent | Sign | Mantissa | +// ------------------------------------------------- +// | 8 bits [31-24] | 1 bit [23] | 23 bits [22-00] | +// ------------------------------------------------- +// +// The formula to calculate N is: +// +// N = (-1^sign) * mantissa * 256^(exponent-3) +// +// This compact form is only used in bitcoin to encode unsigned 256-bit numbers +// which represent difficulty targets, thus there really is not a need for a +// sign bit, but it is implemented here to stay consistent with bitcoind. +func CompactToBig(compact uint32) *big.Int { + // Extract the mantissa, sign bit, and exponent. + mantissa := compact & 0x007fffff + isNegative := compact&0x00800000 != 0 + exponent := uint(compact >> 24) + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes to represent the full 256-bit number. So, + // treat the exponent as the number of bytes and shift the mantissa + // right or left accordingly. 
This is equivalent to: + // N = mantissa * 256^(exponent-3) + var bn *big.Int + if exponent <= 3 { + mantissa >>= 8 * (3 - exponent) + bn = big.NewInt(int64(mantissa)) + } else { + bn = big.NewInt(int64(mantissa)) + bn.Lsh(bn, 8*(exponent-3)) + } + + // Make it negative if the sign bit is set. + if isNegative { + bn = bn.Neg(bn) + } + + return bn +} + +// BigToCompact converts a whole number N to a compact representation using +// an unsigned 32-bit number. The compact representation only provides 23 bits +// of precision, so values larger than (2^23 - 1) only encode the most +// significant digits of the number. See CompactToBig for details. +func BigToCompact(n *big.Int) uint32 { + // No need to do any work if it's zero. + if n.Sign() == 0 { + return 0 + } + + // Since the base for the exponent is 256, the exponent can be treated + // as the number of bytes. So, shift the number right or left + // accordingly. This is equivalent to: + // mantissa = mantissa / 256^(exponent-3) + var mantissa uint32 + exponent := uint(len(n.Bytes())) + if exponent <= 3 { + mantissa = uint32(n.Bits()[0]) + mantissa <<= 8 * (3 - exponent) + } else { + // Use a copy to avoid modifying the caller's original number. + tn := new(big.Int).Set(n) + mantissa = uint32(tn.Rsh(tn, 8*(exponent-3)).Bits()[0]) + } + + // When the mantissa already has the sign bit set, the number is too + // large to fit into the available 23-bits, so divide the number by 256 + // and increment the exponent accordingly. + if mantissa&0x00800000 != 0 { + mantissa >>= 8 + exponent++ + } + + // Pack the exponent, sign bit, and mantissa into an unsigned 32-bit + // int and return it. + compact := uint32(exponent<<24) | mantissa + if n.Sign() < 0 { + compact |= 0x00800000 + } + return compact +} + +// CalcWork calculates a work value from difficulty bits. Bitcoin increases +// the difficulty for generating a block by decreasing the value which the +// generated hash must be less than. 
This difficulty target is stored in each +// block header using a compact representation as described in the documentation +// for CompactToBig. The main chain is selected by choosing the chain that has +// the most proof of work (highest difficulty). Since a lower target difficulty +// value equates to higher actual difficulty, the work value which will be +// accumulated must be the inverse of the difficulty. Also, in order to avoid +// potential division by zero and really small floating point numbers, the +// result adds 1 to the denominator and multiplies the numerator by 2^256. +func CalcWork(bits uint32) *big.Int { + // Return a work value of zero if the passed difficulty bits represent + // a negative number. Note this should not happen in practice with valid + // blocks, but an invalid block could trigger it. + difficultyNum := CompactToBig(bits) + if difficultyNum.Sign() <= 0 { + return big.NewInt(0) + } + + // (1 << 256) / (difficultyNum + 1) + denominator := new(big.Int).Add(difficultyNum, bigOne) + return new(big.Int).Div(oneLsh256, denominator) +} diff --git a/blockchain/difficulty_test.go b/blockchain/internal/workmath/difficulty_test.go similarity index 95% rename from blockchain/difficulty_test.go rename to blockchain/internal/workmath/difficulty_test.go index 6fed37f136..bed4d1f13f 100644 --- a/blockchain/difficulty_test.go +++ b/blockchain/internal/workmath/difficulty_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. -package blockchain +package workmath import ( "math/big" @@ -32,7 +32,7 @@ func TestBigToCompact(t *testing.T) { } // TestCompactToBig ensures CompactToBig converts numbers using the compact -// representation to the expected big intergers. +// representation to the expected big integers. 
func TestCompactToBig(t *testing.T) { tests := []struct { in uint32 diff --git a/blockchain/merkle.go b/blockchain/merkle.go index b89b518505..086c3643f6 100644 --- a/blockchain/merkle.go +++ b/blockchain/merkle.go @@ -146,7 +146,7 @@ func BuildMerkleTreeStore(transactions []*btcutil.Tx, witness bool) []*chainhash merkles[offset] = &newHash // The normal case sets the parent node to the double sha256 - // of the concatentation of the left and right children. + // of the concatenation of the left and right children. default: newHash := HashMerkleBranches(merkles[i], merkles[i+1]) merkles[offset] = &newHash diff --git a/blockchain/sizehelper.go b/blockchain/sizehelper.go index 4904a8e4c3..8330549dd6 100644 --- a/blockchain/sizehelper.go +++ b/blockchain/sizehelper.go @@ -39,7 +39,7 @@ const ( avgEntrySize = baseEntrySize + (pubKeyHashLen + 7) ) -// The code here is shamelessely taken from the go runtime package. All the relevant +// The code here is shamelessly taken from the go runtime package. All the relevant // code and variables are copied to here. These values are only correct for a 64 bit // system. diff --git a/blockchain/timesorter.go b/blockchain/timesorter.go index d0288e1d30..4a5498b258 100644 --- a/blockchain/timesorter.go +++ b/blockchain/timesorter.go @@ -20,7 +20,7 @@ func (s timeSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -// Less returns whether the timstamp with index i should sort before the +// Less returns whether the timestamp with index i should sort before the // timestamp with index j. It is part of the sort.Interface implementation. 
func (s timeSorter) Less(i, j int) bool { return s[i] < s[j] diff --git a/blockchain/upgrade_test.go b/blockchain/upgrade_test.go index 97e7f55c35..9a060b3e8e 100644 --- a/blockchain/upgrade_test.go +++ b/blockchain/upgrade_test.go @@ -9,7 +9,7 @@ import ( "testing" ) -// TestDeserializeUtxoEntryV0 ensures deserializing unspent trasaction output +// TestDeserializeUtxoEntryV0 ensures deserializing unspent transaction output // entries from the legacy version 0 format works as expected. func TestDeserializeUtxoEntryV0(t *testing.T) { tests := []struct { diff --git a/blockchain/utxocache.go b/blockchain/utxocache.go index af7a3b7b6f..550d8c5602 100644 --- a/blockchain/utxocache.go +++ b/blockchain/utxocache.go @@ -99,7 +99,8 @@ func (ms *mapSlice) put(op wire.OutPoint, entry *UtxoEntry, totalEntryMemory uin ms.mtx.Lock() defer ms.mtx.Unlock() - for i, maxNum := range ms.maxEntries { + // Look for the key in the maps. + for i := range ms.maxEntries { m := ms.maps[i] _, found := m[op] if found { @@ -107,6 +108,10 @@ func (ms *mapSlice) put(op wire.OutPoint, entry *UtxoEntry, totalEntryMemory uin m[op] = entry return // Return as we were successful in adding the entry. } + } + + for i, maxNum := range ms.maxEntries { + m := ms.maps[i] if len(m) >= maxNum { // Don't try to insert if the map already at max since // that'll force the map to allocate double the memory it's @@ -177,7 +182,7 @@ const ( // utxoFlushPeriodicInterval is the interval at which a flush is performed // when the flush mode FlushPeriodic is used. This is used when the initial // block download is complete and it's useful to flush periodically in case - // of unforseen shutdowns. + // of unforeseen shutdowns. 
utxoFlushPeriodicInterval = time.Minute * 5 ) @@ -229,7 +234,7 @@ func newUtxoCache(db database.DB, maxTotalMemoryUsage uint64) *utxoCache { numMaxElements := calculateMinEntries(int(maxTotalMemoryUsage), bucketSize+avgEntrySize) numMaxElements -= 1 - log.Infof("Pre-alloacting for %d MiB: ", maxTotalMemoryUsage/(1024*1024)+1) + log.Infof("Pre-allocating for %d MiB", maxTotalMemoryUsage/(1024*1024)+1) m := make(map[wire.OutPoint]*UtxoEntry, numMaxElements) @@ -617,6 +622,7 @@ func (b *BlockChain) InitConsistentState(tip *blockNode, interrupt <-chan struct // Set the last flush hash as it's the default value of 0s. s.lastFlushHash = tip.hash + s.lastFlushTime = time.Now() return err } @@ -634,6 +640,10 @@ func (b *BlockChain) InitConsistentState(tip *blockNode, interrupt <-chan struct // it to the tip since we checked it's consistent. s.lastFlushHash = tip.hash + // Set the last flush time as now since we know the state is consistent + // at this time. + s.lastFlushTime = time.Now() + return nil } @@ -721,22 +731,35 @@ func (b *BlockChain) InitConsistentState(tip *blockNode, interrupt <-chan struct // Example: if the last flush hash was at height 100 and one of the deleted blocks was at // height 98, this function will return true. func (b *BlockChain) flushNeededAfterPrune(deletedBlockHashes []chainhash.Hash) (bool, error) { - lastFlushHeight, err := b.BlockHeightByHash(&b.utxoCache.lastFlushHash) - if err != nil { - return false, err + node := b.index.LookupNode(&b.utxoCache.lastFlushHash) + if node == nil { + // If we couldn't find the node where we last flushed at, have the utxo cache + // flush to be safe and that will set the last flush hash again. + // + // This realistically should never happen as nodes are never deleted from + // the block index. This happening likely means that there's a hardware + // error which is something we can't recover from.
The best that we can + // do here is to just force a flush and hope that the newly set + // lastFlushHash doesn't error. + return true, nil } + lastFlushHeight := node.Height() + // Loop through all the block hashes and find out what the highest block height // among the deleted hashes is. highestDeletedHeight := int32(-1) for _, deletedBlockHash := range deletedBlockHashes { - height, err := b.BlockHeightByHash(&deletedBlockHash) - if err != nil { - return false, err + node := b.index.LookupNode(&deletedBlockHash) + if node == nil { + // If we couldn't find this node, just skip it and try the next + // deleted hash. This might be a corruption in the database + // but there's nothing we can do here to address it except for + // moving onto the next block. + continue } - - if height > highestDeletedHeight { - highestDeletedHeight = height + if node.height > highestDeletedHeight { + highestDeletedHeight = node.height } } diff --git a/blockchain/utxocache_test.go b/blockchain/utxocache_test.go index 20b2a5b34a..0f410cc99e 100644 --- a/blockchain/utxocache_test.go +++ b/blockchain/utxocache_test.go @@ -69,6 +69,26 @@ func TestMapSlice(t *testing.T) { t.Fatalf("expected len of %d, got %d", len(m), ms.length()) } + // Delete the first element in the first map. + ms.delete(test.keys[0]) + delete(m, test.keys[0]) + + // Try to insert the last element in the mapslice again. + ms.put(test.keys[len(test.keys)-1], &UtxoEntry{}, 0) + m[test.keys[len(test.keys)-1]] = &UtxoEntry{} + + // Check that the duplicate didn't make it in. 
+ if len(m) != ms.length() { + t.Fatalf("expected len of %d, got %d", len(m), ms.length()) + } + + ms.put(test.keys[0], &UtxoEntry{}, 0) + m[test.keys[0]] = &UtxoEntry{} + + if len(m) != ms.length() { + t.Fatalf("expected len of %d, got %d", len(m), ms.length()) + } + for _, key := range test.keys { expected, found := m[key] if !found { @@ -260,7 +280,7 @@ func TestUtxoCacheEntrySize(t *testing.T) { } return blocks }(), - // Multipled by 6 since we'll have 6 entries left. + // Multiplied by 6 since we'll have 6 entries left. expectedSize: (pubKeyHashLen + baseEntrySize) * 6, }, { @@ -425,7 +445,7 @@ func TestUtxoCacheFlush(t *testing.T) { t.Fatalf("Unexpected nil entry found for %v", outpoint) } if !entry.isModified() { - t.Fatal("Entry should be marked mofified") + t.Fatal("Entry should be marked modified") } if !entry.isFresh() { t.Fatal("Entry should be marked fresh") @@ -729,18 +749,33 @@ func TestFlushOnPrune(t *testing.T) { } syncBlocks := func() { + // Modify block 1 to be a different hash. This is to artificially + // create a stale branch in the chain. + staleMsgBlock := blocks[1].MsgBlock().Copy() + staleMsgBlock.Header.Nonce = 0 + staleBlock := btcutil.NewBlock(staleMsgBlock) + + // Add the stale block here to create a chain view like so. The + // block will be the main chain at first but become stale as we + // keep adding blocks. BFNoPoWCheck is given as the pow check will + // fail. + // + // (genesis block) -> 1 -> 2 -> 3 -> ... + // \-> 1a + _, _, err = chain.ProcessBlock(staleBlock, BFNoPoWCheck) + if err != nil { + t.Fatal(err) + } + for i, block := range blocks { if i == 0 { // Skip the genesis block. continue } - isMainChain, _, err := chain.ProcessBlock(block, BFNone) + _, _, err = chain.ProcessBlock(block, BFNone) if err != nil { - t.Fatal(err) - } - - if !isMainChain { - t.Fatalf("expected block %s to be on the main chain", block.Hash()) + t.Fatalf("Failed to process block %v(%v). 
%v", + block.Hash().String(), block.Height(), err) } } } @@ -749,36 +784,40 @@ func TestFlushOnPrune(t *testing.T) { ffldb.TstRunWithMaxBlockFileSize(chain.db, maxBlockFileSize, syncBlocks) // Function that errors out if the block that should exist doesn't exist. - shouldExist := func(dbTx database.Tx, blockHash *chainhash.Hash) { + shouldExist := func(dbTx database.Tx, blockHash *chainhash.Hash) error { bytes, err := dbTx.FetchBlock(blockHash) if err != nil { - t.Fatal(err) + return err } block, err := btcutil.NewBlockFromBytes(bytes) if err != nil { - t.Fatalf("didn't find block %v. %v", blockHash, err) + return fmt.Errorf("didn't find block %v. %v", blockHash, err) } if !block.Hash().IsEqual(blockHash) { - t.Fatalf("expected to find block %v but got %v", + return fmt.Errorf("expected to find block %v but got %v", blockHash, block.Hash()) } + + return nil } // Function that errors out if the block that shouldn't exist exists. - shouldNotExist := func(dbTx database.Tx, blockHash *chainhash.Hash) { + shouldNotExist := func(dbTx database.Tx, blockHash *chainhash.Hash) error { bytes, err := dbTx.FetchBlock(chaincfg.MainNetParams.GenesisHash) if err == nil { - t.Fatalf("expected block %s to be pruned", blockHash) + return fmt.Errorf("expected block %s to be pruned", blockHash.String()) } if len(bytes) != 0 { - t.Fatalf("expected block %s to be pruned but got %v", + return fmt.Errorf("expected block %s to be pruned but got %v", blockHash, bytes) } + + return nil } // The below code checks that the correct blocks were pruned. - chain.db.View(func(dbTx database.Tx) error { + err = chain.db.View(func(dbTx database.Tx) error { exist := false for _, block := range blocks { // Blocks up to the last flush hash should not exist. 
@@ -789,15 +828,23 @@ func TestFlushOnPrune(t *testing.T) { } if exist { - shouldExist(dbTx, block.Hash()) + err = shouldExist(dbTx, block.Hash()) + if err != nil { + return err + } } else { - shouldNotExist(dbTx, block.Hash()) + err = shouldNotExist(dbTx, block.Hash()) + if err != nil { + return err + } } - } return nil }) + if err != nil { + t.Fatal(err) + } } func TestInitConsistentState(t *testing.T) { diff --git a/blockchain/utxoviewpoint.go b/blockchain/utxoviewpoint.go index fdd165c095..f62f4b915f 100644 --- a/blockchain/utxoviewpoint.go +++ b/blockchain/utxoviewpoint.go @@ -163,13 +163,13 @@ type UtxoViewpoint struct { } // BestHash returns the hash of the best block in the chain the view currently -// respresents. +// represents. func (view *UtxoViewpoint) BestHash() *chainhash.Hash { return &view.bestHash } // SetBestHash sets the hash of the best block in the chain the view currently -// respresents. +// represents. func (view *UtxoViewpoint) SetBestHash(hash *chainhash.Hash) { view.bestHash = *hash } @@ -519,41 +519,6 @@ func (view *UtxoViewpoint) commit() { } } -// fetchUtxosMain fetches unspent transaction output data about the provided -// set of outpoints from the point of view of the end of the main chain at the -// time of the call. -// -// Upon completion of this function, the view will contain an entry for each -// requested outpoint. Spent outputs, or those which otherwise don't exist, -// will result in a nil entry in the view. -func (view *UtxoViewpoint) fetchUtxosMain(db database.DB, outpoints []wire.OutPoint) error { - // Nothing to do if there are no requested outputs. - if len(outpoints) == 0 { - return nil - } - - // Load the requested set of unspent transaction outputs from the point - // of view of the end of the main chain. - // - // NOTE: Missing entries are not considered an error here and instead - // will result in nil entries in the view. 
This is intentionally done - // so other code can use the presence of an entry in the store as a way - // to unnecessarily avoid attempting to reload it from the database. - return db.View(func(dbTx database.Tx) error { - utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName) - for i := range outpoints { - entry, err := dbFetchUtxoEntry(dbTx, utxoBucket, outpoints[i]) - if err != nil { - return err - } - - view.entries[outpoints[i]] = entry - } - - return nil - }) -} - // fetchUtxosFromCache fetches unspent transaction output data about the provided // set of outpoints from the point of view of the end of the main chain at the // time of the call. It attempts to fetch them from the cache and whatever entries @@ -666,15 +631,11 @@ func (view *UtxoViewpoint) findInputsToFetch(block *btcutil.Block) []wire.OutPoi // fetchInputUtxos loads the unspent transaction outputs for the inputs // referenced by the transactions in the given block into the view from the -// database or the cache as needed. In particular, referenced entries that -// are earlier in the block are added to the view and entries that are already -// in the view are not modified. -func (view *UtxoViewpoint) fetchInputUtxos(db database.DB, cache *utxoCache, block *btcutil.Block) error { - if cache != nil { - return view.fetchUtxosFromCache(cache, view.findInputsToFetch(block)) - } - // Request the input utxos from the cache. - return view.fetchUtxosMain(db, view.findInputsToFetch(block)) +// cache as needed. In particular, referenced entries that are earlier in +// the block are added to the view and entries that are already in the view +// are not modified. +func (view *UtxoViewpoint) fetchInputUtxos(cache *utxoCache, block *btcutil.Block) error { + return view.fetchUtxosFromCache(cache, view.findInputsToFetch(block)) } // NewUtxoViewpoint returns a new empty unspent transaction output view. 
diff --git a/blockchain/validate.go b/blockchain/validate.go index 02d36134b1..5e24405ef9 100644 --- a/blockchain/validate.go +++ b/blockchain/validate.go @@ -243,9 +243,9 @@ func CheckTransactionSanity(tx *btcutil.Tx) error { return ruleError(ErrBadTxOutValue, str) } if satoshi > btcutil.MaxSatoshi { - str := fmt.Sprintf("transaction output value of %v is "+ - "higher than max allowed value of %v", satoshi, - btcutil.MaxSatoshi) + str := fmt.Sprintf("transaction output value is "+ + "higher than max allowed value: %v > %v", + satoshi, btcutil.MaxSatoshi) return ruleError(ErrBadTxOutValue, str) } @@ -761,7 +761,7 @@ func CheckBlockHeaderContext(header *wire.BlockHeader, prevNode HeaderCtx, return nil } -// checkBlockContext peforms several validation checks on the block which depend +// checkBlockContext performs several validation checks on the block which depend // on its position within the block chain. // // The flags modify the behavior of this function as follows: @@ -879,7 +879,7 @@ func (b *BlockChain) checkBlockContext(block *btcutil.Block, prevNode *blockNode // // This function MUST be called with the chain state lock held (for reads). func (b *BlockChain) checkBIP0030(node *blockNode, block *btcutil.Block, view *UtxoViewpoint) error { - // Fetch utxos for all of the transaction ouputs in this block. + // Fetch utxos for all of the transaction outputs in this block. // Typically, there will not be any utxos for any of the outputs.
fetch := make([]wire.OutPoint, 0, len(block.Transactions())) for _, tx := range block.Transactions() { @@ -968,8 +968,8 @@ func CheckTransactionInputs(tx *btcutil.Tx, txHeight int32, utxoView *UtxoViewpo return 0, ruleError(ErrBadTxOutValue, str) } if originTxSatoshi > btcutil.MaxSatoshi { - str := fmt.Sprintf("transaction output value of %v is "+ - "higher than max allowed value of %v", + str := fmt.Sprintf("transaction output value is "+ + "higher than max allowed value: %v > %v", btcutil.Amount(originTxSatoshi), btcutil.MaxSatoshi) return 0, ruleError(ErrBadTxOutValue, str) @@ -1084,7 +1084,7 @@ func (b *BlockChain) checkConnectBlock(node *blockNode, block *btcutil.Block, vi // // These utxo entries are needed for verification of things such as // transaction inputs, counting pay-to-script-hashes, and scripts. - err := view.fetchInputUtxos(nil, b.utxoCache, block) + err := view.fetchInputUtxos(b.utxoCache, block) if err != nil { return err } diff --git a/btcec/README.md b/btcec/README.md index cbf63dd045..533917736e 100644 --- a/btcec/README.md +++ b/btcec/README.md @@ -10,7 +10,7 @@ Bitcoin (secp256k1 only for now). It is designed so that it may be used with the standard crypto/ecdsa packages provided with go. A comprehensive suite of test is provided to ensure proper functionality. Package btcec was originally based on work from ThePiachu which is licensed under the same terms as Go, but it has -signficantly diverged since then. The btcsuite developers original is licensed +significantly diverged since then. The btcsuite developers original is licensed under the liberal ISC license.
Although this package was primarily written for btcd, it has intentionally been diff --git a/btcec/ecdsa/signature.go b/btcec/ecdsa/signature.go index 092e4ceb1c..11c6267caf 100644 --- a/btcec/ecdsa/signature.go +++ b/btcec/ecdsa/signature.go @@ -37,10 +37,20 @@ var ( oneInitializer = []byte{0x01} ) -// MinSigLen is the minimum length of a DER encoded signature and is when both R -// and S are 1 byte each. -// 0x30 + <1-byte> + 0x02 + 0x01 + + 0x2 + 0x01 + -const MinSigLen = 8 +const ( + // MinSigLen is the minimum length of a DER encoded signature and is when both R + // and S are 1 byte each. + // 0x30 + <1-byte> + 0x02 + 0x01 + + 0x2 + 0x01 + + MinSigLen = 8 + + // MaxSigLen is the maximum length of a DER encoded signature and is + // when both R and S are 33 bytes each. It is 33 bytes because a + // 256-bit integer requires 32 bytes and an additional leading null byte + // might be required if the high bit is set in the value. + // + // 0x30 + <1-byte> + 0x02 + 0x21 + <33 bytes> + 0x2 + 0x21 + <33 bytes> + MaxSigLen = 72 +) // canonicalPadding checks whether a big-endian encoded integer could // possibly be misinterpreted as a negative number (even though OpenSSL @@ -68,9 +78,15 @@ func parseSig(sigStr []byte, der bool) (*Signature, error) { // 0x30 <0x02> 0x2 // . - if len(sigStr) < MinSigLen { + // The signature must adhere to the minimum and maximum allowed length. + totalSigLen := len(sigStr) + if totalSigLen < MinSigLen { return nil, errors.New("malformed signature: too short") } + if der && totalSigLen > MaxSigLen { + return nil, errors.New("malformed signature: too long") + } + // 0x30 index := 0 if sigStr[index] != 0x30 { @@ -196,7 +212,7 @@ func parseSig(sigStr []byte, der bool) (*Signature, error) { } // ParseSignature parses a signature in BER format for the curve type `curve' -// into a Signature type, perfoming some basic sanity checks. If parsing +// into a Signature type, performing some basic sanity checks. 
If parsing // according to the more strict DER format is needed, use ParseDERSignature. func ParseSignature(sigStr []byte) (*Signature, error) { return parseSig(sigStr, false) diff --git a/btcec/ecdsa/signature_test.go b/btcec/ecdsa/signature_test.go index d2eebdc788..f36e15db89 100644 --- a/btcec/ecdsa/signature_test.go +++ b/btcec/ecdsa/signature_test.go @@ -333,6 +333,21 @@ var signatureTests = []signatureTest{ der: false, isValid: false, }, + { + name: "Long signature.", + sig: []byte{0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69, + 0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, 0xa1, + 0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, 0xe9, 0xd6, + 0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, 0x5f, 0xb8, 0xcd, + 0x41, 0x02, 0x20, 0x18, 0x15, 0x22, 0xec, 0x8e, 0xca, + 0x07, 0xde, 0x48, 0x60, 0xa4, 0xac, 0xdd, 0x12, 0x90, + 0x9d, 0x83, 0x1c, 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22, + 0x08, 0x22, 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09, 0x91, + 0x17, 0x90, 0xda, 0x42, 0xca, 0xaf, 0x19, 0x7d, 0xb4, + }, + der: true, + isValid: false, + }, } func TestSignatures(t *testing.T) { diff --git a/btcec/field_test.go b/btcec/field_test.go index 6ade97a1eb..0844dc1d67 100644 --- a/btcec/field_test.go +++ b/btcec/field_test.go @@ -952,7 +952,7 @@ func TestFieldSquareRoot(t *testing.T) { input := setHex(test.in).Normalize() want := setHex(test.want).Normalize() - // Calculate the square root and enusre the validity flag matches the + // Calculate the square root and ensure the validity flag matches the // expected value. var result FieldVal isValid := result.SquareRootVal(input) diff --git a/btcec/pubkey.go b/btcec/pubkey.go index c4b0680a7a..2c3a5ccbef 100644 --- a/btcec/pubkey.go +++ b/btcec/pubkey.go @@ -10,6 +10,8 @@ import ( // These constants define the lengths of serialized public keys. const ( + // PubKeyBytesLenCompressed is the bytes length of a serialized compressed + // public key. 
PubKeyBytesLenCompressed = 33 ) @@ -49,3 +51,38 @@ type PublicKey = secp.PublicKey func NewPublicKey(x, y *FieldVal) *PublicKey { return secp.NewPublicKey(x, y) } + +// SerializedKey is a type for representing a public key in its compressed +// serialized form. +// +// NOTE: This type is useful when using public keys as keys in maps. +type SerializedKey [PubKeyBytesLenCompressed]byte + +// ToPubKey returns the public key parsed from the serialized key. +func (s SerializedKey) ToPubKey() (*PublicKey, error) { + return ParsePubKey(s[:]) +} + +// SchnorrSerialized returns the Schnorr serialized, x-only 32-byte +// representation of the serialized key. +func (s SerializedKey) SchnorrSerialized() [32]byte { + var serializedSchnorr [32]byte + copy(serializedSchnorr[:], s[1:]) + return serializedSchnorr +} + +// CopyBytes returns a copy of the underlying array as a byte slice. +func (s SerializedKey) CopyBytes() []byte { + c := make([]byte, PubKeyBytesLenCompressed) + copy(c, s[:]) + + return c +} + +// ToSerialized serializes a public key into its compressed form. +func ToSerialized(pubKey *PublicKey) SerializedKey { + var serialized SerializedKey + copy(serialized[:], pubKey.SerializeCompressed()) + + return serialized +} diff --git a/btcec/schnorr/musig2/context.go b/btcec/schnorr/musig2/context.go index 8f4521502a..8e6b7154d3 100644 --- a/btcec/schnorr/musig2/context.go +++ b/btcec/schnorr/musig2/context.go @@ -513,7 +513,7 @@ func (s *Session) PublicNonce() [PubNonceSize]byte { } // NumRegisteredNonces returns the total number of nonces that have been -// regsitered so far. +// registered so far. 
func (s *Session) NumRegisteredNonces() int { return len(s.pubNonces) } diff --git a/btcec/schnorr/musig2/musig2_test.go b/btcec/schnorr/musig2/musig2_test.go index 91dad90b3e..dfd48f3e82 100644 --- a/btcec/schnorr/musig2/musig2_test.go +++ b/btcec/schnorr/musig2/musig2_test.go @@ -258,7 +258,7 @@ func TestMuSigMultiParty(t *testing.T) { } // TestMuSigEarlyNonce tests that for protocols where nonces need to be -// exchagned before all signers are known, the context API works as expected. +// exchanged before all signers are known, the context API works as expected. func TestMuSigEarlyNonce(t *testing.T) { t.Parallel() diff --git a/btcec/schnorr/musig2/nonces.go b/btcec/schnorr/musig2/nonces.go index 988b199471..dbe39ef3db 100644 --- a/btcec/schnorr/musig2/nonces.go +++ b/btcec/schnorr/musig2/nonces.go @@ -144,7 +144,7 @@ func defaultNonceGenOpts() *nonceGenOpts { // WithCustomRand allows a caller to use a custom random number generator in // place for crypto/rand. This should only really be used to generate -// determinstic tests. +// deterministic tests. func WithCustomRand(r io.Reader) NonceGenOption { return func(o *nonceGenOpts) { o.randReader = r diff --git a/btcec/schnorr/musig2/nonces_test.go b/btcec/schnorr/musig2/nonces_test.go index 7105d83b30..074fe7363a 100644 --- a/btcec/schnorr/musig2/nonces_test.go +++ b/btcec/schnorr/musig2/nonces_test.go @@ -111,7 +111,7 @@ type nonceAggTestCases struct { } // TestMusig2AggregateNoncesTestVectors tests that the musig2 implementation -// passes the nonce aggregration test vectors for musig2 1.0. +// passes the nonce aggregation test vectors for musig2 1.0. 
func TestMusig2AggregateNoncesTestVectors(t *testing.T) { t.Parallel() diff --git a/btcec/schnorr/musig2/sign_test.go b/btcec/schnorr/musig2/sign_test.go index a7f5d79d5d..a967cfe476 100644 --- a/btcec/schnorr/musig2/sign_test.go +++ b/btcec/schnorr/musig2/sign_test.go @@ -298,7 +298,7 @@ type sigCombineTestVectors struct { ValidCases []sigCombineValidCase `json:"valid_test_cases"` } -func pSigsFromIndicies(t *testing.T, sigs []string, indices []int) []*PartialSignature { +func pSigsFromIndices(t *testing.T, sigs []string, indices []int) []*PartialSignature { pSigs := make([]*PartialSignature, len(indices)) for i, idx := range indices { var pSig PartialSignature @@ -341,7 +341,7 @@ func TestMusig2SignCombine(t *testing.T) { t, testCase.NonceIndices, testCases.PubNonces, ) - partialSigs := pSigsFromIndicies( + partialSigs := pSigsFromIndices( t, testCases.Psigs, testCase.PSigIndices, ) diff --git a/btcec/schnorr/signature_test.go b/btcec/schnorr/signature_test.go index 2f96b7e4d5..9e99bbe233 100644 --- a/btcec/schnorr/signature_test.go +++ b/btcec/schnorr/signature_test.go @@ -245,7 +245,7 @@ func TestSchnorrVerify(t *testing.T) { verify := err == nil if test.verifyResult != verify { - t.Fatalf("test #%v: verificaiton mismatch: expected "+ + t.Fatalf("test #%v: verification mismatch: expected "+ "%v, got %v", i, test.verifyResult, verify) } diff --git a/btcjson/btcdextcmds.go b/btcjson/btcdextcmds.go index a3ca46ba71..768dca4d3d 100644 --- a/btcjson/btcdextcmds.go +++ b/btcjson/btcdextcmds.go @@ -20,7 +20,7 @@ const ( // persistent peer. NRemove NodeSubCmd = "remove" - // NDisconnect indicates the specified peer should be disonnected. + // NDisconnect indicates the specified peer should be disconnected. 
NDisconnect NodeSubCmd = "disconnect" ) diff --git a/btcjson/btcdextresults_test.go b/btcjson/btcdextresults_test.go index 478f088cd3..9a8b9eedf7 100644 --- a/btcjson/btcdextresults_test.go +++ b/btcjson/btcdextresults_test.go @@ -13,7 +13,7 @@ import ( ) // TestBtcdExtCustomResults ensures any results that have custom marshalling -// work as inteded. +// work as intended. // and unmarshal code of results are as expected. func TestBtcdExtCustomResults(t *testing.T) { t.Parallel() diff --git a/btcjson/chainsvrcmds.go b/btcjson/chainsvrcmds.go index aa1d4415da..22552e7bcd 100644 --- a/btcjson/chainsvrcmds.go +++ b/btcjson/chainsvrcmds.go @@ -16,6 +16,10 @@ import ( "github.com/btcsuite/btcd/wire" ) +// BTCPerkvB is the units used to represent Bitcoin transaction fees. +// This unit represents the fee in BTC for a transaction size of 1 kB. +type BTCPerkvB = float64 + // AddNodeSubCmd defines the type used in the addnode JSON-RPC command for the // sub command field. type AddNodeSubCmd string @@ -142,11 +146,12 @@ type FundRawTransactionOpts struct { ChangeType *ChangeType `json:"change_type,omitempty"` IncludeWatching *bool `json:"includeWatching,omitempty"` LockUnspents *bool `json:"lockUnspents,omitempty"` - FeeRate *float64 `json:"feeRate,omitempty"` // BTC/kB + FeeRate *BTCPerkvB `json:"feeRate,omitempty"` // BTC/kB SubtractFeeFromOutputs []int `json:"subtractFeeFromOutputs,omitempty"` Replaceable *bool `json:"replaceable,omitempty"` ConfTarget *int `json:"conf_target,omitempty"` EstimateMode *EstimateSmartFeeMode `json:"estimate_mode,omitempty"` + IncludeUnsafe *bool `json:"include_unsafe,omitempty"` } // FundRawTransactionCmd defines the fundrawtransaction JSON-RPC command @@ -821,7 +826,7 @@ func NewSearchRawTransactionsCmd(address string, verbose, skip, count *int, vinE } // AllowHighFeesOrMaxFeeRate defines a type that can either be the legacy -// allowhighfees boolean field or the new maxfeerate int field. 
+// allowhighfees boolean field or the new maxfeerate float64 field. type AllowHighFeesOrMaxFeeRate struct { Value interface{} } @@ -861,7 +866,7 @@ func (a *AllowHighFeesOrMaxFeeRate) UnmarshalJSON(data []byte) error { case bool: a.Value = Bool(v) case float64: - a.Value = Int32(int32(v)) + a.Value = Float64(v) default: return fmt.Errorf("invalid allowhighfees or maxfeerate value: "+ "%v", unmarshalled) @@ -892,9 +897,10 @@ func NewSendRawTransactionCmd(hexTx string, allowHighFees *bool) *SendRawTransac // NewSendRawTransactionCmd returns a new instance which can be used to issue a // sendrawtransaction JSON-RPC command to a bitcoind node. +// maxFeeRate is the maximum fee rate for the transaction in BTC/kvB. // // A 0 maxFeeRate indicates that a maximum fee rate won't be enforced. -func NewBitcoindSendRawTransactionCmd(hexTx string, maxFeeRate int32) *SendRawTransactionCmd { +func NewBitcoindSendRawTransactionCmd(hexTx string, maxFeeRate BTCPerkvB) *SendRawTransactionCmd { return &SendRawTransactionCmd{ HexTx: hexTx, FeeSetting: &AllowHighFeesOrMaxFeeRate{ @@ -1042,6 +1048,59 @@ func NewVerifyTxOutProofCmd(proof string) *VerifyTxOutProofCmd { } } +// TestMempoolAcceptCmd defines the testmempoolaccept JSON-RPC command. +type TestMempoolAcceptCmd struct { + // An array of hex strings of raw transactions. + RawTxns []string + + // Reject transactions whose fee rate is higher than the specified + // value, expressed in BTC/kvB, optional, default="0.10". + MaxFeeRate BTCPerkvB `json:"omitempty"` +} + +// NewTestMempoolAcceptCmd returns a new instance which can be used to issue a +// testmempoolaccept JSON-RPC command. +func NewTestMempoolAcceptCmd(rawTxns []string, + maxFeeRate BTCPerkvB) *TestMempoolAcceptCmd { + + return &TestMempoolAcceptCmd{ + RawTxns: rawTxns, + MaxFeeRate: maxFeeRate, + } +} + +// GetTxSpendingPrevOutCmd defines the gettxspendingprevout JSON-RPC command. 
+type GetTxSpendingPrevOutCmd struct { + // Outputs is a list of transaction outputs to query. + Outputs []*GetTxSpendingPrevOutCmdOutput +} + +// GetTxSpendingPrevOutCmdOutput defines the output to query for the +// gettxspendingprevout JSON-RPC command. +type GetTxSpendingPrevOutCmdOutput struct { + Txid string `json:"txid"` + Vout uint32 `json:"vout"` +} + +// NewGetTxSpendingPrevOutCmd returns a new instance which can be used to issue +// a gettxspendingprevout JSON-RPC command. +func NewGetTxSpendingPrevOutCmd( + outpoints []wire.OutPoint) *GetTxSpendingPrevOutCmd { + + outputs := make([]*GetTxSpendingPrevOutCmdOutput, 0, len(outpoints)) + + for _, op := range outpoints { + outputs = append(outputs, &GetTxSpendingPrevOutCmdOutput{ + Txid: op.Hash.String(), + Vout: op.Index, + }) + } + + return &GetTxSpendingPrevOutCmd{ + Outputs: outputs, + } +} + func init() { // No special flags for commands in this file. flags := UsageFlag(0) @@ -1102,4 +1161,6 @@ func init() { MustRegisterCmd("verifychain", (*VerifyChainCmd)(nil), flags) MustRegisterCmd("verifymessage", (*VerifyMessageCmd)(nil), flags) MustRegisterCmd("verifytxoutproof", (*VerifyTxOutProofCmd)(nil), flags) + MustRegisterCmd("testmempoolaccept", (*TestMempoolAcceptCmd)(nil), flags) + MustRegisterCmd("gettxspendingprevout", (*GetTxSpendingPrevOutCmd)(nil), flags) } diff --git a/btcjson/chainsvrcmds_test.go b/btcjson/chainsvrcmds_test.go index 99983288b1..38113a687e 100644 --- a/btcjson/chainsvrcmds_test.go +++ b/btcjson/chainsvrcmds_test.go @@ -13,6 +13,7 @@ import ( "testing" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" ) @@ -1256,16 +1257,16 @@ func TestChainSvrCmds(t *testing.T) { { name: "sendrawtransaction optional, bitcoind >= 0.19.0", newCmd: func() (interface{}, error) { - return btcjson.NewCmd("sendrawtransaction", "1122", &btcjson.AllowHighFeesOrMaxFeeRate{Value: btcjson.Int32(1234)}) + return btcjson.NewCmd("sendrawtransaction", 
"1122", &btcjson.AllowHighFeesOrMaxFeeRate{Value: btcjson.Float64(0.1234)}) }, staticCmd: func() interface{} { - return btcjson.NewBitcoindSendRawTransactionCmd("1122", 1234) + return btcjson.NewBitcoindSendRawTransactionCmd("1122", 0.1234) }, - marshalled: `{"jsonrpc":"1.0","method":"sendrawtransaction","params":["1122",1234],"id":1}`, + marshalled: `{"jsonrpc":"1.0","method":"sendrawtransaction","params":["1122",0.1234],"id":1}`, unmarshalled: &btcjson.SendRawTransactionCmd{ HexTx: "1122", FeeSetting: &btcjson.AllowHighFeesOrMaxFeeRate{ - Value: btcjson.Int32(1234), + Value: btcjson.Float64(0.1234), }, }, }, @@ -1472,6 +1473,57 @@ func TestChainSvrCmds(t *testing.T) { marshalled: `{"jsonrpc":"1.0","method":"getzmqnotifications","params":[],"id":1}`, unmarshalled: &btcjson.GetZmqNotificationsCmd{}, }, + { + name: "testmempoolaccept", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd("testmempoolaccept", []string{"rawhex"}, 0.1) + }, + staticCmd: func() interface{} { + return btcjson.NewTestMempoolAcceptCmd([]string{"rawhex"}, 0.1) + }, + marshalled: `{"jsonrpc":"1.0","method":"testmempoolaccept","params":[["rawhex"],0.1],"id":1}`, + unmarshalled: &btcjson.TestMempoolAcceptCmd{ + RawTxns: []string{"rawhex"}, + MaxFeeRate: 0.1, + }, + }, + { + name: "testmempoolaccept with maxfeerate", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd("testmempoolaccept", []string{"rawhex"}, 0.01) + }, + staticCmd: func() interface{} { + return btcjson.NewTestMempoolAcceptCmd([]string{"rawhex"}, 0.01) + }, + marshalled: `{"jsonrpc":"1.0","method":"testmempoolaccept","params":[["rawhex"],0.01],"id":1}`, + unmarshalled: &btcjson.TestMempoolAcceptCmd{ + RawTxns: []string{"rawhex"}, + MaxFeeRate: 0.01, + }, + }, + { + name: "gettxspendingprevout", + newCmd: func() (interface{}, error) { + return btcjson.NewCmd( + "gettxspendingprevout", + []*btcjson.GetTxSpendingPrevOutCmdOutput{ + {Txid: "0000000000000000000000000000000000000000000000000000000000000001", 
Vout: 0}, + }) + }, + staticCmd: func() interface{} { + outputs := []wire.OutPoint{ + {Hash: chainhash.Hash{1}, Index: 0}, + } + return btcjson.NewGetTxSpendingPrevOutCmd(outputs) + }, + marshalled: `{"jsonrpc":"1.0","method":"gettxspendingprevout","params":[[{"txid":"0000000000000000000000000000000000000000000000000000000000000001","vout":0}]],"id":1}`, + unmarshalled: &btcjson.GetTxSpendingPrevOutCmd{ + Outputs: []*btcjson.GetTxSpendingPrevOutCmdOutput{{ + Txid: "0000000000000000000000000000000000000000000000000000000000000001", + Vout: 0, + }}, + }, + }, } t.Logf("Running %d tests", len(tests)) diff --git a/btcjson/chainsvrresults.go b/btcjson/chainsvrresults.go index 41b93f8570..11c0483d31 100644 --- a/btcjson/chainsvrresults.go +++ b/btcjson/chainsvrresults.go @@ -304,8 +304,8 @@ type GetBlockTemplateResult struct { NonceRange string `json:"noncerange,omitempty"` // Block proposal from BIP 0023. - Capabilities []string `json:"capabilities,omitempty"` - RejectReasion string `json:"reject-reason,omitempty"` + Capabilities []string `json:"capabilities,omitempty"` + RejectReason string `json:"reject-reason,omitempty"` } // GetMempoolEntryResult models the data returned from the getmempoolentry's @@ -855,3 +855,73 @@ type LoadWalletResult struct { type DumpWalletResult struct { Filename string `json:"filename"` } + +// TestMempoolAcceptResult models the data from the testmempoolaccept command. +// The result of the mempool acceptance test for each raw transaction in the +// input array. Returns results for each transaction in the same order they +// were passed in. Transactions that cannot be fully validated due to failures +// in other transactions will not contain an 'allowed' result. +type TestMempoolAcceptResult struct { + // Txid is the transaction hash in hex. + Txid string `json:"txid"` + + // Wtxid is the transaction witness hash in hex. 
+ Wtxid string `json:"wtxid"` + + // PackageError is the package validation error, if any (only possible + // if rawtxs had more than 1 transaction). + PackageError string `json:"package-error"` + + // Allowed specifies whether this tx would be accepted to the mempool + // and pass client-specified maxfeerate. If not present, the tx was not + // fully validated due to a failure in another tx in the list. + Allowed bool `json:"allowed,omitempty"` + + // Vsize is the virtual transaction size as defined in BIP 141. This is + // different from actual serialized size for witness transactions as + // witness data is discounted (only present when 'allowed' is true) + Vsize int32 `json:"vsize,omitempty"` + + // Fees specifies the transaction fees (only present if 'allowed' is + // true). + Fees *TestMempoolAcceptFees `json:"fees,omitempty"` + + // RejectReason is the rejection string (only present when 'allowed' is + // false). + RejectReason string `json:"reject-reason,omitempty"` +} + +// TestMempoolAcceptFees models the `fees` section from the testmempoolaccept +// command. +type TestMempoolAcceptFees struct { + // Base is the transaction fee in BTC. + Base float64 `json:"base"` + + // EffectiveFeeRate specifies the effective feerate in BTC per KvB. May + // differ from the base feerate if, for example, there are modified + // fees from prioritisetransaction or a package feerate was used. + // + // NOTE: this field only exists in bitcoind v25.0 and above. + EffectiveFeeRate float64 `json:"effective-feerate"` + + // EffectiveIncludes specifies transactions whose fees and vsizes are + // included in effective-feerate. Each item is a transaction wtxid in + // hex. + // + // NOTE: this field only exists in bitcoind v25.0 and above. + EffectiveIncludes []string `json:"effective-includes"` +} + +// GetTxSpendingPrevOutResult defines a single item returned from the +// gettxspendingprevout command. 
+type GetTxSpendingPrevOutResult struct { + // Txid is the transaction id of the checked output. + Txid string `json:"txid"` + + // Vout is the vout value of the checked output. + Vout uint32 `json:"vout"` + + // SpendingTxid is the transaction id of the mempool transaction + // spending this output (omitted if unspent). + SpendingTxid string `json:"spendingtxid,omitempty"` +} diff --git a/btcjson/chainsvrresults_test.go b/btcjson/chainsvrresults_test.go index 8a11197af2..2566e65f62 100644 --- a/btcjson/chainsvrresults_test.go +++ b/btcjson/chainsvrresults_test.go @@ -17,7 +17,7 @@ import ( ) // TestChainSvrCustomResults ensures any results that have custom marshalling -// work as inteded. +// work as intended. // and unmarshal code of results are as expected. func TestChainSvrCustomResults(t *testing.T) { t.Parallel() diff --git a/btcjson/chainsvrwsresults_test.go b/btcjson/chainsvrwsresults_test.go index b1e17450c1..9e8c7676c1 100644 --- a/btcjson/chainsvrwsresults_test.go +++ b/btcjson/chainsvrwsresults_test.go @@ -13,7 +13,7 @@ import ( ) // TestChainSvrWsResults ensures any results that have custom marshalling -// work as inteded. +// work as intended. func TestChainSvrWsResults(t *testing.T) { t.Parallel() diff --git a/btcjson/cmdinfo_test.go b/btcjson/cmdinfo_test.go index 61a693e404..9dc4567840 100644 --- a/btcjson/cmdinfo_test.go +++ b/btcjson/cmdinfo_test.go @@ -11,7 +11,7 @@ import ( "github.com/btcsuite/btcd/btcjson" ) -// TestCmdMethod tests the CmdMethod function to ensure it retunrs the expected +// TestCmdMethod tests the CmdMethod function to ensure it returns the expected // methods and errors. 
func TestCmdMethod(t *testing.T) { t.Parallel() diff --git a/btcjson/cmdparse.go b/btcjson/cmdparse.go index 5cf3215e52..c9b248e389 100644 --- a/btcjson/cmdparse.go +++ b/btcjson/cmdparse.go @@ -232,7 +232,7 @@ func baseType(arg reflect.Type) (reflect.Type, int) { // assignField is the main workhorse for the NewCmd function which handles // assigning the provided source value to the destination field. It supports // direct type assignments, indirection, conversion of numeric types, and -// unmarshaling of strings into arrays, slices, structs, and maps via +// unmarshalling of strings into arrays, slices, structs, and maps via // json.Unmarshal. func assignField(paramNum int, fieldName string, dest reflect.Value, src reflect.Value) error { // Just error now when the types have no chance of being compatible. @@ -451,7 +451,7 @@ func assignField(paramNum int, fieldName string, dest reflect.Value, src reflect // String -> float of varying size. case reflect.Float32, reflect.Float64: - srcFloat, err := strconv.ParseFloat(src.String(), 0) + srcFloat, err := strconv.ParseFloat(src.String(), 64) if err != nil { str := fmt.Sprintf("parameter #%d '%s' must "+ "parse to a %v", paramNum, fieldName, diff --git a/btcjson/error.go b/btcjson/error.go index 3d72329f91..66be0214d1 100644 --- a/btcjson/error.go +++ b/btcjson/error.go @@ -30,7 +30,7 @@ const ( // embedded type which is not not supported. ErrEmbeddedType - // ErrUnexportedField indiciates the provided command struct contains an + // ErrUnexportedField indicates the provided command struct contains an // unexported field which is not supported. 
ErrUnexportedField diff --git a/btcjson/walletsvrcmds.go b/btcjson/walletsvrcmds.go index 979ab0c25b..9b787f60c4 100644 --- a/btcjson/walletsvrcmds.go +++ b/btcjson/walletsvrcmds.go @@ -978,7 +978,7 @@ type ImportMultiRequest struct { KeyPool *bool `json:"keypool,omitempty"` } -// ImportMultiRequest defines the options struct, provided to the +// ImportMultiOptions defines the options struct, provided to the // ImportMultiCmd as a pointer argument. type ImportMultiOptions struct { Rescan bool `json:"rescan"` // Rescan the blockchain after all imports diff --git a/btcutil/address.go b/btcutil/address.go index d0367abfc8..95d3e6c301 100644 --- a/btcutil/address.go +++ b/btcutil/address.go @@ -686,8 +686,8 @@ func NewAddressTaproot(witnessProg []byte, return newAddressTaproot(net.Bech32HRPSegwit, witnessProg) } -// newAddressWitnessScriptHash is an internal helper function to create an -// AddressWitnessScriptHash with a known human-readable part, rather than +// newAddressTaproot is an internal helper function to create an +// AddressTaproot with a known human-readable part, rather than // looking it up through its parameters. func newAddressTaproot(hrp string, witnessProg []byte) (*AddressTaproot, error) { diff --git a/btcutil/bech32/bech32.go b/btcutil/bech32/bech32.go index c1e00106e6..92994b2881 100644 --- a/btcutil/bech32/bech32.go +++ b/btcutil/bech32/bech32.go @@ -18,7 +18,7 @@ const charset = "qpzry9x8gf2tvdw0s3jn54khce6mua7l" var gen = []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3} // toBytes converts each character in the string 'chars' to the value of the -// index of the correspoding character in 'charset'. +// index of the corresponding character in 'charset'. 
func toBytes(chars string) ([]byte, error) { decoded := make([]byte, 0, len(chars)) for i := 0; i < len(chars); i++ { @@ -165,11 +165,14 @@ func bech32VerifyChecksum(hrp string, data []byte) (Version, bool) { return VersionUnknown, false } -// DecodeNoLimit is a bech32 checksum version aware arbitrary string length -// decoder. This function will return the version of the decoded checksum -// constant so higher level validation can be performed to ensure the correct -// version of bech32 was used when encoding. -func decodeNoLimit(bech string) (string, []byte, Version, error) { +// DecodeNoLimitWithVersion is a bech32 checksum version aware arbitrary string +// length decoder. This function will return the version of the decoded +// checksum constant so higher level validation can be performed to ensure the +// correct version of bech32 was used when encoding. +// +// Note that the returned data is 5-bit (base32) encoded and the human-readable +// part will be lowercase. +func DecodeNoLimitWithVersion(bech string) (string, []byte, Version, error) { // The minimum allowed size of a bech32 string is 8 characters, since it // needs a non-empty HRP, a separator, and a 6 character checksum. if len(bech) < 8 { @@ -262,7 +265,7 @@ func decodeNoLimit(bech string) (string, []byte, Version, error) { // Note that the returned data is 5-bit (base32) encoded and the human-readable // part will be lowercase. 
func DecodeNoLimit(bech string) (string, []byte, error) { - hrp, data, _, err := decodeNoLimit(bech) + hrp, data, _, err := DecodeNoLimitWithVersion(bech) return hrp, data, err } @@ -277,7 +280,7 @@ func Decode(bech string) (string, []byte, error) { return "", nil, ErrInvalidLength(len(bech)) } - hrp, data, _, err := decodeNoLimit(bech) + hrp, data, _, err := DecodeNoLimitWithVersion(bech) return hrp, data, err } @@ -291,7 +294,7 @@ func DecodeGeneric(bech string) (string, []byte, Version, error) { return "", nil, VersionUnknown, ErrInvalidLength(len(bech)) } - return decodeNoLimit(bech) + return DecodeNoLimitWithVersion(bech) } // encodeGeneric is the base bech32 encoding function that is aware of the diff --git a/btcutil/bech32/bech32_test.go b/btcutil/bech32/bech32_test.go index 1e04905a61..3f637c4034 100644 --- a/btcutil/bech32/bech32_test.go +++ b/btcutil/bech32/bech32_test.go @@ -297,9 +297,9 @@ func TestMixedCaseEncode(t *testing.T) { } } -// TestCanDecodeUnlimtedBech32 tests whether decoding a large bech32 string works +// TestCanDecodeUnlimitedBech32 tests whether decoding a large bech32 string works // when using the DecodeNoLimit version -func TestCanDecodeUnlimtedBech32(t *testing.T) { +func TestCanDecodeUnlimitedBech32(t *testing.T) { input := "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq5kx0yd" // Sanity check that an input of this length errors on regular Decode() @@ -668,7 +668,7 @@ func BenchmarkConvertBitsDown(b *testing.B) { } } -// BenchmarkConvertBitsDown benchmarks the speed and memory allocation behavior +// BenchmarkConvertBitsUp benchmarks the speed and memory allocation behavior // of ConvertBits when converting from a lower base into a higher base (e.g. 5 // => 8). 
// diff --git a/btcutil/bloom/example_test.go b/btcutil/bloom/example_test.go index e5a148a5ba..2be2b67a74 100644 --- a/btcutil/bloom/example_test.go +++ b/btcutil/bloom/example_test.go @@ -26,7 +26,7 @@ func ExampleNewFilter() { filter := bloom.NewFilter(10, tweak, 0.0001, wire.BloomUpdateNone) // Create a transaction hash and add it to the filter. This particular - // trasaction is the first transaction in block 310,000 of the main + // transaction is the first transaction in block 310,000 of the main // bitcoin block chain. txHashStr := "fd611c56ca0d378cdcd16244b45c2ba9588da3adac367c4ef43e808b280b8a45" txHash, err := chainhash.NewHashFromStr(txHashStr) diff --git a/btcutil/coinset/coins_test.go b/btcutil/coinset/coins_test.go index 035a40cb99..c1984623f6 100644 --- a/btcutil/coinset/coins_test.go +++ b/btcutil/coinset/coins_test.go @@ -252,7 +252,7 @@ func TestSimpleCoin(t *testing.T) { t.Error("Different value of coin pkScript than expected") } if testSimpleCoin.NumConfs() != 1 { - t.Error("Differet value of num confs than expected") + t.Error("Different value of num confs than expected") } if testSimpleCoin.ValueAge() != testSimpleCoinTxValueAge0 { t.Error("Different value of coin value * age than expected") diff --git a/btcutil/gcs/builder/builder.go b/btcutil/gcs/builder/builder.go index 3a85ad0519..8e257b39fc 100644 --- a/btcutil/gcs/builder/builder.go +++ b/btcutil/gcs/builder/builder.go @@ -60,7 +60,7 @@ func RandomKey() ([gcs.KeySize]byte, error) { } // DeriveKey is a utility function that derives a key from a chainhash.Hash by -// truncating the bytes of the hash to the appopriate key size. +// truncating the bytes of the hash to the appropriate key size. 
func DeriveKey(keyHash *chainhash.Hash) [gcs.KeySize]byte { var key [gcs.KeySize]byte copy(key[:], keyHash.CloneBytes()) @@ -207,7 +207,7 @@ func (b *GCSBuilder) Build() (*gcs.Filter, error) { return nil, b.err } - // We'll ensure that all the parmaters we need to actually build the + // We'll ensure that all the parameters we need to actually build the // filter properly are set. if b.p == 0 { return nil, fmt.Errorf("p value is not set, cannot build") diff --git a/btcutil/gcs/gcsbench_test.go b/btcutil/gcs/gcsbench_test.go index 14125a16c0..69ce7708b3 100644 --- a/btcutil/gcs/gcsbench_test.go +++ b/btcutil/gcs/gcsbench_test.go @@ -158,7 +158,7 @@ var matchAnyBenchmarks = []struct { {"q10M-f10K", randElems10000000, filter10000}, } -// BenchmarkGCSFilterMatchAny benchmarks the sort-and-zip MatchAny impl. +// BenchmarkGCSFilterZipMatchAny benchmarks the sort-and-zip MatchAny impl. func BenchmarkGCSFilterZipMatchAny(b *testing.B) { for _, test := range matchAnyBenchmarks { test := test @@ -184,7 +184,7 @@ func BenchmarkGCSFilterZipMatchAny(b *testing.B) { } } -// BenchmarkGCSFilterMatchAny benchmarks the hash-join MatchAny impl. +// BenchmarkGCSFilterHashMatchAny benchmarks the hash-join MatchAny impl. 
func BenchmarkGCSFilterHashMatchAny(b *testing.B) { for _, test := range matchAnyBenchmarks { test := test diff --git a/btcutil/go.sum b/btcutil/go.sum index a57a5dd920..5ee4cd5207 100644 --- a/btcutil/go.sum +++ b/btcutil/go.sum @@ -2,8 +2,6 @@ github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.4 h1:IzV6qqkfwbItOS/sg/aDfPDsjPP8twrCOE2R93hxMlQ= -github.com/btcsuite/btcd v0.23.4/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= @@ -18,15 +16,12 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtyd github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod 
h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -36,7 +31,6 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/decred/dcrd/lru v1.0.0 h1:Kbsb1SFDsIlaupWPwsPp+dkxiBY1frcS07PCPgotKz8= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -55,9 +49,7 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= diff --git a/btcutil/hdkeychain/README.md b/btcutil/hdkeychain/README.md index eaf57d7c58..2101ed4d81 100644 --- a/btcutil/hdkeychain/README.md +++ b/btcutil/hdkeychain/README.md @@ -17,7 +17,7 @@ report. - Full BIP0032 implementation - Single type for private and public extended keys -- Convenient cryptograpically secure seed generation +- Convenient cryptographically secure seed generation - Simple creation of master nodes - Support for multi-layer derivation - Easy serialization and deserialization for both private and public extended @@ -26,7 +26,7 @@ report. 
- Obtaining the underlying EC pubkeys, EC privkeys, and associated bitcoin addresses ties in seamlessly with existing btcec and btcutil types which provide powerful tools for working with them to do things like sign - transations and generate payment scripts + transactions and generate payment scripts - Uses the btcec package which is highly optimized for secp256k1 - Code examples including: - Generating a cryptographically secure random seed and deriving a diff --git a/btcutil/hdkeychain/extendedkey.go b/btcutil/hdkeychain/extendedkey.go index 0bbb1e7d19..8bbcdceef4 100644 --- a/btcutil/hdkeychain/extendedkey.go +++ b/btcutil/hdkeychain/extendedkey.go @@ -467,7 +467,7 @@ func (k *ExtendedKey) DeriveNonStandard(i uint32) (*ExtendedKey, error) { k.depth+1, i, isPrivate), nil } -// ChildNum returns the index at which the child extended key was derived. +// ChildIndex returns the index at which the child extended key was derived. // // Extended keys with ChildNum value between 0 and 2^31-1 are normal child // keys, and those with a value between 2^31 and 2^32-1 are hardened keys. diff --git a/btcutil/psbt/creator.go b/btcutil/psbt/creator.go index a5f832e0dd..58b9a54488 100644 --- a/btcutil/psbt/creator.go +++ b/btcutil/psbt/creator.go @@ -17,7 +17,7 @@ const MinTxVersion = 1 // within the unsigned transaction. The values of nLockTime, nSequence (per // input) and transaction version (must be 1 of 2) must be specified here. Note // that the default nSequence value is wire.MaxTxInSequenceNum. Referencing -// the PSBT BIP, this function serves the roles of teh Creator. +// the PSBT BIP, this function serves the roles of the Creator. 
func New(inputs []*wire.OutPoint, outputs []*wire.TxOut, version int32, nLockTime uint32, nSequences []uint32) (*Packet, error) { diff --git a/btcutil/psbt/finalizer.go b/btcutil/psbt/finalizer.go index 3c2edd5557..b1bf12d131 100644 --- a/btcutil/psbt/finalizer.go +++ b/btcutil/psbt/finalizer.go @@ -404,7 +404,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error { } containsRedeemScript := pInput.RedeemScript != nil - cointainsWitnessScript := pInput.WitnessScript != nil + containsWitnessScript := pInput.WitnessScript != nil // If there's no redeem script, then we assume that this is native // segwit input. @@ -413,7 +413,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error { // If we have only a sigley pubkey+sig pair, and no witness // script, then we assume this is a P2WKH input. if len(pubKeys) == 1 && len(sigs) == 1 && - !cointainsWitnessScript { + !containsWitnessScript { serializedWitness, err = writePKHWitness( sigs[0], pubKeys[0], @@ -430,7 +430,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error { // TODO(roasbeef): need to add custom finalize for // non-multisig P2WSH outputs (HTLCs, delay outputs, // etc). - if !cointainsWitnessScript { + if !containsWitnessScript { return ErrNotFinalizable } @@ -457,7 +457,7 @@ func finalizeWitnessInput(p *Packet, inIndex int) error { // If don't have a witness script, then we assume this is a // nested p2wkh output. 
- if !cointainsWitnessScript { + if !containsWitnessScript { // Assumed p2sh-p2wkh Here the witness is just (sig, // pub) as for p2pkh case if len(sigs) != 1 || len(pubKeys) != 1 { diff --git a/btcutil/psbt/utils.go b/btcutil/psbt/utils.go index 85bc82f529..0a9002798e 100644 --- a/btcutil/psbt/utils.go +++ b/btcutil/psbt/utils.go @@ -226,7 +226,7 @@ func serializeKVPairWithType(w io.Writer, kt uint8, keydata []byte, // getKey retrieves a single key - both the key type and the keydata (if // present) from the stream and returns the key type as an integer, or -1 if -// the key was of zero length. This integer is is used to indicate the presence +// the key was of zero length. This integer is used to indicate the presence // of a separator byte which indicates the end of a given key-value pair list, // and the keydata as a byte slice or nil if none is present. func getKey(r io.Reader) (int, []byte, error) { diff --git a/btcutil/txsort/txsort_test.go b/btcutil/txsort/txsort_test.go index dd2149294e..16a3e61c83 100644 --- a/btcutil/txsort/txsort_test.go +++ b/btcutil/txsort/txsort_test.go @@ -7,7 +7,7 @@ package txsort_test import ( "bytes" "encoding/hex" - "io/ioutil" + "os" "path/filepath" "testing" @@ -64,7 +64,7 @@ func TestSort(t *testing.T) { for _, test := range tests { // Load and deserialize the test transaction. filePath := filepath.Join("testdata", test.hexFile) - txHexBytes, err := ioutil.ReadFile(filePath) + txHexBytes, err := os.ReadFile(filePath) if err != nil { t.Errorf("ReadFile (%s): failed to read test file: %v", test.name, err) diff --git a/chaincfg/params.go b/chaincfg/params.go index 3a7f7661e1..1c329cb50f 100644 --- a/chaincfg/params.go +++ b/chaincfg/params.go @@ -59,6 +59,7 @@ var ( // DefaultSignetDNSSeeds is the list of seed nodes for the default // (public, Taproot enabled) signet network. 
DefaultSignetDNSSeeds = []DNSSeed{ + {"seed.signet.bitcoin.sprovoost.nl", true}, {"178.128.221.177", false}, {"2a01:7c8:d005:390::5", false}, {"v7ajjeirttkbnt32wpy3c6w3emwnfr3fkla7hpxcfokr3ysd3kqtzmqd.onion:38333", false}, @@ -284,10 +285,11 @@ var MainNetParams = Params{ {"seed.bitcoin.sipa.be", true}, {"dnsseed.bluematt.me", true}, {"dnsseed.bitcoin.dashjr.org", false}, - {"seed.bitcoinstats.com", true}, {"seed.bitnodes.io", false}, {"seed.bitcoin.jonasschnelli.ch", true}, {"seed.btc.petertodd.net", true}, + {"seed.bitcoin.sprovoost.nl", true}, + {"seed.bitcoin.wiz.biz", true}, }, // Chain parameters @@ -544,8 +546,8 @@ var TestNet3Params = Params{ DefaultPort: "18333", DNSSeeds: []DNSSeed{ {"testnet-seed.bitcoin.jonasschnelli.ch", true}, - {"testnet-seed.bitcoin.schildbach.de", false}, {"seed.tbtc.petertodd.net", true}, + {"seed.testnet.bitcoin.sprovoost.nl", true}, {"testnet-seed.bluematt.me", false}, }, diff --git a/cmd/addblock/config.go b/cmd/addblock/config.go index ffcc0eca79..d49df0a11d 100644 --- a/cmd/addblock/config.go +++ b/cmd/addblock/config.go @@ -45,7 +45,7 @@ type config struct { TxIndex bool `long:"txindex" description:"Build a full hash-based transaction index which makes all transactions available via the getrawtransaction RPC"` } -// filesExists reports whether the named file or directory exists. +// fileExists reports whether the named file or directory exists. func fileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { diff --git a/cmd/btcctl/httpclient.go b/cmd/btcctl/httpclient.go index 2a0f6dffd4..c7b4b7e3a1 100644 --- a/cmd/btcctl/httpclient.go +++ b/cmd/btcctl/httpclient.go @@ -6,9 +6,10 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" + "os" "github.com/btcsuite/btcd/btcjson" "github.com/btcsuite/go-socks/socks" @@ -37,7 +38,7 @@ func newHTTPClient(cfg *config) (*http.Client, error) { // Configure TLS if needed. 
var tlsConfig *tls.Config if !cfg.NoTLS && cfg.RPCCert != "" { - pem, err := ioutil.ReadFile(cfg.RPCCert) + pem, err := os.ReadFile(cfg.RPCCert) if err != nil { return nil, err } @@ -95,7 +96,7 @@ func sendPostRequest(marshalledJSON []byte, cfg *config) ([]byte, error) { } // Read the raw bytes and close the response. - respBytes, err := ioutil.ReadAll(httpResponse.Body) + respBytes, err := io.ReadAll(httpResponse.Body) httpResponse.Body.Close() if err != nil { err = fmt.Errorf("error reading json reply: %v", err) diff --git a/cmd/gencerts/gencerts.go b/cmd/gencerts/gencerts.go index 27c9ae385c..0c91b6fb85 100644 --- a/cmd/gencerts/gencerts.go +++ b/cmd/gencerts/gencerts.go @@ -6,7 +6,6 @@ package main import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -65,11 +64,11 @@ func main() { } // Write cert and key files. - if err = ioutil.WriteFile(certFile, cert, 0666); err != nil { + if err = os.WriteFile(certFile, cert, 0666); err != nil { fmt.Fprintf(os.Stderr, "cannot write cert: %v\n", err) os.Exit(1) } - if err = ioutil.WriteFile(keyFile, key, 0600); err != nil { + if err = os.WriteFile(keyFile, key, 0600); err != nil { os.Remove(certFile) fmt.Fprintf(os.Stderr, "cannot write key: %v\n", err) os.Exit(1) @@ -91,7 +90,7 @@ func cleanAndExpandPath(path string) string { return filepath.Clean(os.ExpandEnv(path)) } -// filesExists reports whether the named file or directory exists. +// fileExists reports whether the named file or directory exists. 
func fileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { diff --git a/config.go b/config.go index 18620a008c..9bbce7f69a 100644 --- a/config.go +++ b/config.go @@ -101,7 +101,7 @@ type config struct { AddPeers []string `short:"a" long:"addpeer" description:"Add a peer to connect with at startup"` AddrIndex bool `long:"addrindex" description:"Maintain a full address-based transaction index which makes the searchrawtransactions RPC available"` AgentBlacklist []string `long:"agentblacklist" description:"A comma separated list of user-agent substrings which will cause btcd to reject any peers whose user-agent contains any of the blacklisted substrings."` - AgentWhitelist []string `long:"agentwhitelist" description:"A comma separated list of user-agent substrings which will cause btcd to require all peers' user-agents to contain one of the whitelisted substrings. The blacklist is applied before the blacklist, and an empty whitelist will allow all agents that do not fail the blacklist."` + AgentWhitelist []string `long:"agentwhitelist" description:"A comma separated list of user-agent substrings which will cause btcd to require all peers' user-agents to contain one of the whitelisted substrings. The blacklist is applied before the whitelist, and an empty whitelist will allow all agents that do not fail the blacklist."` BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"` BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."` BlockMaxSize uint32 `long:"blockmaxsize" description:"Maximum block size in bytes to be used when creating a block"` @@ -244,7 +244,7 @@ func supportedSubsystems() []string { // the levels accordingly. An appropriate error is returned if anything is // invalid. 
func parseAndSetDebugLevels(debugLevel string) error { - // When the specified string doesn't have any delimters, treat it as + // When the specified string doesn't have any delimiters, treat it as // the log level for all subsystems. if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") { // Validate debug log level. @@ -275,7 +275,7 @@ func parseAndSetDebugLevels(debugLevel string) error { // Validate subsystem. if _, exists := subsystemLoggers[subsysID]; !exists { str := "The specified subsystem [%v] is invalid -- " + - "supported subsytems %v" + "supported subsystems %v" return fmt.Errorf(str, subsysID, supportedSubsystems()) } @@ -384,7 +384,7 @@ func parseCheckpoints(checkpointStrings []string) ([]chaincfg.Checkpoint, error) return checkpoints, nil } -// filesExists reports whether the named file or directory exists. +// fileExists reports whether the named file or directory exists. func fileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { diff --git a/config_test.go b/config_test.go index e54a9f5f20..42a0cd4b90 100644 --- a/config_test.go +++ b/config_test.go @@ -1,7 +1,6 @@ package main import ( - "io/ioutil" "os" "path/filepath" "regexp" @@ -23,14 +22,14 @@ func TestCreateDefaultConfigFile(t *testing.T) { sampleConfigFile := filepath.Join(filepath.Dir(path), "sample-btcd.conf") // Setup a temporary directory - tmpDir, err := ioutil.TempDir("", "btcd") + tmpDir, err := os.MkdirTemp("", "btcd") if err != nil { t.Fatalf("Failed creating a temporary directory: %v", err) } testpath := filepath.Join(tmpDir, "test.conf") // copy config file to location of btcd binary - data, err := ioutil.ReadFile(sampleConfigFile) + data, err := os.ReadFile(sampleConfigFile) if err != nil { t.Fatalf("Failed reading sample config file: %v", err) } @@ -39,7 +38,7 @@ func TestCreateDefaultConfigFile(t *testing.T) { t.Fatalf("Failed obtaining app path: %v", err) } tmpConfigFile := filepath.Join(appPath, 
"sample-btcd.conf") - err = ioutil.WriteFile(tmpConfigFile, data, 0644) + err = os.WriteFile(tmpConfigFile, data, 0644) if err != nil { t.Fatalf("Failed copying sample config file: %v", err) } @@ -57,7 +56,7 @@ func TestCreateDefaultConfigFile(t *testing.T) { t.Fatalf("Failed to create a default config file: %v", err) } - content, err := ioutil.ReadFile(testpath) + content, err := os.ReadFile(testpath) if err != nil { t.Fatalf("Failed to read generated default config file: %v", err) } diff --git a/connmgr/connmanager.go b/connmgr/connmanager.go index b487bd1ba1..e88f8af0cb 100644 --- a/connmgr/connmanager.go +++ b/connmgr/connmanager.go @@ -525,9 +525,9 @@ func (cm *ConnManager) Start() { // Start all the listeners so long as the caller requested them and // provided a callback to be invoked when connections are accepted. if cm.cfg.OnAccept != nil { - for _, listner := range cm.cfg.Listeners { + for _, listener := range cm.cfg.Listeners { cm.wg.Add(1) - go cm.listenHandler(listner) + go cm.listenHandler(listener) } } diff --git a/connmgr/seed.go b/connmgr/seed.go index 4c26160d8f..705618f778 100644 --- a/connmgr/seed.go +++ b/connmgr/seed.go @@ -23,7 +23,7 @@ const ( ) // OnSeed is the signature of the callback function which is invoked when DNS -// seeding is succesfull. +// seeding is successful. type OnSeed func(addrs []*wire.NetAddressV2) // LookupFunc is the signature of the DNS lookup function. diff --git a/database/error.go b/database/error.go index 49c250eef5..3470c49749 100644 --- a/database/error.go +++ b/database/error.go @@ -87,7 +87,7 @@ const ( // should be relatively, so this should rarely be an issue. ErrKeyTooLarge - // ErrValueTooLarge indicates an attmpt to insert a value that is larger + // ErrValueTooLarge indicates an attempt to insert a value that is larger // than max allowed value size. The max key size depends on the // specific backend driver being used. 
ErrValueTooLarge diff --git a/database/example_test.go b/database/example_test.go index b64baf2c8e..1110d0dbc3 100644 --- a/database/example_test.go +++ b/database/example_test.go @@ -7,7 +7,6 @@ package database_test import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" @@ -123,7 +122,7 @@ func Example_blockStorageAndRetrieval() { // Typically you wouldn't want to remove the database right away like // this, nor put it in the temp directory, but it's done here to ensure // the example cleans up after itself. - dbPath, err := ioutil.TempDir("", "exampleblkstorage") + dbPath, err := os.MkdirTemp("", "exampleblkstorage") if err != nil { fmt.Println(err) return diff --git a/database/ffldb/db.go b/database/ffldb/db.go index 8fc4d32646..3e96bfc738 100644 --- a/database/ffldb/db.go +++ b/database/ffldb/db.go @@ -2071,7 +2071,7 @@ func (db *db) Close() error { return closeErr } -// filesExists reports whether the named file or directory exists. +// fileExists reports whether the named file or directory exists. 
func fileExists(name string) bool { if _, err := os.Stat(name); err != nil { if os.IsNotExist(err) { diff --git a/database/ffldb/driver.go b/database/ffldb/driver.go index 28ab8277e9..01290bf09a 100644 --- a/database/ffldb/driver.go +++ b/database/ffldb/driver.go @@ -78,7 +78,7 @@ func init() { UseLogger: useLogger, } if err := database.RegisterDriver(driver); err != nil { - panic(fmt.Sprintf("Failed to regiser database driver '%s': %v", + panic(fmt.Sprintf("Failed to register database driver '%s': %v", dbType, err)) } } diff --git a/database/ffldb/driver_test.go b/database/ffldb/driver_test.go index 38a84ee2f9..0b2f452032 100644 --- a/database/ffldb/driver_test.go +++ b/database/ffldb/driver_test.go @@ -350,7 +350,7 @@ func TestPrune(t *testing.T) { } if pruned { - err = fmt.Errorf("The database hasn't been commited yet " + + err = fmt.Errorf("The database hasn't been committed yet " + "but files were already deleted") } return err diff --git a/database/ffldb/interface_test.go b/database/ffldb/interface_test.go index b0f275c5de..36db769b01 100644 --- a/database/ffldb/interface_test.go +++ b/database/ffldb/interface_test.go @@ -255,7 +255,7 @@ func testDeleteValues(tc *testContext, bucket database.Bucket, values []keyPair) return true } -// testCursorInterface ensures the cursor itnerface is working properly by +// testCursorInterface ensures the cursor interface is working properly by // exercising all of its functions on the passed bucket. func testCursorInterface(tc *testContext, bucket database.Bucket) bool { // Ensure a cursor can be obtained for the bucket. @@ -639,7 +639,7 @@ func rollbackOnPanic(t *testing.T, tx database.Tx) { func testMetadataManualTxInterface(tc *testContext) bool { // populateValues tests that populating values works as expected. 
// - // When the writable flag is false, a read-only tranasction is created, + // When the writable flag is false, a read-only transaction is created, // standard bucket tests for read-only transactions are performed, and // the Commit function is checked to ensure it fails as expected. // diff --git a/database/ffldb/whitebox_test.go b/database/ffldb/whitebox_test.go index cc7c13d45f..cac4984077 100644 --- a/database/ffldb/whitebox_test.go +++ b/database/ffldb/whitebox_test.go @@ -218,7 +218,7 @@ func TestCornerCases(t *testing.T) { ldb := idb.(*db).cache.ldb ldb.Close() - // Ensure initilization errors in the underlying database work as + // Ensure initialization errors in the underlying database work as // expected. testName = "initDB: reinitialization" wantErrCode = database.ErrDbNotOpen diff --git a/database/interface.go b/database/interface.go index 7efc7c55f6..7c4dd85122 100644 --- a/database/interface.go +++ b/database/interface.go @@ -390,7 +390,7 @@ type Tx interface { FetchBlockRegions(regions []BlockRegion) ([][]byte, error) // PruneBlocks deletes the block files until it reaches the target size - // (specificed in bytes). + // (specified in bytes). // // The interface contract guarantees at least the following errors will // be returned (other implementation-specific errors are possible): diff --git a/docs/configuration.md b/docs/configuration.md index c6f95b274c..cc1d0dbe44 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -175,7 +175,7 @@ and we prefer to keep the daemon itself as lightweight as possible. access the database used by btcd and it will be locked if btcd is using it. 2. Note the path to the downloaded bootstrap.dat file. 3. 
Run the addblock utility with the `-i` argument pointing to the location of - boostrap.dat: + bootstrap.dat: **Windows:** diff --git a/docs/developer_resources.md b/docs/developer_resources.md index c595c8330e..328e1b225a 100644 --- a/docs/developer_resources.md +++ b/docs/developer_resources.md @@ -28,7 +28,7 @@ * [mempool](https://github.com/btcsuite/btcd/tree/master/mempool) - Package mempool provides a policy-enforced pool of unmined bitcoin transactions. - * [btcutil](https://github.com/btcsuite/btcd/btcutil) - Provides Bitcoin-specific + * [btcutil](https://github.com/btcsuite/btcd/tree/master/btcutil) - Provides Bitcoin-specific convenience functions and types * [chainhash](https://github.com/btcsuite/btcd/tree/master/chaincfg/chainhash) - Provides a generic hash type and associated functions that allows the diff --git a/docs/json_rpc_api.md b/docs/json_rpc_api.md index 2c7d455457..1999a6c245 100644 --- a/docs/json_rpc_api.md +++ b/docs/json_rpc_api.md @@ -472,7 +472,7 @@ Example Return|`{`
  `"bytes": 310768,`
  `"size": |---|---| |Method|help| |Parameters|1. command (string, optional) - the command to get help for| -|Description|Returns a list of all commands or help for a specified command.
When no `command` parameter is specified, a list of avaialable commands is returned
When `command` is a valid method, the help text for that method is returned.| +|Description|Returns a list of all commands or help for a specified command.
When no `command` parameter is specified, a list of available commands is returned
When `command` is a valid method, the help text for that method is returned.| |Returns|string| |Example Return|getblockcount
Returns a numeric for the number of blocks in the longest block chain.| [Return to Overview](#MethodOverview)
@@ -1121,7 +1121,7 @@ func main() { // generated by btcd when it starts the RPC server and doesn't already // have one. btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + certs, err := os.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } @@ -1185,7 +1185,7 @@ func main() { // generated by btcd when it starts the RPC server and doesn't already // have one. btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + certs, err := os.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } @@ -1288,7 +1288,7 @@ func main() { // generated by btcd when it starts the RPC server and doesn't already // have one. btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + certs, err := os.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } diff --git a/go.mod b/go.mod index 425e6d7f2b..6eea83508e 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/jessevdk/go-flags v1.4.0 github.com/jrick/logrotate v1.0.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.4 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed @@ -26,7 +26,8 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + github.com/stretchr/objx v0.5.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) // The retract statements below fixes an accidental push of the tags of a btcd diff --git a/go.sum b/go.sum index 158e868092..ddd7e1ebd0 100644 --- a/go.sum 
+++ b/go.sum @@ -77,8 +77,14 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -125,5 +131,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/integration/chain_test.go b/integration/chain_test.go new file mode 100644 index 0000000000..0f5cd94c83 --- /dev/null +++ b/integration/chain_test.go @@ -0,0 +1,146 @@ +//go:build rpctest +// +build rpctest + +package integration + +import ( + "testing" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/integration/rpctest" + "github.com/btcsuite/btcd/rpcclient" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" +) + +// TestGetTxSpendingPrevOut checks that `GetTxSpendingPrevOut` behaves as +// expected. +// - an error is returned when invalid params are used. +// - orphan tx is rejected. +// - fee rate above the max is rejected. +// - a mixed of both allowed and rejected can be returned in the same response. +func TestGetTxSpendingPrevOut(t *testing.T) { + t.Parallel() + + // Boilerplate codetestDir to make a pruned node. + btcdCfg := []string{"--rejectnonstd", "--debuglevel=debug"} + r, err := rpctest.New(&chaincfg.SimNetParams, nil, btcdCfg, "") + require.NoError(t, err) + + // Setup the node. + require.NoError(t, r.SetUp(true, 100)) + t.Cleanup(func() { + require.NoError(t, r.TearDown()) + }) + + // Create a tx and testing outpoints. + tx := createTxInMempool(t, r) + opInMempool := tx.TxIn[0].PreviousOutPoint + opNotInMempool := wire.OutPoint{ + Hash: tx.TxHash(), + Index: 0, + } + + testCases := []struct { + name string + outpoints []wire.OutPoint + expectedErr error + expectedResult []*btcjson.GetTxSpendingPrevOutResult + }{ + { + // When no outpoints are provided, the method should + // return an error. 
+ name: "empty outpoints", + expectedErr: rpcclient.ErrInvalidParam, + expectedResult: nil, + }, + { + // When there are outpoints provided, check the + // expceted results are returned. + name: "outpoints", + outpoints: []wire.OutPoint{ + opInMempool, opNotInMempool, + }, + expectedErr: nil, + expectedResult: []*btcjson.GetTxSpendingPrevOutResult{ + { + Txid: opInMempool.Hash.String(), + Vout: opInMempool.Index, + SpendingTxid: tx.TxHash().String(), + }, + { + Txid: opNotInMempool.Hash.String(), + Vout: opNotInMempool.Index, + }, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + require := require.New(t) + + results, err := r.Client.GetTxSpendingPrevOut( + tc.outpoints, + ) + + require.ErrorIs(err, tc.expectedErr) + require.Len(results, len(tc.expectedResult)) + + // Check each item is returned as expected. + for i, r := range results { + e := tc.expectedResult[i] + + require.Equal(e.Txid, r.Txid) + require.Equal(e.Vout, r.Vout) + require.Equal(e.SpendingTxid, r.SpendingTxid) + } + }) + } +} + +// createTxInMempool creates a tx and puts it in the mempool. +func createTxInMempool(t *testing.T, r *rpctest.Harness) *wire.MsgTx { + // Create a fresh output for usage within the test below. + const outputValue = btcutil.SatoshiPerBitcoin + outputKey, testOutput, testPkScript, err := makeTestOutput( + r, t, outputValue, + ) + require.NoError(t, err) + + // Create a new transaction with a lock-time past the current known + // MTP. + tx := wire.NewMsgTx(1) + tx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: *testOutput, + }) + + // Fetch a fresh address from the harness, we'll use this address to + // send funds back into the Harness. 
+ addr, err := r.NewAddress() + require.NoError(t, err) + + addrScript, err := txscript.PayToAddrScript(addr) + require.NoError(t, err) + + tx.AddTxOut(&wire.TxOut{ + PkScript: addrScript, + Value: outputValue - 1000, + }) + + sigScript, err := txscript.SignatureScript( + tx, 0, testPkScript, txscript.SigHashAll, outputKey, true, + ) + require.NoError(t, err) + tx.TxIn[0].SignatureScript = sigScript + + // Send the tx. + _, err = r.Client.SendRawTransaction(tx, true) + require.NoError(t, err) + + return tx +} diff --git a/integration/invalidate_reconsider_block_test.go b/integration/invalidate_reconsider_block_test.go new file mode 100644 index 0000000000..4fe6ff0012 --- /dev/null +++ b/integration/invalidate_reconsider_block_test.go @@ -0,0 +1,244 @@ +package integration + +import ( + "testing" + + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/integration/rpctest" +) + +func TestInvalidateAndReconsiderBlock(t *testing.T) { + // Set up regtest chain. + r, err := rpctest.New(&chaincfg.RegressionNetParams, nil, nil, "") + if err != nil { + t.Fatalf("TestInvalidateAndReconsiderBlock fail."+ + "Unable to create primary harness: %v", err) + } + if err := r.SetUp(true, 0); err != nil { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. "+ + "Unable to setup test chain: %v", err) + } + defer r.TearDown() + + // Generate 4 blocks. + // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2 -> 3 -> 4 + _, err = r.Client.Generate(4) + if err != nil { + t.Fatal(err) + } + + // Cache the active tip hash. + block4ActiveTipHash, err := r.Client.GetBestBlockHash() + if err != nil { + t.Fatal(err) + } + + // Cache block 1 hash as this will be our chaintip after we invalidate block 2. + block1Hash, err := r.Client.GetBlockHash(1) + if err != nil { + t.Fatal(err) + } + + // Invalidate block 2. 
+ // + // Our chain view looks like so: + // (genesis block) -> 1 (active) + // \ -> 2 -> 3 -> 4 (invalid) + block2Hash, err := r.Client.GetBlockHash(2) + if err != nil { + t.Fatal(err) + } + err = r.Client.InvalidateBlock(block2Hash) + if err != nil { + t.Fatal(err) + } + + // Assert that block 1 is the active chaintip. + bestHash, err := r.Client.GetBestBlockHash() + if *bestHash != *block1Hash { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected the "+ + "best block hash to be block 1 with hash %s but got %s", + block1Hash.String(), bestHash.String()) + } + + // Generate 2 blocks. + // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2a -> 3a (active) + // \ -> 2 -> 3 -> 4 (invalid) + _, err = r.Client.Generate(2) + if err != nil { + t.Fatal(err) + } + + // Cache the active tip hash for the current active tip. + block3aActiveTipHash, err := r.Client.GetBestBlockHash() + if err != nil { + t.Fatal(err) + } + + tips, err := r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + + // Assert that there are two branches. + if len(tips) != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. "+ + "Expected 2 chaintips but got %d", len(tips)) + } + + for _, tip := range tips { + if tip.Hash == block4ActiveTipHash.String() && + tip.Status != "invalid" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "invalidated branch tip of %s to be invalid but got %s", + tip.Hash, tip.Status) + } + } + + // Reconsider the invalidated block 2. + // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2a -> 3a (valid-fork) + // \ -> 2 -> 3 -> 4 (active) + err = r.Client.ReconsiderBlock(block2Hash) + if err != nil { + t.Fatal(err) + } + + tips, err = r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + // Assert that there are two branches. + if len(tips) != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. 
"+ + "Expected 2 chaintips but got %d", len(tips)) + } + + var checkedTips int + for _, tip := range tips { + if tip.Hash == block4ActiveTipHash.String() { + if tip.Status != "active" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "the reconsidered branch tip of %s to be active but got %s", + tip.Hash, tip.Status) + } + + checkedTips++ + } + + if tip.Hash == block3aActiveTipHash.String() { + if tip.Status != "valid-fork" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "invalidated branch tip of %s to be valid-fork but got %s", + tip.Hash, tip.Status) + } + checkedTips++ + } + } + + if checkedTips != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. "+ + "Expected to check %d chaintips, checked %d", 2, checkedTips) + } + + // Invalidate block 3a. + // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2a -> 3a (invalid) + // \ -> 2 -> 3 -> 4 (active) + err = r.Client.InvalidateBlock(block3aActiveTipHash) + if err != nil { + t.Fatal(err) + } + + tips, err = r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + + // Assert that there are two branches. + if len(tips) != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. "+ + "Expected 2 chaintips but got %d", len(tips)) + } + + checkedTips = 0 + for _, tip := range tips { + if tip.Hash == block4ActiveTipHash.String() { + if tip.Status != "active" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "an active branch tip of %s but got %s", + tip.Hash, tip.Status) + } + + checkedTips++ + } + + if tip.Hash == block3aActiveTipHash.String() { + if tip.Status != "invalid" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "the invalidated tip of %s to be invalid but got %s", + tip.Hash, tip.Status) + } + checkedTips++ + } + } + + if checkedTips != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. "+ + "Expected to check %d chaintips, checked %d", 2, checkedTips) + } + + // Reconsider block 3a. 
+ // + // Our chain view looks like so: + // (genesis block) -> 1 -> 2a -> 3a (valid-fork) + // \ -> 2 -> 3 -> 4 (active) + err = r.Client.ReconsiderBlock(block3aActiveTipHash) + if err != nil { + t.Fatal(err) + } + + tips, err = r.Client.GetChainTips() + if err != nil { + t.Fatal(err) + } + + // Assert that there are two branches. + if len(tips) != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. "+ + "Expected 2 chaintips but got %d", len(tips)) + } + + checkedTips = 0 + for _, tip := range tips { + if tip.Hash == block4ActiveTipHash.String() { + if tip.Status != "active" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "an active branch tip of %s but got %s", + tip.Hash, tip.Status) + } + + checkedTips++ + } + + if tip.Hash == block3aActiveTipHash.String() { + if tip.Status != "valid-fork" { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. Expected "+ + "the reconsidered tip of %s to be a valid-fork but got %s", + tip.Hash, tip.Status) + } + checkedTips++ + } + } + + if checkedTips != 2 { + t.Fatalf("TestInvalidateAndReconsiderBlock fail. 
"+ + "Expected to check %d chaintips, checked %d", 2, checkedTips) + } +} diff --git a/integration/log.go b/integration/log.go new file mode 100644 index 0000000000..26d6217454 --- /dev/null +++ b/integration/log.go @@ -0,0 +1,26 @@ +//go:build rpctest +// +build rpctest + +package integration + +import ( + "os" + + "github.com/btcsuite/btcd/rpcclient" + "github.com/btcsuite/btclog" +) + +type logWriter struct{} + +func (logWriter) Write(p []byte) (n int, err error) { + os.Stdout.Write(p) + return len(p), nil +} + +func init() { + backendLog := btclog.NewBackend(logWriter{}) + testLog := backendLog.Logger("ITEST") + testLog.SetLevel(btclog.LevelDebug) + + rpcclient.UseLogger(testLog) +} diff --git a/integration/rawtx_test.go b/integration/rawtx_test.go new file mode 100644 index 0000000000..f27f5176cf --- /dev/null +++ b/integration/rawtx_test.go @@ -0,0 +1,203 @@ +//go:build rpctest +// +build rpctest + +package integration + +import ( + "encoding/hex" + "testing" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/integration/rpctest" + "github.com/btcsuite/btcd/rpcclient" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" +) + +// TestTestMempoolAccept checks that `TestTestMempoolAccept` behaves as +// expected. It checks that, +// - an error is returned when invalid params are used. +// - orphan tx is rejected. +// - fee rate above the max is rejected. +// - a mixed of both allowed and rejected can be returned in the same response. +func TestTestMempoolAccept(t *testing.T) { + t.Parallel() + + // Boilerplate codetestDir to make a pruned node. + btcdCfg := []string{"--rejectnonstd", "--debuglevel=debug"} + r, err := rpctest.New(&chaincfg.SimNetParams, nil, btcdCfg, "") + require.NoError(t, err) + + // Setup the node. 
+ require.NoError(t, r.SetUp(true, 100)) + t.Cleanup(func() { + require.NoError(t, r.TearDown()) + }) + + // Create testing txns. + invalidTx := decodeHex(t, missingParentsHex) + validTx := createTestTx(t, r) + + // Create testing constants. + const feeRate = 10 + + testCases := []struct { + name string + txns []*wire.MsgTx + maxFeeRate float64 + expectedErr error + expectedResult []*btcjson.TestMempoolAcceptResult + }{ + { + // When too many txns are provided, the method should + // return an error. + name: "too many txns", + txns: make([]*wire.MsgTx, 26), + maxFeeRate: 0, + expectedErr: rpcclient.ErrInvalidParam, + expectedResult: nil, + }, + { + // When no txns are provided, the method should return + // an error. + name: "empty txns", + txns: nil, + maxFeeRate: 0, + expectedErr: rpcclient.ErrInvalidParam, + expectedResult: nil, + }, + { + // When a corrupted txn is provided, the method should + // return an error. + name: "corrupted tx", + txns: []*wire.MsgTx{{}}, + maxFeeRate: 0, + expectedErr: rpcclient.ErrInvalidParam, + expectedResult: nil, + }, + { + // When an orphan tx is provided, the method should + // return a test mempool accept result which says this + // tx is not allowed. + name: "orphan tx", + txns: []*wire.MsgTx{invalidTx}, + maxFeeRate: 0, + expectedResult: []*btcjson.TestMempoolAcceptResult{{ + Txid: invalidTx.TxHash().String(), + Wtxid: invalidTx.TxHash().String(), + Allowed: false, + RejectReason: "missing-inputs", + }}, + }, + { + // When a valid tx is provided but it exceeds the max + // fee rate, the method should return a test mempool + // accept result which says it's not allowed. 
+ name: "valid tx but exceeds max fee rate", + txns: []*wire.MsgTx{validTx}, + maxFeeRate: 1e-5, + expectedResult: []*btcjson.TestMempoolAcceptResult{{ + Txid: validTx.TxHash().String(), + Wtxid: validTx.TxHash().String(), + Allowed: false, + RejectReason: "max-fee-exceeded", + }}, + }, + { + // When a valid tx is provided and it doesn't exceeds + // the max fee rate, the method should return a test + // mempool accept result which says it's allowed. + name: "valid tx and sane fee rate", + txns: []*wire.MsgTx{validTx}, + expectedResult: []*btcjson.TestMempoolAcceptResult{{ + Txid: validTx.TxHash().String(), + Wtxid: validTx.TxHash().String(), + Allowed: true, + // TODO(yy): need to calculate the fees, atm + // there's no easy way. + // Fees: &btcjson.TestMempoolAcceptFees{}, + }}, + }, + { + // When multiple txns are provided, the method should + // return the correct results for each of the txns. + name: "multiple txns", + txns: []*wire.MsgTx{invalidTx, validTx}, + expectedResult: []*btcjson.TestMempoolAcceptResult{{ + Txid: invalidTx.TxHash().String(), + Wtxid: invalidTx.TxHash().String(), + Allowed: false, + RejectReason: "missing-inputs", + }, { + Txid: validTx.TxHash().String(), + Wtxid: validTx.TxHash().String(), + Allowed: true, + }}, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + require := require.New(t) + + results, err := r.Client.TestMempoolAccept( + tc.txns, tc.maxFeeRate, + ) + + require.ErrorIs(err, tc.expectedErr) + require.Len(results, len(tc.expectedResult)) + + // Check each item is returned as expected. + for i, r := range results { + expected := tc.expectedResult[i] + + // TODO(yy): check all the fields? 
+ require.Equal(expected.Txid, r.Txid) + require.Equal(expected.Wtxid, r.Wtxid) + require.Equal(expected.Allowed, r.Allowed) + require.Equal(expected.RejectReason, + r.RejectReason) + } + }) + } +} + +var ( + //nolint:lll + missingParentsHex = "0100000003bcb2054607a921b3c6df992a9486776863b28485e731a805931b6feb14221acff2000000001c75619cdff9d694a434b13abfbbd618e2ece4460f24b4821cf47d5afc481a386c59565c4900000000cff75994dceb5f5568f8ada45d428630f512fb8efacd46682b4367b4edaf1985c5e4af4b07010000003c029216047236f3000000000017a9141d5a2c690c3e2dacb3cead240f0ce4a273b9d0e48758020000000000001600149d38710eb90e420b159c7a9263994c88e6810bc758020000000000001976a91490770ceff2b1c32e9dbf952fbe65b04a54d1949388ac580200000000000017a914f017945d4d088c7d42ab3bcbc1adce51d74fbd9f8784d7ee4b" +) + +// createTestTx creates a `wire.MsgTx` and asserts its creation. +func createTestTx(t *testing.T, h *rpctest.Harness) *wire.MsgTx { + addr, err := h.NewAddress() + require.NoError(t, err) + + script, err := txscript.PayToAddrScript(addr) + require.NoError(t, err) + + output := &wire.TxOut{ + PkScript: script, + Value: 1e6, + } + + tx, err := h.CreateTransaction([]*wire.TxOut{output}, 10, true) + require.NoError(t, err) + + return tx +} + +// decodeHex takes a tx hexstring and asserts it can be decoded into a +// `wire.MsgTx`. +func decodeHex(t *testing.T, txHex string) *wire.MsgTx { + serializedTx, err := hex.DecodeString(txHex) + require.NoError(t, err) + + tx, err := btcutil.NewTxFromBytes(serializedTx) + require.NoError(t, err) + + return tx.MsgTx() +} diff --git a/log.go b/log.go index 71accc7c9c..5707d7c23a 100644 --- a/log.go +++ b/log.go @@ -36,7 +36,7 @@ func (logWriter) Write(p []byte) (n int, err error) { return len(p), nil } -// Loggers per subsystem. A single backend logger is created and all subsytem +// Loggers per subsystem. A single backend logger is created and all subsystem // loggers created from it will write to the backend. 
When adding new // subsystems, add the subsystem logger variable here and to the // subsystemLoggers map. diff --git a/mempool/README.md b/mempool/README.md index 5f1e4a4cd1..85c03993c2 100644 --- a/mempool/README.md +++ b/mempool/README.md @@ -7,7 +7,7 @@ mempool Package mempool provides a policy-enforced pool of unmined bitcoin transactions. -A key responsbility of the bitcoin network is mining user-generated transactions +A key responsibility of the bitcoin network is mining user-generated transactions into blocks. In order to facilitate this, the mining process relies on having a readily-available source of transactions to include in a block that is being solved. diff --git a/mempool/doc.go b/mempool/doc.go index 22fb2a06a0..8c6c0bc1ea 100644 --- a/mempool/doc.go +++ b/mempool/doc.go @@ -5,7 +5,7 @@ /* Package mempool provides a policy-enforced pool of unmined bitcoin transactions. -A key responsbility of the bitcoin network is mining user-generated transactions +A key responsibility of the bitcoin network is mining user-generated transactions into blocks. In order to facilitate this, the mining process relies on having a readily-available source of transactions to include in a block that is being solved. diff --git a/mempool/interface.go b/mempool/interface.go new file mode 100644 index 0000000000..f6fe1f059a --- /dev/null +++ b/mempool/interface.go @@ -0,0 +1,71 @@ +package mempool + +import ( + "time" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" +) + +// TxMempool defines an interface that's used by other subsystems to interact +// with the mempool. +type TxMempool interface { + // LastUpdated returns the last time a transaction was added to or + // removed from the source pool. + LastUpdated() time.Time + + // TxDescs returns a slice of descriptors for all the transactions in + // the pool. 
+ TxDescs() []*TxDesc + + // RawMempoolVerbose returns all the entries in the mempool as a fully + // populated btcjson result. + RawMempoolVerbose() map[string]*btcjson.GetRawMempoolVerboseResult + + // Count returns the number of transactions in the main pool. It does + // not include the orphan pool. + Count() int + + // FetchTransaction returns the requested transaction from the + // transaction pool. This only fetches from the main transaction pool + // and does not include orphans. + FetchTransaction(txHash *chainhash.Hash) (*btcutil.Tx, error) + + // HaveTransaction returns whether or not the passed transaction + // already exists in the main pool or in the orphan pool. + HaveTransaction(hash *chainhash.Hash) bool + + // ProcessTransaction is the main workhorse for handling insertion of + // new free-standing transactions into the memory pool. It includes + // functionality such as rejecting duplicate transactions, ensuring + // transactions follow all rules, orphan transaction handling, and + // insertion into the memory pool. + // + // It returns a slice of transactions added to the mempool. When the + // error is nil, the list will include the passed transaction itself + // along with any additional orphan transactions that were added as a + // result of the passed one being accepted. + ProcessTransaction(tx *btcutil.Tx, allowOrphan, + rateLimit bool, tag Tag) ([]*TxDesc, error) + + // RemoveTransaction removes the passed transaction from the mempool. + // When the removeRedeemers flag is set, any transactions that redeem + // outputs from the removed transaction will also be removed + // recursively from the mempool, as they would otherwise become + // orphans. + RemoveTransaction(tx *btcutil.Tx, removeRedeemers bool) + + // CheckMempoolAcceptance behaves similarly to bitcoind's + // `testmempoolaccept` RPC method. It will perform a series of checks + // to decide whether this transaction can be accepted to the mempool. 
+ // If not, the specific error is returned and the caller needs to take + // actions based on it. + CheckMempoolAcceptance(tx *btcutil.Tx) (*MempoolAcceptResult, error) + + // CheckSpend checks whether the passed outpoint is already spent by + // a transaction in the mempool. If that's the case the spending + // transaction will be returned, if not nil will be returned. + CheckSpend(op wire.OutPoint) *btcutil.Tx +} diff --git a/mempool/mempool.go b/mempool/mempool.go index a80d8ee4f4..cc0ec10fe0 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -21,6 +21,7 @@ import ( "github.com/btcsuite/btcd/mining" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" ) const ( @@ -48,6 +49,10 @@ const ( // can be evicted from the mempool when accepting a transaction // replacement. MaxReplacementEvictions = 100 + + // Transactions smaller than 65 non-witness bytes are not relayed to + // mitigate CVE-2017-12842. + MinStandardTxNonWitnessSize = 65 ) // Tag represents an identifier to use for tagging orphan transactions. The @@ -195,6 +200,9 @@ type TxPool struct { // Ensure the TxPool type implements the mining.TxSource interface. var _ mining.TxSource = (*TxPool)(nil) +// Ensure the TxPool type implements the TxMempool interface. +var _ TxMempool = (*TxPool)(nil) + // removeOrphan is the internal function which implements the public // RemoveOrphan. See the comment for RemoveOrphan for more details. // @@ -588,9 +596,9 @@ func (mp *TxPool) checkPoolDoubleSpend(tx *btcutil.Tx) (bool, error) { // transactions or if it doesn't signal replacement. 
if mp.cfg.Policy.RejectReplacement || !mp.signalsReplacement(conflict, nil) { - str := fmt.Sprintf("output %v already spent by "+ - "transaction %v in the memory pool", - txIn.PreviousOutPoint, conflict.Hash()) + str := fmt.Sprintf("output already spent in mempool: "+ + "output=%v, tx=%v", txIn.PreviousOutPoint, + conflict.Hash()) return false, txRuleError(wire.RejectDuplicate, str) } @@ -838,7 +846,7 @@ func (mp *TxPool) validateReplacement(tx *btcutil.Tx, // exceed the maximum allowed. conflicts := mp.txConflicts(tx) if len(conflicts) > MaxReplacementEvictions { - str := fmt.Sprintf("replacement transaction %v evicts more "+ + str := fmt.Sprintf("%v: replacement transaction evicts more "+ "transactions than permitted: max is %v, evicts %v", tx.Hash(), MaxReplacementEvictions, len(conflicts)) return nil, txRuleError(wire.RejectNonstandard, str) @@ -851,7 +859,7 @@ func (mp *TxPool) validateReplacement(tx *btcutil.Tx, if _, ok := conflicts[ancestorHash]; !ok { continue } - str := fmt.Sprintf("replacement transaction %v spends parent "+ + str := fmt.Sprintf("%v: replacement transaction spends parent "+ "transaction %v", tx.Hash(), ancestorHash) return nil, txRuleError(wire.RejectInvalid, str) } @@ -872,7 +880,7 @@ func (mp *TxPool) validateReplacement(tx *btcutil.Tx, ) for hash, conflict := range conflicts { if txFeeRate <= mp.pool[hash].FeePerKB { - str := fmt.Sprintf("replacement transaction %v has an "+ + str := fmt.Sprintf("%v: replacement transaction has an "+ "insufficient fee rate: needs more than %v, "+ "has %v", tx.Hash(), mp.pool[hash].FeePerKB, txFeeRate) @@ -893,7 +901,7 @@ func (mp *TxPool) validateReplacement(tx *btcutil.Tx, // which is determined by our minimum relay fee. 
minFee := calcMinRequiredTxRelayFee(txSize, mp.cfg.Policy.MinRelayTxFee) if txFee < conflictsFee+minFee { - str := fmt.Sprintf("replacement transaction %v has an "+ + str := fmt.Sprintf("%v: replacement transaction has an "+ "insufficient absolute fee: needs %v, has %v", tx.Hash(), conflictsFee+minFee, txFee) return nil, txRuleError(wire.RejectInsufficientFee, str) @@ -924,313 +932,39 @@ func (mp *TxPool) validateReplacement(tx *btcutil.Tx, // more details. // // This function MUST be called with the mempool lock held (for writes). -func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, rejectDupOrphans bool) ([]*chainhash.Hash, *TxDesc, error) { - txHash := tx.Hash() - - // If a transaction has witness data, and segwit isn't active yet, If - // segwit isn't active yet, then we won't accept it into the mempool as - // it can't be mined yet. - if tx.MsgTx().HasWitness() { - segwitActive, err := mp.cfg.IsDeploymentActive(chaincfg.DeploymentSegwit) - if err != nil { - return nil, nil, err - } - - if !segwitActive { - simnetHint := "" - if mp.cfg.ChainParams.Net == wire.SimNet { - bestHeight := mp.cfg.BestHeight() - simnetHint = fmt.Sprintf(" (The threshold for segwit activation is 300 blocks on simnet, "+ - "current best height is %d)", bestHeight) - } - str := fmt.Sprintf("transaction %v has witness data, "+ - "but segwit isn't active yet%s", txHash, simnetHint) - return nil, nil, txRuleError(wire.RejectNonstandard, str) - } - } - - // Don't accept the transaction if it already exists in the pool. This - // applies to orphan transactions as well when the reject duplicate - // orphans flag is set. This check is intended to be a quick check to - // weed out duplicates. - if mp.isTransactionInPool(txHash) || (rejectDupOrphans && - mp.isOrphanInPool(txHash)) { - - str := fmt.Sprintf("already have transaction %v", txHash) - return nil, nil, txRuleError(wire.RejectDuplicate, str) - } - - // Perform preliminary sanity checks on the transaction. 
This makes - // use of blockchain which contains the invariant rules for what - // transactions are allowed into blocks. - err := blockchain.CheckTransactionSanity(tx) - if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { - return nil, nil, chainRuleError(cerr) - } - return nil, nil, err - } - - // A standalone transaction must not be a coinbase transaction. - if blockchain.IsCoinBase(tx) { - str := fmt.Sprintf("transaction %v is an individual coinbase", - txHash) - return nil, nil, txRuleError(wire.RejectInvalid, str) - } - - // Get the current height of the main chain. A standalone transaction - // will be mined into the next block at best, so its height is at least - // one more than the current height. - bestHeight := mp.cfg.BestHeight() - nextBlockHeight := bestHeight + 1 - - medianTimePast := mp.cfg.MedianTimePast() - - // Don't allow non-standard transactions if the network parameters - // forbid their acceptance. - if !mp.cfg.Policy.AcceptNonStd { - err = CheckTransactionStandard(tx, nextBlockHeight, - medianTimePast, mp.cfg.Policy.MinRelayTxFee, - mp.cfg.Policy.MaxTxVersion) - if err != nil { - // Attempt to extract a reject code from the error so - // it can be retained. When not possible, fall back to - // a non standard error. - rejectCode, found := extractRejectCode(err) - if !found { - rejectCode = wire.RejectNonstandard - } - str := fmt.Sprintf("transaction %v is not standard: %v", - txHash, err) - return nil, nil, txRuleError(rejectCode, str) - } - } - - // The transaction may not use any of the same outputs as other - // transactions already in the pool as that would ultimately result in a - // double spend, unless those transactions signal for RBF. This check is - // intended to be quick and therefore only detects double spends within - // the transaction pool itself. The transaction could still be double - // spending coins from the main chain at this point. 
There is a more - // in-depth check that happens later after fetching the referenced - // transaction inputs from the main chain which examines the actual - // spend data and prevents double spends. - isReplacement, err := mp.checkPoolDoubleSpend(tx) - if err != nil { - return nil, nil, err - } - - // Fetch all of the unspent transaction outputs referenced by the inputs - // to this transaction. This function also attempts to fetch the - // transaction itself to be used for detecting a duplicate transaction - // without needing to do a separate lookup. - utxoView, err := mp.fetchInputUtxos(tx) - if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { - return nil, nil, chainRuleError(cerr) - } - return nil, nil, err - } - - // Don't allow the transaction if it exists in the main chain and is - // already fully spent. - prevOut := wire.OutPoint{Hash: *txHash} - for txOutIdx := range tx.MsgTx().TxOut { - prevOut.Index = uint32(txOutIdx) - entry := utxoView.LookupEntry(prevOut) - if entry != nil && !entry.IsSpent() { - return nil, nil, txRuleError(wire.RejectDuplicate, - "transaction already exists") - } - utxoView.RemoveEntry(prevOut) - } - - // Transaction is an orphan if any of the referenced transaction outputs - // don't exist or are already spent. Adding orphans to the orphan pool - // is not handled by this function, and the caller should use - // maybeAddOrphan if this behavior is desired. - var missingParents []*chainhash.Hash - for outpoint, entry := range utxoView.Entries() { - if entry == nil || entry.IsSpent() { - // Must make a copy of the hash here since the iterator - // is replaced and taking its address directly would - // result in all the entries pointing to the same - // memory location and thus all be the final hash. 
- hashCopy := outpoint.Hash - missingParents = append(missingParents, &hashCopy) - } - } - if len(missingParents) > 0 { - return missingParents, nil, nil - } - - // Don't allow the transaction into the mempool unless its sequence - // lock is active, meaning that it'll be allowed into the next block - // with respect to its defined relative lock times. - sequenceLock, err := mp.cfg.CalcSequenceLock(tx, utxoView) - if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { - return nil, nil, chainRuleError(cerr) - } - return nil, nil, err - } - if !blockchain.SequenceLockActive(sequenceLock, nextBlockHeight, - medianTimePast) { - return nil, nil, txRuleError(wire.RejectNonstandard, - "transaction's sequence locks on inputs not met") - } - - // Perform several checks on the transaction inputs using the invariant - // rules in blockchain for what transactions are allowed into blocks. - // Also returns the fees associated with the transaction which will be - // used later. - txFee, err := blockchain.CheckTransactionInputs(tx, nextBlockHeight, - utxoView, mp.cfg.ChainParams) - if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { - return nil, nil, chainRuleError(cerr) - } - return nil, nil, err - } +func (mp *TxPool) maybeAcceptTransaction(tx *btcutil.Tx, isNew, rateLimit, + rejectDupOrphans bool) ([]*chainhash.Hash, *TxDesc, error) { - // Don't allow transactions with non-standard inputs if the network - // parameters forbid their acceptance. - if !mp.cfg.Policy.AcceptNonStd { - err := checkInputsStandard(tx, utxoView) - if err != nil { - // Attempt to extract a reject code from the error so - // it can be retained. When not possible, fall back to - // a non-standard error. 
- rejectCode, found := extractRejectCode(err) - if !found { - rejectCode = wire.RejectNonstandard - } - str := fmt.Sprintf("transaction %v has a non-standard "+ - "input: %v", txHash, err) - return nil, nil, txRuleError(rejectCode, str) - } - } - - // NOTE: if you modify this code to accept non-standard transactions, - // you should add code here to check that the transaction does a - // reasonable number of ECDSA signature verifications. + txHash := tx.Hash() - // Don't allow transactions with an excessive number of signature - // operations which would result in making it impossible to mine. Since - // the coinbase address itself can contain signature operations, the - // maximum allowed signature operations per transaction is less than - // the maximum allowed signature operations per block. - // TODO(roasbeef): last bool should be conditional on segwit activation - sigOpCost, err := blockchain.GetSigOpCost(tx, false, utxoView, true, true) + // Check for mempool acceptance. + r, err := mp.checkMempoolAcceptance( + tx, isNew, rateLimit, rejectDupOrphans, + ) if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { - return nil, nil, chainRuleError(cerr) - } return nil, nil, err } - if sigOpCost > mp.cfg.Policy.MaxSigOpCostPerTx { - str := fmt.Sprintf("transaction %v sigop cost is too high: %d > %d", - txHash, sigOpCost, mp.cfg.Policy.MaxSigOpCostPerTx) - return nil, nil, txRuleError(wire.RejectNonstandard, str) - } - // Don't allow transactions with fees too low to get into a mined block. - // - // Most miners allow a free transaction area in blocks they mine to go - // alongside the area used for high-priority transactions as well as - // transactions with fees. A transaction size of up to 1000 bytes is - // considered safe to go into this section. Further, the minimum fee - // calculated below on its own would encourage several small - // transactions to avoid fees rather than one single larger transaction - // which is more desirable. 
Therefore, as long as the size of the - // transaction does not exceed 1000 less than the reserved space for - // high-priority transactions, don't require a fee for it. - serializedSize := GetTxVirtualSize(tx) - minFee := calcMinRequiredTxRelayFee(serializedSize, - mp.cfg.Policy.MinRelayTxFee) - if serializedSize >= (DefaultBlockPrioritySize-1000) && txFee < minFee { - str := fmt.Sprintf("transaction %v has %d fees which is under "+ - "the required amount of %d", txHash, txFee, - minFee) - return nil, nil, txRuleError(wire.RejectInsufficientFee, str) - } - - // Require that free transactions have sufficient priority to be mined - // in the next block. Transactions which are being added back to the - // memory pool from blocks that have been disconnected during a reorg - // are exempted. - if isNew && !mp.cfg.Policy.DisableRelayPriority && txFee < minFee { - currentPriority := mining.CalcPriority(tx.MsgTx(), utxoView, - nextBlockHeight) - if currentPriority <= mining.MinHighPriority { - str := fmt.Sprintf("transaction %v has insufficient "+ - "priority (%g <= %g)", txHash, - currentPriority, mining.MinHighPriority) - return nil, nil, txRuleError(wire.RejectInsufficientFee, str) - } - } - - // Free-to-relay transactions are rate limited here to prevent - // penny-flooding with tiny transactions as a form of attack. - if rateLimit && txFee < minFee { - nowUnix := time.Now().Unix() - // Decay passed data with an exponentially decaying ~10 minute - // window - matches bitcoind handling. - mp.pennyTotal *= math.Pow(1.0-1.0/600.0, - float64(nowUnix-mp.lastPennyUnix)) - mp.lastPennyUnix = nowUnix - - // Are we still over the limit? 
- if mp.pennyTotal >= mp.cfg.Policy.FreeTxRelayLimit*10*1000 { - str := fmt.Sprintf("transaction %v has been rejected "+ - "by the rate limiter due to low fees", txHash) - return nil, nil, txRuleError(wire.RejectInsufficientFee, str) - } - oldTotal := mp.pennyTotal - - mp.pennyTotal += float64(serializedSize) - log.Tracef("rate limit: curTotal %v, nextTotal: %v, "+ - "limit %v", oldTotal, mp.pennyTotal, - mp.cfg.Policy.FreeTxRelayLimit*10*1000) - } - - // If the transaction has any conflicts, and we've made it this far, then - // we're processing a potential replacement. - var conflicts map[chainhash.Hash]*btcutil.Tx - if isReplacement { - conflicts, err = mp.validateReplacement(tx, txFee) - if err != nil { - return nil, nil, err - } - } - - // Verify crypto signatures for each input and reject the transaction if - // any don't verify. - err = blockchain.ValidateTransactionScripts(tx, utxoView, - txscript.StandardVerifyFlags, mp.cfg.SigCache, - mp.cfg.HashCache) - if err != nil { - if cerr, ok := err.(blockchain.RuleError); ok { - return nil, nil, chainRuleError(cerr) - } - return nil, nil, err + // Exit early if this transaction is missing parents. + if len(r.MissingParents) > 0 { + return r.MissingParents, nil, nil } // Now that we've deemed the transaction as valid, we can add it to the // mempool. If it ended up replacing any transactions, we'll remove them // first. - for _, conflict := range conflicts { + for _, conflict := range r.Conflicts { log.Debugf("Replacing transaction %v (fee_rate=%v sat/kb) "+ "with %v (fee_rate=%v sat/kb)\n", conflict.Hash(), mp.pool[*conflict.Hash()].FeePerKB, tx.Hash(), - txFee*1000/serializedSize) + int64(r.TxFee)*1000/r.TxSize) // The conflict set should already include the descendants for // each one, so we don't need to remove the redeemers within // this call as they'll be removed eventually. 
mp.removeTransaction(conflict, false) } - txD := mp.addTransaction(utxoView, tx, bestHeight, txFee) + txD := mp.addTransaction(r.utxoView, tx, r.bestHeight, int64(r.TxFee)) log.Debugf("Accepted transaction %v (pool size: %v)", txHash, len(mp.pool)) @@ -1545,6 +1279,484 @@ func (mp *TxPool) LastUpdated() time.Time { return time.Unix(atomic.LoadInt64(&mp.lastUpdated), 0) } +// MempoolAcceptResult holds the result from mempool acceptance check. +type MempoolAcceptResult struct { + // TxFee is the fees paid in satoshi. + TxFee btcutil.Amount + + // TxSize is the virtual size(vb) of the tx. + TxSize int64 + + // Conflicts is a set of transactions whose inputs are spent by this + // transaction(RBF). + Conflicts map[chainhash.Hash]*btcutil.Tx + + // MissingParents is a set of outpoints that are used by this + // transaction which cannot be found. Transaction is an orphan if any + // of the referenced transaction outputs don't exist or are already + // spent. + // + // NOTE: this field is mutually exclusive with other fields. If this + // field is not nil, then other fields must be empty. + MissingParents []*chainhash.Hash + + // utxoView is a set of the unspent transaction outputs referenced by + // the inputs to this transaction. + utxoView *blockchain.UtxoViewpoint + + // bestHeight is the best known height by the mempool. + bestHeight int32 +} + +// CheckMempoolAcceptance behaves similarly to bitcoind's `testmempoolaccept` +// RPC method. It will perform a series of checks to decide whether this +// transaction can be accepted to the mempool. If not, the specific error is +// returned and the caller needs to take actions based on it. +func (mp *TxPool) CheckMempoolAcceptance(tx *btcutil.Tx) ( + *MempoolAcceptResult, error) { + + mp.mtx.RLock() + defer mp.mtx.RUnlock() + + // Call checkMempoolAcceptance with isNew=true and rateLimit=true, + // which has the effect that we always check the fee paid from this tx + // is greater than min relay fee. 
We also reject this tx if it's + // already an orphan. + result, err := mp.checkMempoolAcceptance(tx, true, true, true) + if err != nil { + log.Errorf("CheckMempoolAcceptance: %v", err) + return nil, err + } + + log.Tracef("Tx %v passed mempool acceptance check: %v", tx.Hash(), + spew.Sdump(result)) + + return result, nil +} + +// checkMempoolAcceptance performs a series of validations on the given +// transaction. It returns an error when the transaction fails to meet the +// mempool policy, otherwise a `mempoolAcceptResult` is returned. +func (mp *TxPool) checkMempoolAcceptance(tx *btcutil.Tx, + isNew, rateLimit, rejectDupOrphans bool) (*MempoolAcceptResult, error) { + + txHash := tx.Hash() + + // Check for segwit activeness. + if err := mp.validateSegWitDeployment(tx); err != nil { + return nil, err + } + + // Don't accept the transaction if it already exists in the pool. This + // applies to orphan transactions as well when the reject duplicate + // orphans flag is set. This check is intended to be a quick check to + // weed out duplicates. + if mp.isTransactionInPool(txHash) || (rejectDupOrphans && + mp.isOrphanInPool(txHash)) { + + str := fmt.Sprintf("already have transaction in mempool %v", + txHash) + return nil, txRuleError(wire.RejectDuplicate, str) + } + + // Disallow transactions under the minimum standardness size. + if tx.MsgTx().SerializeSizeStripped() < MinStandardTxNonWitnessSize { + str := fmt.Sprintf("tx %v is too small", txHash) + return nil, txRuleError(wire.RejectNonstandard, str) + } + + // Perform preliminary sanity checks on the transaction. This makes use + // of blockchain which contains the invariant rules for what + // transactions are allowed into blocks. + err := blockchain.CheckTransactionSanity(tx) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + + return nil, err + } + + // A standalone transaction must not be a coinbase transaction. 
+ if blockchain.IsCoinBase(tx) { + str := fmt.Sprintf("transaction is an individual coinbase %v", + txHash) + + return nil, txRuleError(wire.RejectInvalid, str) + } + + // Get the current height of the main chain. A standalone transaction + // will be mined into the next block at best, so its height is at least + // one more than the current height. + bestHeight := mp.cfg.BestHeight() + nextBlockHeight := bestHeight + 1 + + medianTimePast := mp.cfg.MedianTimePast() + + // The transaction may not use any of the same outputs as other + // transactions already in the pool as that would ultimately result in + // a double spend, unless those transactions signal for RBF. This check + // is intended to be quick and therefore only detects double spends + // within the transaction pool itself. The transaction could still be + // double spending coins from the main chain at this point. There is a + // more in-depth check that happens later after fetching the referenced + // transaction inputs from the main chain which examines the actual + // spend data and prevents double spends. + isReplacement, err := mp.checkPoolDoubleSpend(tx) + if err != nil { + return nil, err + } + + // Fetch all of the unspent transaction outputs referenced by the + // inputs to this transaction. This function also attempts to fetch the + // transaction itself to be used for detecting a duplicate transaction + // without needing to do a separate lookup. + utxoView, err := mp.fetchInputUtxos(tx) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + + return nil, err + } + + // Don't allow the transaction if it exists in the main chain and is + // already fully spent. 
+ prevOut := wire.OutPoint{Hash: *txHash} + for txOutIdx := range tx.MsgTx().TxOut { + prevOut.Index = uint32(txOutIdx) + + entry := utxoView.LookupEntry(prevOut) + if entry != nil && !entry.IsSpent() { + return nil, txRuleError(wire.RejectDuplicate, + "transaction already exists in blockchain") + } + + utxoView.RemoveEntry(prevOut) + } + + // Transaction is an orphan if any of the referenced transaction + // outputs don't exist or are already spent. Adding orphans to the + // orphan pool is not handled by this function, and the caller should + // use maybeAddOrphan if this behavior is desired. + var missingParents []*chainhash.Hash + for outpoint, entry := range utxoView.Entries() { + if entry == nil || entry.IsSpent() { + // Must make a copy of the hash here since the iterator + // is replaced and taking its address directly would + // result in all the entries pointing to the same + // memory location and thus all be the final hash. + hashCopy := outpoint.Hash + missingParents = append(missingParents, &hashCopy) + } + } + + // Exit early if this transaction is missing parents. + if len(missingParents) > 0 { + log.Debugf("Tx %v is an orphan with missing parents: %v", + txHash, missingParents) + + return &MempoolAcceptResult{ + MissingParents: missingParents, + }, nil + } + + // Perform several checks on the transaction inputs using the invariant + // rules in blockchain for what transactions are allowed into blocks. + // Also returns the fees associated with the transaction which will be + // used later. + // + // NOTE: this check must be performed before `validateStandardness` to + // make sure a nil entry is not returned from `utxoView.LookupEntry`. 
+ txFee, err := blockchain.CheckTransactionInputs( + tx, nextBlockHeight, utxoView, mp.cfg.ChainParams, + ) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + return nil, err + } + + // Don't allow non-standard transactions or non-standard inputs if the + // network parameters forbid their acceptance. + err = mp.validateStandardness( + tx, nextBlockHeight, medianTimePast, utxoView, + ) + if err != nil { + return nil, err + } + + // Don't allow the transaction into the mempool unless its sequence + // lock is active, meaning that it'll be allowed into the next block + // with respect to its defined relative lock times. + sequenceLock, err := mp.cfg.CalcSequenceLock(tx, utxoView) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + + return nil, err + } + + if !blockchain.SequenceLockActive( + sequenceLock, nextBlockHeight, medianTimePast, + ) { + + return nil, txRuleError(wire.RejectNonstandard, + "transaction's sequence locks on inputs not met") + } + + // Don't allow transactions with an excessive number of signature + // operations which would result in making it impossible to mine. + if err := mp.validateSigCost(tx, utxoView); err != nil { + return nil, err + } + + txSize := GetTxVirtualSize(tx) + + // Don't allow transactions with fees too low to get into a mined + // block. + err = mp.validateRelayFeeMet( + tx, txFee, txSize, utxoView, nextBlockHeight, isNew, rateLimit, + ) + if err != nil { + return nil, err + } + + // If the transaction has any conflicts, and we've made it this far, + // then we're processing a potential replacement. + var conflicts map[chainhash.Hash]*btcutil.Tx + if isReplacement { + conflicts, err = mp.validateReplacement(tx, txFee) + if err != nil { + return nil, err + } + } + + // Verify crypto signatures for each input and reject the transaction + // if any don't verify. 
+ err = blockchain.ValidateTransactionScripts(tx, utxoView, + txscript.StandardVerifyFlags, mp.cfg.SigCache, + mp.cfg.HashCache) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return nil, chainRuleError(cerr) + } + return nil, err + } + + result := &MempoolAcceptResult{ + TxFee: btcutil.Amount(txFee), + TxSize: txSize, + Conflicts: conflicts, + utxoView: utxoView, + bestHeight: bestHeight, + } + + return result, nil +} + +// validateSegWitDeployment checks that when a transaction has witness data, +// segwit must be active. +func (mp *TxPool) validateSegWitDeployment(tx *btcutil.Tx) error { + // Exit early if this transaction doesn't have witness data. + if !tx.MsgTx().HasWitness() { + return nil + } + + // If a transaction has witness data, and segwit isn't active yet, then + // we won't accept it into the mempool as it can't be mined yet. + segwitActive, err := mp.cfg.IsDeploymentActive( + chaincfg.DeploymentSegwit, + ) + if err != nil { + return err + } + + // Exit early if segwit is active. + if segwitActive { + return nil + } + + simnetHint := "" + if mp.cfg.ChainParams.Net == wire.SimNet { + bestHeight := mp.cfg.BestHeight() + simnetHint = fmt.Sprintf(" (The threshold for segwit "+ + "activation is 300 blocks on simnet, current best "+ + "height is %d)", bestHeight) + } + str := fmt.Sprintf("transaction %v has witness data, "+ + "but segwit isn't active yet%s", tx.Hash(), simnetHint) + + return txRuleError(wire.RejectNonstandard, str) +} + +// validateStandardness checks the transaction passes both transaction standard +// and input standard. +func (mp *TxPool) validateStandardness(tx *btcutil.Tx, nextBlockHeight int32, + medianTimePast time.Time, utxoView *blockchain.UtxoViewpoint) error { + + // Exit early if we accept non-standard transactions. 
+ // + // NOTE: if you modify this code to accept non-standard transactions, + // you should add code here to check that the transaction does a + // reasonable number of ECDSA signature verifications. + if mp.cfg.Policy.AcceptNonStd { + return nil + } + + // Check the transaction standard. + err := CheckTransactionStandard( + tx, nextBlockHeight, medianTimePast, + mp.cfg.Policy.MinRelayTxFee, mp.cfg.Policy.MaxTxVersion, + ) + if err != nil { + // Attempt to extract a reject code from the error so it can be + // retained. When not possible, fall back to a non standard + // error. + rejectCode, found := extractRejectCode(err) + if !found { + rejectCode = wire.RejectNonstandard + } + str := fmt.Sprintf("transaction %v is not standard: %v", + tx.Hash(), err) + + return txRuleError(rejectCode, str) + } + + // Check the inputs standard. + err = checkInputsStandard(tx, utxoView) + if err != nil { + // Attempt to extract a reject code from the error so it can be + // retained. When not possible, fall back to a non-standard + // error. + rejectCode, found := extractRejectCode(err) + if !found { + rejectCode = wire.RejectNonstandard + } + str := fmt.Sprintf("transaction %v has a non-standard "+ + "input: %v", tx.Hash(), err) + + return txRuleError(rejectCode, str) + } + + return nil +} + +// validateSigCost checks the cost to run the signature operations to make sure +// the number of signatures are sane. +func (mp *TxPool) validateSigCost(tx *btcutil.Tx, + utxoView *blockchain.UtxoViewpoint) error { + + // Since the coinbase address itself can contain signature operations, + // the maximum allowed signature operations per transaction is less + // than the maximum allowed signature operations per block. 
+ // + // TODO(roasbeef): last bool should be conditional on segwit activation + sigOpCost, err := blockchain.GetSigOpCost( + tx, false, utxoView, true, true, + ) + if err != nil { + if cerr, ok := err.(blockchain.RuleError); ok { + return chainRuleError(cerr) + } + + return err + } + + // Exit early if the sig cost is under limit. + if sigOpCost <= mp.cfg.Policy.MaxSigOpCostPerTx { + return nil + } + + str := fmt.Sprintf("transaction %v sigop cost is too high: %d > %d", + tx.Hash(), sigOpCost, mp.cfg.Policy.MaxSigOpCostPerTx) + + return txRuleError(wire.RejectNonstandard, str) +} + +// validateRelayFeeMet checks that the min relay fee is covered by this +// transaction. +func (mp *TxPool) validateRelayFeeMet(tx *btcutil.Tx, txFee, txSize int64, + utxoView *blockchain.UtxoViewpoint, nextBlockHeight int32, + isNew, rateLimit bool) error { + + txHash := tx.Hash() + + // Most miners allow a free transaction area in blocks they mine to go + // alongside the area used for high-priority transactions as well as + // transactions with fees. A transaction size of up to 1000 bytes is + // considered safe to go into this section. Further, the minimum fee + // calculated below on its own would encourage several small + // transactions to avoid fees rather than one single larger transaction + // which is more desirable. Therefore, as long as the size of the + // transaction does not exceed 1000 less than the reserved space for + // high-priority transactions, don't require a fee for it. + minFee := calcMinRequiredTxRelayFee(txSize, mp.cfg.Policy.MinRelayTxFee) + + if txSize >= (DefaultBlockPrioritySize-1000) && txFee < minFee { + str := fmt.Sprintf("transaction %v has %d fees which is under "+ + "the required amount of %d", txHash, txFee, minFee) + + return txRuleError(wire.RejectInsufficientFee, str) + } + + // Exit early if the min relay fee is met. + if txFee >= minFee { + return nil + } + + // Exit early if this is neither a new tx or rate limited. 
+ if !isNew && !rateLimit { + return nil + } + + // Require that free transactions have sufficient priority to be mined + // in the next block. Transactions which are being added back to the + // memory pool from blocks that have been disconnected during a reorg + // are exempted. + if isNew && !mp.cfg.Policy.DisableRelayPriority { + currentPriority := mining.CalcPriority( + tx.MsgTx(), utxoView, nextBlockHeight, + ) + if currentPriority <= mining.MinHighPriority { + str := fmt.Sprintf("transaction %v has insufficient "+ + "priority (%g <= %g)", txHash, + currentPriority, mining.MinHighPriority) + + return txRuleError(wire.RejectInsufficientFee, str) + } + } + + // We can only end up here when the rateLimit is true. Free-to-relay + // transactions are rate limited here to prevent penny-flooding with + // tiny transactions as a form of attack. + nowUnix := time.Now().Unix() + + // Decay passed data with an exponentially decaying ~10 minute window - + // matches bitcoind handling. + mp.pennyTotal *= math.Pow( + 1.0-1.0/600.0, float64(nowUnix-mp.lastPennyUnix), + ) + mp.lastPennyUnix = nowUnix + + // Are we still over the limit? + if mp.pennyTotal >= mp.cfg.Policy.FreeTxRelayLimit*10*1000 { + str := fmt.Sprintf("transaction %v has been rejected "+ + "by the rate limiter due to low fees", txHash) + + return txRuleError(wire.RejectInsufficientFee, str) + } + + oldTotal := mp.pennyTotal + mp.pennyTotal += float64(txSize) + log.Tracef("rate limit: curTotal %v, nextTotal: %v, limit %v", + oldTotal, mp.pennyTotal, mp.cfg.Policy.FreeTxRelayLimit*10*1000) + + return nil +} + // New returns a new memory pool for validating and storing standalone // transactions until they are mined into a block. 
func New(cfg *Config) *TxPool { diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index f0f8404cc7..e31b6043bd 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -1491,7 +1491,7 @@ func TestRBF(t *testing.T) { return tx, nil }, - err: "already spent by transaction", + err: "already spent in mempool", }, { // A transaction cannot replace another if we don't @@ -1522,7 +1522,7 @@ func TestRBF(t *testing.T) { return tx, nil }, - err: "already spent by transaction", + err: "already spent in mempool", }, { // A transaction cannot replace another if doing so diff --git a/mempool/mocks.go b/mempool/mocks.go new file mode 100644 index 0000000000..e81309c51a --- /dev/null +++ b/mempool/mocks.go @@ -0,0 +1,125 @@ +package mempool + +import ( + "time" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/mock" +) + +// MockTxMempool is a mock implementation of the TxMempool interface. +type MockTxMempool struct { + mock.Mock +} + +// Ensure the MockTxMempool implements the TxMemPool interface. +var _ TxMempool = (*MockTxMempool)(nil) + +// LastUpdated returns the last time a transaction was added to or removed from +// the source pool. +func (m *MockTxMempool) LastUpdated() time.Time { + args := m.Called() + return args.Get(0).(time.Time) +} + +// TxDescs returns a slice of descriptors for all the transactions in the pool. +func (m *MockTxMempool) TxDescs() []*TxDesc { + args := m.Called() + return args.Get(0).([]*TxDesc) +} + +// RawMempoolVerbose returns all the entries in the mempool as a fully +// populated btcjson result. +func (m *MockTxMempool) RawMempoolVerbose() map[string]*btcjson. + GetRawMempoolVerboseResult { + + args := m.Called() + return args.Get(0).(map[string]*btcjson.GetRawMempoolVerboseResult) +} + +// Count returns the number of transactions in the main pool. 
It does not +// include the orphan pool. +func (m *MockTxMempool) Count() int { + args := m.Called() + return args.Get(0).(int) +} + +// FetchTransaction returns the requested transaction from the transaction +// pool. This only fetches from the main transaction pool and does not include +// orphans. +func (m *MockTxMempool) FetchTransaction( + txHash *chainhash.Hash) (*btcutil.Tx, error) { + + args := m.Called(txHash) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*btcutil.Tx), args.Error(1) +} + +// HaveTransaction returns whether or not the passed transaction already exists +// in the main pool or in the orphan pool. +func (m *MockTxMempool) HaveTransaction(hash *chainhash.Hash) bool { + args := m.Called(hash) + return args.Get(0).(bool) +} + +// ProcessTransaction is the main workhorse for handling insertion of new +// free-standing transactions into the memory pool. It includes functionality +// such as rejecting duplicate transactions, ensuring transactions follow all +// rules, orphan transaction handling, and insertion into the memory pool. +func (m *MockTxMempool) ProcessTransaction(tx *btcutil.Tx, allowOrphan, + rateLimit bool, tag Tag) ([]*TxDesc, error) { + + args := m.Called(tx, allowOrphan, rateLimit, tag) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).([]*TxDesc), args.Error(1) +} + +// RemoveTransaction removes the passed transaction from the mempool. When the +// removeRedeemers flag is set, any transactions that redeem outputs from the +// removed transaction will also be removed recursively from the mempool, as +// they would otherwise become orphans. +func (m *MockTxMempool) RemoveTransaction(tx *btcutil.Tx, + removeRedeemers bool) { + + m.Called(tx, removeRedeemers) +} + +// CheckMempoolAcceptance behaves similarly to bitcoind's `testmempoolaccept` +// RPC method. It will perform a series of checks to decide whether this +// transaction can be accepted to the mempool. 
If not, the specific error is +// returned and the caller needs to take actions based on it. +func (m *MockTxMempool) CheckMempoolAcceptance( + tx *btcutil.Tx) (*MempoolAcceptResult, error) { + + args := m.Called(tx) + + if args.Get(0) == nil { + return nil, args.Error(1) + } + + return args.Get(0).(*MempoolAcceptResult), args.Error(1) +} + +// CheckSpend checks whether the passed outpoint is already spent by a +// transaction in the mempool. If that's the case the spending transaction will +// be returned, if not nil will be returned. +func (m *MockTxMempool) CheckSpend(op wire.OutPoint) *btcutil.Tx { + args := m.Called(op) + + if args.Get(0) == nil { + return nil + } + + return args.Get(0).(*btcutil.Tx) +} diff --git a/mempool/policy.go b/mempool/policy.go index 758f7e06a9..862767d0c8 100644 --- a/mempool/policy.go +++ b/mempool/policy.go @@ -308,8 +308,8 @@ func CheckTransactionStandard(tx *btcutil.Tx, height int32, // attacks. txWeight := blockchain.GetTransactionWeight(tx) if txWeight > maxStandardTxWeight { - str := fmt.Sprintf("weight of transaction %v is larger than max "+ - "allowed weight of %v", txWeight, maxStandardTxWeight) + str := fmt.Sprintf("weight of transaction is larger than max "+ + "allowed: %v > %v", txWeight, maxStandardTxWeight) return txRuleError(wire.RejectNonstandard, str) } @@ -320,8 +320,8 @@ func CheckTransactionStandard(tx *btcutil.Tx, height int32, sigScriptLen := len(txIn.SignatureScript) if sigScriptLen > maxStandardSigScriptSize { str := fmt.Sprintf("transaction input %d: signature "+ - "script size of %d bytes is large than max "+ - "allowed size of %d bytes", i, sigScriptLen, + "script size is larger than max allowed: "+ + "%d > %d bytes", i, sigScriptLen, maxStandardSigScriptSize) return txRuleError(wire.RejectNonstandard, str) } @@ -359,8 +359,8 @@ func CheckTransactionStandard(tx *btcutil.Tx, height int32, if scriptClass == txscript.NullDataTy { numNullDataOutputs++ } else if IsDust(txOut, minRelayTxFee) { - str := 
fmt.Sprintf("transaction output %d: payment "+ - "of %d is dust", i, txOut.Value) + str := fmt.Sprintf("transaction output %d: payment is "+ + "dust: %v", i, txOut.Value) return txRuleError(wire.RejectDust, str) } } diff --git a/mining/mining.go b/mining/mining.go index 7905dade76..5f2706521a 100644 --- a/mining/mining.go +++ b/mining/mining.go @@ -563,9 +563,6 @@ mempoolLoop: } prioItem.dependsOn[*originHash] = struct{}{} - // Skip the check below. We already know the - // referenced transaction is available. - continue } } @@ -861,7 +858,7 @@ mempoolLoop: }, nil } -// AddWitnessCommitment adds the witness commitment as an OP_RETURN outpout +// AddWitnessCommitment adds the witness commitment as an OP_RETURN output // within the coinbase tx. The raw commitment is returned. func AddWitnessCommitment(coinbaseTx *btcutil.Tx, blockTxns []*btcutil.Tx) []byte { diff --git a/mining/policy.go b/mining/policy.go index 6213c2b336..8ddd575462 100644 --- a/mining/policy.go +++ b/mining/policy.go @@ -112,7 +112,7 @@ func CalcPriority(tx *wire.MsgTx, utxoView *blockchain.UtxoViewpoint, nextBlockH // A compressed pubkey pay-to-script-hash redemption with a maximum len // signature is of the form: // [OP_DATA_73 <73-byte sig> + OP_DATA_35 + {OP_DATA_33 - // <33 byte compresed pubkey> + OP_CHECKSIG}] + // <33 byte compressed pubkey> + OP_CHECKSIG}] // // Thus 1 + 73 + 1 + 1 + 33 + 1 = 110 overhead := 0 diff --git a/netsync/manager.go b/netsync/manager.go index 41ba70aa6a..3215a86ace 100644 --- a/netsync/manager.go +++ b/netsync/manager.go @@ -1136,7 +1136,7 @@ func (sm *SyncManager) haveInventory(invVect *wire.InvVect) (bool, error) { return false, nil } - // The requested inventory is is an unsupported type, so just claim + // The requested inventory is an unsupported type, so just claim // it is known to avoid requesting it. 
return true, nil } @@ -1454,6 +1454,13 @@ func (sm *SyncManager) handleBlockchainNotification(notification *blockchain.Not // A block has been connected to the main block chain. case blockchain.NTBlockConnected: + // Don't attempt to update the mempool if we're not current. + // The mempool is empty and the fee estimator is useless unless + // we're caught up. + if !sm.current() { + return + } + block, ok := notification.Data.(*btcutil.Block) if !ok { log.Warnf("Chain connected notification is not a block.") diff --git a/peer/example_test.go b/peer/example_test.go index d4662a2b4c..850557b877 100644 --- a/peer/example_test.go +++ b/peer/example_test.go @@ -16,7 +16,7 @@ import ( ) // mockRemotePeer creates a basic inbound peer listening on the simnet port for -// use with Example_peerConnection. It does not return until the listner is +// use with Example_peerConnection. It does not return until the listener is // active. func mockRemotePeer() error { // Configure peer to act as a simnet node that offers no services. diff --git a/peer/peer.go b/peer/peer.go index aa66cea98f..195fc0b4fe 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -744,7 +744,7 @@ func (p *Peer) LastRecv() time.Time { // LocalAddr returns the local address of the connection. // -// This function is safe fo concurrent access. +// This function is safe for concurrent access. func (p *Peer) LocalAddr() net.Addr { var localAddr net.Addr if atomic.LoadInt32(&p.connected) != 0 { diff --git a/release/release.sh b/release/release.sh index de49f64122..49dee89638 100755 --- a/release/release.sh +++ b/release/release.sh @@ -40,6 +40,7 @@ cd $MAINDIR # for a subset of systems/architectures. 
SYS=${BTCDBUILDSYS:-" darwin-amd64 + darwin-arm64 dragonfly-amd64 freebsd-386 freebsd-amd64 diff --git a/rpcclient/backend_version.go b/rpcclient/backend_version.go new file mode 100644 index 0000000000..cb2a46fc5e --- /dev/null +++ b/rpcclient/backend_version.go @@ -0,0 +1,208 @@ +package rpcclient + +import "strings" + +// BackendVersion defines an interface to handle the version of the backend +// used by the client. +type BackendVersion interface { + // String returns a human-readable backend version. + String() string + + // SupportUnifiedSoftForks returns true if the backend supports the + // unified softforks format. + SupportUnifiedSoftForks() bool + + // SupportTestMempoolAccept returns true if the backend supports the + // testmempoolaccept RPC. + SupportTestMempoolAccept() bool + + // SupportGetTxSpendingPrevOut returns true if the backend supports the + // gettxspendingprevout RPC. + SupportGetTxSpendingPrevOut() bool +} + +// BitcoindVersion represents the version of the bitcoind the client is +// currently connected to. +type BitcoindVersion uint8 + +const ( + // BitcoindPre19 represents a bitcoind version before 0.19.0. + BitcoindPre19 BitcoindVersion = iota + + // BitcoindPre22 represents a bitcoind version equal to or greater than + // 0.19.0 and smaller than 22.0.0. + BitcoindPre22 + + // BitcoindPre24 represents a bitcoind version equal to or greater than + // 22.0.0 and smaller than 24.0.0. + BitcoindPre24 + + // BitcoindPre25 represents a bitcoind version equal to or greater than + // 24.0.0 and smaller than 25.0.0. + BitcoindPre25 + + // BitcoindPre25 represents a bitcoind version equal to or greater than + // 25.0.0. + BitcoindPost25 +) + +// String returns a human-readable backend version. 
+func (b BitcoindVersion) String() string { + switch b { + case BitcoindPre19: + return "bitcoind 0.19 and below" + + case BitcoindPre22: + return "bitcoind v0.19.0-v22.0.0" + + case BitcoindPre24: + return "bitcoind v22.0.0-v24.0.0" + + case BitcoindPre25: + return "bitcoind v24.0.0-v25.0.0" + + case BitcoindPost25: + return "bitcoind v25.0.0 and above" + + default: + return "unknown" + } +} + +// SupportUnifiedSoftForks returns true if the backend supports the unified +// softforks format. +func (b BitcoindVersion) SupportUnifiedSoftForks() bool { + // Versions of bitcoind on or after v0.19.0 use the unified format. + return b > BitcoindPre19 +} + +// SupportTestMempoolAccept returns true if bitcoind version is 22.0.0 or +// above. +func (b BitcoindVersion) SupportTestMempoolAccept() bool { + return b > BitcoindPre22 +} + +// SupportGetTxSpendingPrevOut returns true if bitcoind version is 24.0.0 or +// above. +func (b BitcoindVersion) SupportGetTxSpendingPrevOut() bool { + return b > BitcoindPre24 +} + +// Compile-time checks to ensure that BitcoindVersion satisfy the +// BackendVersion interface. +var _ BackendVersion = BitcoindVersion(0) + +const ( + // bitcoind19Str is the string representation of bitcoind v0.19.0. + bitcoind19Str = "0.19.0" + + // bitcoind22Str is the string representation of bitcoind v22.0.0. + bitcoind22Str = "22.0.0" + + // bitcoind24Str is the string representation of bitcoind v24.0.0. + bitcoind24Str = "24.0.0" + + // bitcoind25Str is the string representation of bitcoind v25.0.0. + bitcoind25Str = "25.0.0" + + // bitcoindVersionPrefix specifies the prefix included in every bitcoind + // version exposed through GetNetworkInfo. + bitcoindVersionPrefix = "/Satoshi:" + + // bitcoindVersionSuffix specifies the suffix included in every bitcoind + // version exposed through GetNetworkInfo. + bitcoindVersionSuffix = "/" +) + +// parseBitcoindVersion parses the bitcoind version from its string +// representation. 
+func parseBitcoindVersion(version string) BitcoindVersion { + // Trim the version of its prefix and suffix to determine the + // appropriate version number. + version = strings.TrimPrefix( + strings.TrimSuffix(version, bitcoindVersionSuffix), + bitcoindVersionPrefix, + ) + switch { + case version < bitcoind19Str: + return BitcoindPre19 + + case version < bitcoind22Str: + return BitcoindPre22 + + case version < bitcoind24Str: + return BitcoindPre24 + + case version < bitcoind25Str: + return BitcoindPre25 + + default: + return BitcoindPost25 + } +} + +// BtcdVersion represents the version of the btcd the client is currently +// connected to. +type BtcdVersion int32 + +const ( + // BtcdPre2401 describes a btcd version before 0.24.1, which doesn't + // include the `testmempoolaccept` and `gettxspendingprevout` RPCs. + BtcdPre2401 BtcdVersion = iota + + // BtcdPost2401 describes a btcd version equal to or greater than + // 0.24.1. + BtcdPost2401 +) + +// String returns a human-readable backend version. +func (b BtcdVersion) String() string { + switch b { + case BtcdPre2401: + return "btcd 24.0.0 and below" + + case BtcdPost2401: + return "btcd 24.1.0 and above" + + default: + return "unknown" + } +} + +// SupportUnifiedSoftForks returns true if the backend supports the unified +// softforks format. +// +// NOTE: always true for btcd as we didn't track it before. +func (b BtcdVersion) SupportUnifiedSoftForks() bool { + return true +} + +// SupportTestMempoolAccept returns true if btcd version is 24.1.0 or above. +func (b BtcdVersion) SupportTestMempoolAccept() bool { + return b > BtcdPre2401 +} + +// SupportGetTxSpendingPrevOut returns true if btcd version is 24.1.0 or above. +func (b BtcdVersion) SupportGetTxSpendingPrevOut() bool { + return b > BtcdPre2401 +} + +// Compile-time checks to ensure that BtcdVersion satisfy the BackendVersion +// interface. +var _ BackendVersion = BtcdVersion(0) + +const ( + // btcd2401Val is the int representation of btcd v0.24.1. 
+ btcd2401Val = 240100 +) + +// parseBtcdVersion parses the btcd version from its string representation. +func parseBtcdVersion(version int32) BtcdVersion { + switch { + case version < btcd2401Val: + return BtcdPre2401 + + default: + return BtcdPost2401 + } +} diff --git a/rpcclient/backend_version_test.go b/rpcclient/backend_version_test.go new file mode 100644 index 0000000000..3a4baec1db --- /dev/null +++ b/rpcclient/backend_version_test.go @@ -0,0 +1,148 @@ +package rpcclient + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestParseBitcoindVersion checks that the correct version from bitcoind's +// `getnetworkinfo` RPC call is parsed. +func TestParseBitcoindVersion(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + rpcVersion string + parsedVersion BitcoindVersion + }{ + { + name: "parse version 0.19 and below", + rpcVersion: "/Satoshi:0.18.0/", + parsedVersion: BitcoindPre19, + }, + { + name: "parse version 0.19", + rpcVersion: "/Satoshi:0.19.0/", + parsedVersion: BitcoindPre22, + }, + { + name: "parse version 0.19 - 22.0", + rpcVersion: "/Satoshi:0.20.1/", + parsedVersion: BitcoindPre22, + }, + { + name: "parse version 22.0", + rpcVersion: "/Satoshi:22.0.0/", + parsedVersion: BitcoindPre24, + }, + { + name: "parse version 22.0 - 24.0", + rpcVersion: "/Satoshi:23.1.0/", + parsedVersion: BitcoindPre24, + }, + { + name: "parse version 24.0", + rpcVersion: "/Satoshi:24.0.0/", + parsedVersion: BitcoindPre25, + }, + { + name: "parse version 25.0", + rpcVersion: "/Satoshi:25.0.0/", + parsedVersion: BitcoindPost25, + }, + { + name: "parse version 25.0 and above", + rpcVersion: "/Satoshi:26.0.0/", + parsedVersion: BitcoindPost25, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + version := parseBitcoindVersion(tc.rpcVersion) + require.Equal(t, tc.parsedVersion, version) + }) + } +} + +// TestParseBtcdVersion checks that the correct version from btcd's `getinfo` +// RPC 
call is parsed. +func TestParseBtcdVersion(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + rpcVersion int32 + parsedVersion BtcdVersion + }{ + { + name: "parse version 0.24 and below", + rpcVersion: 230000, + parsedVersion: BtcdPre2401, + }, + { + name: "parse version 0.24.1", + rpcVersion: 240100, + parsedVersion: BtcdPost2401, + }, + { + name: "parse version 0.24.1 and above", + rpcVersion: 250000, + parsedVersion: BtcdPost2401, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + version := parseBtcdVersion(tc.rpcVersion) + require.Equal(t, tc.parsedVersion, version) + }) + } +} + +// TestVersionSupports checks all the versions of bitcoind and btcd to ensure +// that the RPCs are supported correctly. +func TestVersionSupports(t *testing.T) { + t.Parallel() + + require := require.New(t) + + // For bitcoind, unified softforks format is supported in 19.0 and + // above. + require.False(BitcoindPre19.SupportUnifiedSoftForks()) + require.True(BitcoindPre22.SupportUnifiedSoftForks()) + require.True(BitcoindPre24.SupportUnifiedSoftForks()) + require.True(BitcoindPre25.SupportUnifiedSoftForks()) + require.True(BitcoindPost25.SupportUnifiedSoftForks()) + + // For bitcoind, `testmempoolaccept` is supported in 22.0 and above. + require.False(BitcoindPre19.SupportTestMempoolAccept()) + require.False(BitcoindPre22.SupportTestMempoolAccept()) + require.True(BitcoindPre24.SupportTestMempoolAccept()) + require.True(BitcoindPre25.SupportTestMempoolAccept()) + require.True(BitcoindPost25.SupportTestMempoolAccept()) + + // For bitcoind, `gettxspendingprevout` is supported in 24.0 and above. 
+ require.False(BitcoindPre19.SupportGetTxSpendingPrevOut()) + require.False(BitcoindPre22.SupportGetTxSpendingPrevOut()) + require.False(BitcoindPre24.SupportGetTxSpendingPrevOut()) + require.True(BitcoindPre25.SupportGetTxSpendingPrevOut()) + require.True(BitcoindPost25.SupportGetTxSpendingPrevOut()) + + // For btcd, unified softforks format is supported in all versions. + require.True(BtcdPre2401.SupportUnifiedSoftForks()) + require.True(BtcdPost2401.SupportUnifiedSoftForks()) + + // For btcd, `testmempoolaccept` is supported in 24.1 and above. + require.False(BtcdPre2401.SupportTestMempoolAccept()) + require.True(BtcdPost2401.SupportTestMempoolAccept()) + + // For btcd, `gettxspendingprevout` is supported in 24.1 and above. + require.False(BtcdPre2401.SupportGetTxSpendingPrevOut()) + require.True(BtcdPost2401.SupportGetTxSpendingPrevOut()) +} diff --git a/rpcclient/chain.go b/rpcclient/chain.go index b21665991f..c8562b8e65 100644 --- a/rpcclient/chain.go +++ b/rpcclient/chain.go @@ -440,17 +440,16 @@ func unmarshalPartialGetBlockChainInfoResult(res []byte) (*btcjson.GetBlockChain func unmarshalGetBlockChainInfoResultSoftForks(chainInfo *btcjson.GetBlockChainInfoResult, version BackendVersion, res []byte) error { - switch version { // Versions of bitcoind on or after v0.19.0 use the unified format. - case BitcoindPost19: + if version.SupportUnifiedSoftForks() { var softForks btcjson.UnifiedSoftForks if err := json.Unmarshal(res, &softForks); err != nil { return err } chainInfo.UnifiedSoftForks = &softForks + } else { - // All other versions use the original format. - default: + // All other versions use the original format. 
var softForks btcjson.SoftForks if err := json.Unmarshal(res, &softForks); err != nil { return err @@ -891,7 +890,7 @@ func (c *Client) EstimateFee(numBlocks int64) (float64, error) { return c.EstimateFeeAsync(numBlocks).Receive() } -// FutureEstimateFeeResult is a future promise to deliver the result of a +// FutureEstimateSmartFeeResult is a future promise to deliver the result of a // EstimateSmartFeeAsync RPC invocation (or an applicable error). type FutureEstimateSmartFeeResult chan *Response @@ -1420,3 +1419,38 @@ func (c *Client) GetDescriptorInfoAsync(descriptor string) FutureGetDescriptorIn func (c *Client) GetDescriptorInfo(descriptor string) (*btcjson.GetDescriptorInfoResult, error) { return c.GetDescriptorInfoAsync(descriptor).Receive() } + +// FutureReconsiderBlockResult is a future promise to deliver the result of a +// ReconsiderBlockAsync RPC invocation (or an applicable error). +type FutureReconsiderBlockResult chan *Response + +// Receive waits for the Response promised by the future and returns the raw +// block requested from the server given its hash. +func (r FutureReconsiderBlockResult) Receive() error { + _, err := ReceiveFuture(r) + return err +} + +// ReconsiderBlockAsync returns an instance of a type that can be used to get the +// result of the RPC at some future time by invoking the Receive function on the +// returned instance. +// +// See ReconsiderBlock for the blocking version and more details. +func (c *Client) ReconsiderBlockAsync( + blockHash *chainhash.Hash) FutureReconsiderBlockResult { + + hash := "" + if blockHash != nil { + hash = blockHash.String() + } + + cmd := btcjson.NewReconsiderBlockCmd(hash) + return c.SendCmd(cmd) +} + +// ReconsiderBlock reconsiders an verifies a specific block and the branch that +// the block is included in. If the block is valid on reconsideration, the chain +// will reorg to that block if it has more PoW than the current tip. 
+func (c *Client) ReconsiderBlock(blockHash *chainhash.Hash) error { + return c.ReconsiderBlockAsync(blockHash).Receive() +} diff --git a/rpcclient/chain_test.go b/rpcclient/chain_test.go index de8d3a740e..ad1fb7aa2a 100644 --- a/rpcclient/chain_test.go +++ b/rpcclient/chain_test.go @@ -2,13 +2,14 @@ package rpcclient import ( "errors" - "github.com/gorilla/websocket" "net/http" "net/http/httptest" "strings" "sync" "testing" "time" + + "github.com/gorilla/websocket" ) var upgrader = websocket.Upgrader{} @@ -33,7 +34,7 @@ func TestUnmarshalGetBlockChainInfoResultSoftForks(t *testing.T) { }, { name: "bitcoind >= 0.19.0 with separate softforks", - version: BitcoindPost19, + version: BitcoindPre22, res: []byte(`{"softforks": [{"version": 2}]}`), compatible: false, }, @@ -45,7 +46,7 @@ func TestUnmarshalGetBlockChainInfoResultSoftForks(t *testing.T) { }, { name: "bitcoind >= 0.19.0 with unified softforks", - version: BitcoindPost19, + version: BitcoindPre22, res: []byte(`{"softforks": {"segwit": {"type": "bip9"}}}`), compatible: true, }, @@ -53,7 +54,7 @@ func TestUnmarshalGetBlockChainInfoResultSoftForks(t *testing.T) { for _, test := range tests { success := t.Run(test.name, func(t *testing.T) { - // We'll start by unmarshaling the JSON into a struct. + // We'll start by unmarshalling the JSON into a struct. // The SoftForks and UnifiedSoftForks field should not // be set yet, as they are unmarshaled within a // different function. @@ -87,7 +88,7 @@ func TestUnmarshalGetBlockChainInfoResultSoftForks(t *testing.T) { // If the version is compatible with the response, we // should expect to see the proper softforks field set. 
- if test.version == BitcoindPost19 && + if test.version == BitcoindPre22 && info.SoftForks != nil { t.Fatal("expected SoftForks to be empty") } @@ -225,7 +226,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) { response := <-ch if &expectedResponse != response { - t.Fatalf("received unexepcted response") + t.Fatalf("received unexpected response") } // ensure the goroutine created in this test exists, @@ -235,7 +236,7 @@ func TestClientConnectedToWSServerRunner(t *testing.T) { }, } - // since these tests rely on concurrency, ensure there is a resonable timeout + // since these tests rely on concurrency, ensure there is a reasonable timeout // that they should run within for _, testCase := range testTable { done := make(chan bool) diff --git a/rpcclient/errors.go b/rpcclient/errors.go new file mode 100644 index 0000000000..68c0780dff --- /dev/null +++ b/rpcclient/errors.go @@ -0,0 +1,515 @@ +package rpcclient + +import ( + "errors" + "fmt" + "strings" +) + +var ( + // ErrBackendVersion is returned when running against a bitcoind or + // btcd that is older than the minimum version supported by the + // rpcclient. + ErrBackendVersion = errors.New("backend version too low") + + // ErrInvalidParam is returned when the caller provides an invalid + // parameter to an RPC method. + ErrInvalidParam = errors.New("invalid param") + + // ErrUndefined is used when an error returned is not recognized. We + // should gradually increase our error types to avoid returning this + // error. + ErrUndefined = errors.New("undefined") +) + +// BitcoindRPCErr represents an error returned by bitcoind's RPC server. +type BitcoindRPCErr uint32 + +// This section defines all possible errors or reject reasons returned from +// bitcoind's `sendrawtransaction` or `testmempoolaccept` RPC. +const ( + // ErrMissingInputsOrSpent is returned when calling + // `sendrawtransaction` with missing inputs. 
+ ErrMissingInputsOrSpent BitcoindRPCErr = iota + + // ErrMaxBurnExceeded is returned when calling `sendrawtransaction` + // with exceeding, falling short of, and equaling maxburnamount. + ErrMaxBurnExceeded + + // ErrMaxFeeExceeded can happen when passing a signed tx to + // `testmempoolaccept`, but the tx pays more fees than specified. + ErrMaxFeeExceeded + + // ErrTxAlreadyKnown is used in the `reject-reason` field of + // `testmempoolaccept` when a transaction is already in the blockchain. + ErrTxAlreadyKnown + + // ErrTxAlreadyConfirmed is returned as an error from + // `sendrawtransaction` when a transaction is already in the + // blockchain. + ErrTxAlreadyConfirmed + + // ErrMempoolConflict happens when RBF is not enabled yet the + // transaction conflicts with an unconfirmed tx. . + // + // NOTE: RBF rule 1. + ErrMempoolConflict + + // ErrReplacementAddsUnconfirmed is returned when a transaction adds + // new unconfirmed inputs. + // + // NOTE: RBF rule 2. + ErrReplacementAddsUnconfirmed + + // ErrInsufficientFee is returned when fee rate used or fees paid + // doesn't meet the requirements. + // + // NOTE: RBF rule 3 or 4. + ErrInsufficientFee + + // ErrTooManyReplacements is returned when a transaction causes too + // many transactions being replaced. This is set by + // `MAX_REPLACEMENT_CANDIDATES` in `bitcoind` and defaults to 100. + // + // NOTE: RBF rule 5. + ErrTooManyReplacements + + // ErrMempoolMinFeeNotMet is returned when the transaction doesn't meet + // the minimum relay fee. + ErrMempoolMinFeeNotMet + + // ErrConflictingTx is returned when a transaction that spends + // conflicting tx outputs that are rejected. + ErrConflictingTx + + // ErrEmptyOutput is returned when a transaction has no outputs. + ErrEmptyOutput + + // ErrEmptyInput is returned when a transaction has no inputs. + ErrEmptyInput + + // ErrTxTooSmall is returned when spending a tiny transaction(in + // non-witness bytes) that is disallowed. 
+ // + // NOTE: ErrTxTooLarge must be put after ErrTxTooSmall because it's a + // subset of ErrTxTooSmall. Otherwise, if bitcoind returns + // `tx-size-small`, it will be matched to ErrTxTooLarge. + ErrTxTooSmall + + // ErrDuplicateInput is returned when a transaction has duplicate + // inputs. + ErrDuplicateInput + + // ErrEmptyPrevOut is returned when a non-coinbase transaction has + // coinbase-like outpoint. + ErrEmptyPrevOut + + // ErrBelowOutValue is returned when a transaction's output value is + // greater than its input value. + ErrBelowOutValue + + // ErrNegativeOutput is returned when a transaction has negative output + // value. + ErrNegativeOutput + + // ErrLargeOutput is returned when a transaction has too large output + // value. + ErrLargeOutput + + // ErrLargeTotalOutput is returned when a transaction has too large sum + // of output values. + ErrLargeTotalOutput + + // ErrScriptVerifyFlag is returned when there is invalid OP_IF + // construction. + ErrScriptVerifyFlag + + // ErrTooManySigOps is returned when a transaction has too many sigops. + ErrTooManySigOps + + // ErrInvalidOpcode is returned when a transaction has invalid OP + // codes. + ErrInvalidOpcode + + // ErrTxAlreadyInMempool is returned when a transaction is in the + // mempool. + ErrTxAlreadyInMempool + + // ErrMissingInputs is returned when a transaction has missing inputs, + // that never existed or only existed once in the past. + ErrMissingInputs + + // ErrOversizeTx is returned when a transaction is too large. + ErrOversizeTx + + // ErrCoinbaseTx is returned when the transaction is coinbase tx. + ErrCoinbaseTx + + // ErrNonStandardVersion is returned when the transactions are not + // standard - a version currently non-standard. + ErrNonStandardVersion + + // ErrNonStandardScript is returned when the transactions are not + // standard - non-standard script. 
+ ErrNonStandardScript + + // ErrBareMultiSig is returned when the transactions are not standard - + // bare multisig script (2-of-3). + ErrBareMultiSig + + // ErrScriptSigNotPushOnly is returned when the transactions are not + // standard - not-pushonly scriptSig. + ErrScriptSigNotPushOnly + + // ErrScriptSigSize is returned when the transactions are not standard + // - too large scriptSig (>1650 bytes). + ErrScriptSigSize + + // ErrTxTooLarge is returned when the transactions are not standard - + // too large tx size. + ErrTxTooLarge + + // ErrDust is returned when the transactions are not standard - output + // too small. + ErrDust + + // ErrMultiOpReturn is returned when the transactions are not standard + // - muiltiple OP_RETURNs. + ErrMultiOpReturn + + // ErrNonFinal is returned when spending a timelocked transaction that + // hasn't expired yet. + ErrNonFinal + + // ErrNonBIP68Final is returned when a transaction that is locked by + // BIP68 sequence logic and not expired yet. + ErrNonBIP68Final + + // ErrSameNonWitnessData is returned when another tx with the same + // non-witness data is already in the mempool. For instance, these two + // txns share the same `txid` but different `wtxid`. + ErrSameNonWitnessData + + // ErrNonMandatoryScriptVerifyFlag is returned when passing a raw tx to + // `testmempoolaccept`, which gives the error followed by (Witness + // program hash mismatch). + ErrNonMandatoryScriptVerifyFlag + + // errSentinel is used to indicate the end of the error list. This + // should always be the last error code. + errSentinel +) + +// Error implements the error interface. It returns the error message defined +// in `bitcoind`. + +// Some of the dashes used in the original error string is removed, e.g. +// "missing-inputs" is now "missing inputs". This is ok since we will normalize +// the errors before matching. 
+// +// references: +// - https://github.com/bitcoin/bitcoin/blob/master/test/functional/rpc_rawtransaction.py#L342 +// - https://github.com/bitcoin/bitcoin/blob/master/test/functional/data/invalid_txs.py +// - https://github.com/bitcoin/bitcoin/blob/master/test/functional/mempool_accept.py +// - https://github.com/bitcoin/bitcoin/blob/master/test/functional/mempool_accept_wtxid.py +// - https://github.com/bitcoin/bitcoin/blob/master/test/functional/mempool_dust.py +// - https://github.com/bitcoin/bitcoin/blob/master/test/functional/mempool_limit.py +// - https://github.com/bitcoin/bitcoin/blob/master/src/validation.cpp +func (r BitcoindRPCErr) Error() string { + switch r { + case ErrMissingInputsOrSpent: + return "bad-txns-inputs-missingorspent" + + case ErrMaxBurnExceeded: + return "Unspendable output exceeds maximum configured by user (maxburnamount)" + + case ErrMaxFeeExceeded: + return "max-fee-exceeded" + + case ErrTxAlreadyKnown: + return "txn-already-known" + + case ErrTxAlreadyConfirmed: + return "Transaction already in block chain" + + case ErrMempoolConflict: + return "txn mempool conflict" + + case ErrReplacementAddsUnconfirmed: + return "replacement adds unconfirmed" + + case ErrInsufficientFee: + return "insufficient fee" + + case ErrTooManyReplacements: + return "too many potential replacements" + + case ErrMempoolMinFeeNotMet: + return "mempool min fee not met" + + case ErrConflictingTx: + return "bad txns spends conflicting tx" + + case ErrEmptyOutput: + return "bad txns vout empty" + + case ErrEmptyInput: + return "bad txns vin empty" + + case ErrTxTooSmall: + return "tx size small" + + case ErrDuplicateInput: + return "bad txns inputs duplicate" + + case ErrEmptyPrevOut: + return "bad txns prevout null" + + case ErrBelowOutValue: + return "bad txns in belowout" + + case ErrNegativeOutput: + return "bad txns vout negative" + + case ErrLargeOutput: + return "bad txns vout toolarge" + + case ErrLargeTotalOutput: + return "bad txns txouttotal 
toolarge" + + case ErrScriptVerifyFlag: + return "mandatory script verify flag failed" + + case ErrTooManySigOps: + return "bad txns too many sigops" + + case ErrInvalidOpcode: + return "disabled opcode" + + case ErrTxAlreadyInMempool: + return "txn already in mempool" + + case ErrMissingInputs: + return "missing inputs" + + case ErrOversizeTx: + return "bad txns oversize" + + case ErrCoinbaseTx: + return "coinbase" + + case ErrNonStandardVersion: + return "version" + + case ErrNonStandardScript: + return "scriptpubkey" + + case ErrBareMultiSig: + return "bare multisig" + + case ErrScriptSigNotPushOnly: + return "scriptsig not pushonly" + + case ErrScriptSigSize: + return "scriptsig size" + + case ErrTxTooLarge: + return "tx size" + + case ErrDust: + return "dust" + + case ErrMultiOpReturn: + return "multi op return" + + case ErrNonFinal: + return "non final" + + case ErrNonBIP68Final: + return "non BIP68 final" + + case ErrSameNonWitnessData: + return "txn-same-nonwitness-data-in-mempool" + + case ErrNonMandatoryScriptVerifyFlag: + return "non-mandatory-script-verify-flag" + } + + return "unknown error" +} + +// BtcdErrMap takes the errors returned from btcd's `testmempoolaccept` and +// `sendrawtransaction` RPCs and map them to the errors defined above, which +// are results from calling either `testmempoolaccept` or `sendrawtransaction` +// in `bitcoind`. +// +// Errors not mapped in `btcd`: +// - deployment error from `validateSegWitDeployment`. +// - the error when total inputs is higher than max allowed value from +// `CheckTransactionInputs`. +// - the error when total outputs is higher than total inputs from +// `CheckTransactionInputs`. +// - errors from `CalcSequenceLock`. +// +// NOTE: This is not an exhaustive list of errors, but it covers the +// usage case of LND. +// +//nolint:lll +var BtcdErrMap = map[string]error{ + // BIP125 related errors. + // + // When fee rate used or fees paid doesn't meet the requirements. 
+ "replacement transaction has an insufficient fee rate": ErrInsufficientFee, + "replacement transaction has an insufficient absolute fee": ErrInsufficientFee, + + // When a transaction causes too many transactions being replaced. This + // is set by `MAX_REPLACEMENT_CANDIDATES` in `bitcoind` and defaults to + // 100. + "replacement transaction evicts more transactions than permitted": ErrTooManyReplacements, + + // When a transaction adds new unconfirmed inputs. + "replacement transaction spends new unconfirmed input": ErrReplacementAddsUnconfirmed, + + // A transaction that spends conflicting tx outputs that are rejected. + "replacement transaction spends parent transaction": ErrConflictingTx, + + // A transaction that conflicts with an unconfirmed tx. Happens when + // RBF is not enabled. + "output already spent in mempool": ErrMempoolConflict, + + // A transaction with no outputs. + "transaction has no outputs": ErrEmptyOutput, + + // A transaction with no inputs. + "transaction has no inputs": ErrEmptyInput, + + // A transaction with duplicate inputs. + "transaction contains duplicate inputs": ErrDuplicateInput, + + // A non-coinbase transaction with coinbase-like outpoint. + "transaction input refers to previous output that is null": ErrEmptyPrevOut, + + // A transaction pays too little fee. + "fees which is under the required amount": ErrMempoolMinFeeNotMet, + "has insufficient priority": ErrInsufficientFee, + "has been rejected by the rate limiter due to low fees": ErrInsufficientFee, + + // A transaction with negative output value. + "transaction output has negative value": ErrNegativeOutput, + + // A transaction with too large output value. + "transaction output value is higher than max allowed value": ErrLargeOutput, + + // A transaction with too large sum of output values. + "total value of all transaction outputs exceeds max allowed value": ErrLargeTotalOutput, + + // A transaction with too many sigops. 
+ "sigop cost is too hight": ErrTooManySigOps, + + // A transaction already in the blockchain. + "database contains entry for spent tx output": ErrTxAlreadyKnown, + "transaction already exists in blockchain": ErrTxAlreadyConfirmed, + + // A transaction in the mempool. + "already have transaction in mempool": ErrTxAlreadyInMempool, + + // A transaction with missing inputs, that never existed or only + // existed once in the past. + "either does not exist or has already been spent": ErrMissingInputs, + "orphan transaction": ErrMissingInputs, + + // A really large transaction. + "serialized transaction is too big": ErrOversizeTx, + + // A coinbase transaction. + "transaction is an invalid coinbase": ErrCoinbaseTx, + + // Some nonstandard transactions - a version currently non-standard. + "transaction version": ErrNonStandardVersion, + + // Some nonstandard transactions - non-standard script. + "non-standard script form": ErrNonStandardScript, + "has a non-standard input": ErrNonStandardScript, + + // Some nonstandard transactions - bare multisig script + // (2-of-3). + "milti-signature script": ErrBareMultiSig, + + // Some nonstandard transactions - not-pushonly scriptSig. + "signature script is not push only": ErrScriptSigNotPushOnly, + + // Some nonstandard transactions - too large scriptSig (>1650 + // bytes). + "signature script size is larger than max allowed": ErrScriptSigSize, + + // Some nonstandard transactions - too large tx size. + "weight of transaction is larger than max allowed": ErrTxTooLarge, + + // Some nonstandard transactions - output too small. + "payment is dust": ErrDust, + + // Some nonstandard transactions - muiltiple OP_RETURNs. + "more than one transaction output in a nulldata script": ErrMultiOpReturn, + + // A timelocked transaction. + "transaction is not finalized": ErrNonFinal, + "tried to spend coinbase transaction output": ErrNonFinal, + + // A transaction that is locked by BIP68 sequence logic. 
+ "transaction's sequence locks on inputs not met": ErrNonBIP68Final, + + // TODO(yy): find/return the following errors in `btcd`. + // + // A tiny transaction(in non-witness bytes) that is disallowed. + // "unmatched btcd error 1": ErrTxTooSmall, + // "unmatched btcd error 2": ErrScriptVerifyFlag, + // // A transaction with invalid OP codes. + // "unmatched btcd error 3": ErrInvalidOpcode, + // // Minimally-small transaction(in non-witness bytes) that is + // // allowed. + // "unmatched btcd error 4": ErrSameNonWitnessData, +} + +// MapRPCErr takes an error returned from calling RPC methods from various +// chain backend and map it to an defined error here. It uses the `BtcdErrMap` +// defined above, whose keys are btcd error strings and values are errors made +// from bitcoind error strings. +// +// NOTE: we assume neutrino shares the same error strings as btcd. +func MapRPCErr(rpcErr error) error { + // Iterate the map and find the matching error. + for btcdErr, err := range BtcdErrMap { + // Match it against btcd's error first. + if matchErrStr(rpcErr, btcdErr) { + return err + } + } + + // If not found, try to match it against bitcoind's error. + for i := uint32(0); i < uint32(errSentinel); i++ { + err := BitcoindRPCErr(i) + if matchErrStr(rpcErr, err.Error()) { + return err + } + } + + // If not matched, return the original error wrapped. + return fmt.Errorf("%w: %v", ErrUndefined, rpcErr) +} + +// matchErrStr takes an error returned from RPC client and matches it against +// the specified string. If the expected string pattern is found in the error +// passed, return true. Both the error strings are normalized before matching. +func matchErrStr(err error, s string) bool { + // Replace all dashes found in the error string with spaces. + strippedErrStr := strings.ReplaceAll(err.Error(), "-", " ") + + // Replace all dashes found in the error string with spaces. + strippedMatchStr := strings.ReplaceAll(s, "-", " ") + + // Match against the lowercase. 
+ return strings.Contains( + strings.ToLower(strippedErrStr), + strings.ToLower(strippedMatchStr), + ) +} diff --git a/rpcclient/errors_test.go b/rpcclient/errors_test.go new file mode 100644 index 0000000000..e074622b11 --- /dev/null +++ b/rpcclient/errors_test.go @@ -0,0 +1,122 @@ +package rpcclient + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestMatchErrStr checks that `matchErrStr` can correctly replace the dashes +// with spaces and turn title cases into lowercases for a given error and match +// it against the specified string pattern. +func TestMatchErrStr(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + bitcoindErr error + matchStr string + matched bool + }{ + { + name: "error without dashes", + bitcoindErr: errors.New("missing input"), + matchStr: "missing input", + matched: true, + }, + { + name: "match str without dashes", + bitcoindErr: errors.New("missing-input"), + matchStr: "missing input", + matched: true, + }, + { + name: "error with dashes", + bitcoindErr: errors.New("missing-input"), + matchStr: "missing input", + matched: true, + }, + { + name: "match str with dashes", + bitcoindErr: errors.New("missing-input"), + matchStr: "missing-input", + matched: true, + }, + { + name: "error with title case and dash", + bitcoindErr: errors.New("Missing-Input"), + matchStr: "missing input", + matched: true, + }, + { + name: "match str with title case and dash", + bitcoindErr: errors.New("missing-input"), + matchStr: "Missing-Input", + matched: true, + }, + { + name: "unmatched error", + bitcoindErr: errors.New("missing input"), + matchStr: "missingorspent", + matched: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + matched := matchErrStr(tc.bitcoindErr, tc.matchStr) + require.Equal(t, tc.matched, matched) + }) + } +} + +// TestMapRPCErr checks that `MapRPCErr` can correctly map a given error to +// the corresponding error in the `BtcdErrMap` or 
`BitcoindErrors` map. +func TestMapRPCErr(t *testing.T) { + t.Parallel() + + require := require.New(t) + + // Get all known bitcoind errors. + bitcoindErrors := make([]error, 0, errSentinel) + for i := uint32(0); i < uint32(errSentinel); i++ { + err := BitcoindRPCErr(i) + bitcoindErrors = append(bitcoindErrors, err) + } + + // An unknown error should be mapped to ErrUndefined. + errUnknown := errors.New("unknown error") + err := MapRPCErr(errUnknown) + require.ErrorIs(err, ErrUndefined) + + // A known error should be mapped to the corresponding error in the + // `BtcdErrMap` or `bitcoindErrors` map. + for btcdErrStr, mappedErr := range BtcdErrMap { + err := MapRPCErr(errors.New(btcdErrStr)) + require.ErrorIs(err, mappedErr) + + err = MapRPCErr(mappedErr) + require.ErrorIs(err, mappedErr) + } + + for _, bitcoindErr := range bitcoindErrors { + err = MapRPCErr(bitcoindErr) + require.ErrorIs(err, bitcoindErr) + } +} + +// TestBitcoindErrorSentinel checks that all defined BitcoindRPCErr errors are +// added to the method `Error`. +func TestBitcoindErrorSentinel(t *testing.T) { + t.Parallel() + + rt := require.New(t) + + for i := uint32(0); i < uint32(errSentinel); i++ { + err := BitcoindRPCErr(i) + rt.NotEqualf(err.Error(), "unknown error", "error code %d is "+ + "not defined, make sure to update it inside the Error "+ + "method", i) + } +} diff --git a/rpcclient/examples/btcdwebsockets/main.go b/rpcclient/examples/btcdwebsockets/main.go index e3f4c13e40..878526b076 100644 --- a/rpcclient/examples/btcdwebsockets/main.go +++ b/rpcclient/examples/btcdwebsockets/main.go @@ -5,7 +5,7 @@ package main import ( - "io/ioutil" + "os" "log" "path/filepath" "time" @@ -33,7 +33,7 @@ func main() { // Connect to local btcd RPC server using websockets. 
btcdHomeDir := btcutil.AppDataDir("btcd", false) - certs, err := ioutil.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) + certs, err := os.ReadFile(filepath.Join(btcdHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } diff --git a/rpcclient/examples/btcwalletwebsockets/main.go b/rpcclient/examples/btcwalletwebsockets/main.go index 3cbd9a3667..a63ef3db91 100644 --- a/rpcclient/examples/btcwalletwebsockets/main.go +++ b/rpcclient/examples/btcwalletwebsockets/main.go @@ -5,7 +5,7 @@ package main import ( - "io/ioutil" + "os" "log" "path/filepath" "time" @@ -29,7 +29,7 @@ func main() { // Connect to local btcwallet RPC server using websockets. certHomeDir := btcutil.AppDataDir("btcwallet", false) - certs, err := ioutil.ReadFile(filepath.Join(certHomeDir, "rpc.cert")) + certs, err := os.ReadFile(filepath.Join(certHomeDir, "rpc.cert")) if err != nil { log.Fatal(err) } diff --git a/rpcclient/infrastructure.go b/rpcclient/infrastructure.go index a32511175b..c4c85b3039 100644 --- a/rpcclient/infrastructure.go +++ b/rpcclient/infrastructure.go @@ -20,7 +20,6 @@ import ( "net/http" "net/url" "os" - "strings" "sync" "sync/atomic" "time" @@ -102,22 +101,6 @@ type jsonRequest struct { responseChan chan *Response } -// BackendVersion represents the version of the backend the client is currently -// connected to. -type BackendVersion uint8 - -const ( - // BitcoindPre19 represents a bitcoind version before 0.19.0. - BitcoindPre19 BackendVersion = iota - - // BitcoindPost19 represents a bitcoind version equal to or greater than - // 0.19.0. - BitcoindPost19 - - // Btcd represents a catch-all btcd version. - Btcd -) - // Client represents a Bitcoin RPC client which allows easy access to the // various RPC methods available on a Bitcoin RPC server. 
Each of the wrapper // functions handle the details of converting the passed and return types to and @@ -133,7 +116,7 @@ const ( type Client struct { id uint64 // atomic, so must stay 64-bit aligned - // config holds the connection configuration assoiated with this client. + // config holds the connection configuration associated with this client. config *ConnConfig // chainParams holds the params for the chain that this client is using, @@ -151,7 +134,7 @@ type Client struct { // backendVersion is the version of the backend the client is currently // connected to. This should be retrieved through GetVersion. backendVersionMu sync.Mutex - backendVersion *BackendVersion + backendVersion BackendVersion // mtx is a mutex to protect access to connection related fields. mtx sync.Mutex @@ -239,14 +222,21 @@ func (c *Client) removeRequest(id uint64) *jsonRequest { c.requestLock.Lock() defer c.requestLock.Unlock() - element := c.requestMap[id] - if element != nil { - delete(c.requestMap, id) - request := c.requestList.Remove(element).(*jsonRequest) - return request + element, ok := c.requestMap[id] + if !ok { + return nil } - return nil + delete(c.requestMap, id) + + var request *jsonRequest + if c.batch { + request = c.batchList.Remove(element).(*jsonRequest) + } else { + request = c.requestList.Remove(element).(*jsonRequest) + } + + return request } // removeAllRequests removes all the jsonRequests which contain the response @@ -361,7 +351,7 @@ type Response struct { } // result checks whether the unmarshaled response contains a non-nil error, -// returning an unmarshaled btcjson.RPCError (or an unmarshaling error) if so. +// returning an unmarshaled btcjson.RPCError (or an unmarshalling error) if so. // If the response is not an error, the raw bytes of the request are // returned for further unmashaling into specific result types. 
func (r rawResponse) result() (result []byte, err error) { @@ -443,7 +433,7 @@ func (c *Client) handleMessage(msg []byte) { // to have come from reading from the websocket connection in wsInHandler, // should be logged. func (c *Client) shouldLogReadError(err error) bool { - // No logging when the connetion is being forcibly disconnected. + // No logging when the connection is being forcibly disconnected. select { case <-c.shutdown: return false @@ -946,6 +936,11 @@ func newFutureError(err error) chan *Response { return responseChan } +// Expose newFutureError for developer usage when creating custom commands. +func NewFutureError(err error) chan *Response { + return newFutureError(err) +} + // ReceiveFuture receives from the passed futureResult channel to extract a // reply or any errors. The examined errors include an error in the // futureResult and the error in the reply from the server. This will block @@ -1575,36 +1570,6 @@ func (c *Client) Connect(tries int) error { return err } -const ( - // bitcoind19Str is the string representation of bitcoind v0.19.0. - bitcoind19Str = "0.19.0" - - // bitcoindVersionPrefix specifies the prefix included in every bitcoind - // version exposed through GetNetworkInfo. - bitcoindVersionPrefix = "/Satoshi:" - - // bitcoindVersionSuffix specifies the suffix included in every bitcoind - // version exposed through GetNetworkInfo. - bitcoindVersionSuffix = "/" -) - -// parseBitcoindVersion parses the bitcoind version from its string -// representation. -func parseBitcoindVersion(version string) BackendVersion { - // Trim the version of its prefix and suffix to determine the - // appropriate version number. - version = strings.TrimPrefix( - strings.TrimSuffix(version, bitcoindVersionSuffix), - bitcoindVersionPrefix, - ) - switch { - case version < bitcoind19Str: - return BitcoindPre19 - default: - return BitcoindPost19 - } -} - // BackendVersion retrieves the version of the backend the client is currently // connected to. 
func (c *Client) BackendVersion() (BackendVersion, error) { @@ -1612,7 +1577,7 @@ func (c *Client) BackendVersion() (BackendVersion, error) { defer c.backendVersionMu.Unlock() if c.backendVersion != nil { - return *c.backendVersion, nil + return c.backendVersion, nil } // We'll start by calling GetInfo. This method doesn't exist for @@ -1652,7 +1617,7 @@ func (c *Client) BackendVersion() (BackendVersion, error) { version := parseBitcoindVersion(networkInfo.SubVersion) c.backendVersion = &version - return *c.backendVersion, nil + return c.backendVersion, nil } func (c *Client) sendAsync() FutureGetBulkResult { @@ -1688,28 +1653,38 @@ func (c *Client) Send() error { return nil } - // clear batchlist in case of an error - defer func() { + batchResp, err := c.sendAsync().Receive() + if err != nil { + // Clear batchlist in case of an error. + // + // TODO(yy): need to double check to make sure there's no + // concurrent access to this batch list, otherwise we may miss + // some batched requests. c.batchList = list.New() - }() - result, err := c.sendAsync().Receive() - - if err != nil { return err } - for iter := c.batchList.Front(); iter != nil; iter = iter.Next() { - var requestError error - request := iter.Value.(*jsonRequest) - individualResult := result[request.id] - fullResult, err := json.Marshal(individualResult.Result) + // Iterate each response and send it to the corresponding request. + for id, resp := range batchResp { + // Perform a GC on batchList and requestMap before moving + // forward. + request := c.removeRequest(id) + + // If there's an error, we log it and continue to the next + // request. + fullResult, err := json.Marshal(resp.Result) if err != nil { - return err + log.Errorf("Unable to marshal result: %v for req=%v", + err, request.id) + + continue } - if individualResult.Error != nil { - requestError = individualResult.Error + // If there's a response error, we send it back the request. 
+ var requestError error + if resp.Error != nil { + requestError = resp.Error } result := Response{ @@ -1718,5 +1693,6 @@ func (c *Client) Send() error { } request.responseChan <- &result } + return nil } diff --git a/rpcclient/notify.go b/rpcclient/notify.go index 1c2814c313..1f5cd48075 100644 --- a/rpcclient/notify.go +++ b/rpcclient/notify.go @@ -472,13 +472,13 @@ func (c *Client) handleNotification(ntfn *rawNotification) { } } -// wrongNumParams is an error type describing an unparseable JSON-RPC -// notificiation due to an incorrect number of parameters for the +// wrongNumParams is an error type describing an unparsable JSON-RPC +// notification due to an incorrect number of parameters for the // expected notification type. The value is the number of parameters // of the invalid notification. type wrongNumParams int -// Error satisifies the builtin error interface. +// Error satisfies the builtin error interface. func (e wrongNumParams) Error() string { return fmt.Sprintf("wrong number of parameters (%d)", e) } @@ -599,7 +599,7 @@ func parseFilteredBlockDisconnectedParams(params []json.RawMessage) (int32, return 0, nil, err } - // Unmarshal second parmeter as a slice of bytes. + // Unmarshal second parameter as a slice of bytes. blockHeaderBytes, err := parseHexParam(params[1]) if err != nil { return 0, nil, err diff --git a/rpcclient/rawtransactions.go b/rpcclient/rawtransactions.go index 1df6195220..c72cabe5ec 100644 --- a/rpcclient/rawtransactions.go +++ b/rpcclient/rawtransactions.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/hex" "encoding/json" + "fmt" "github.com/btcsuite/btcd/btcjson" "github.com/btcsuite/btcd/btcutil" @@ -16,9 +17,9 @@ import ( ) const ( - // defaultMaxFeeRate is the default maximum fee rate in sat/KB enforced + // defaultMaxFeeRate is the default maximum fee rate in BTC/kvB enforced // by bitcoind v0.19.0 or after for transaction broadcast. 
- defaultMaxFeeRate = btcutil.SatoshiPerBitcoin / 10 + defaultMaxFeeRate btcjson.BTCPerkvB = 0.1 ) // SigHashType enumerates the available signature hashing types that the @@ -358,19 +359,19 @@ func (c *Client) SendRawTransactionAsync(tx *wire.MsgTx, allowHighFees bool) Fut } var cmd *btcjson.SendRawTransactionCmd - switch version { // Starting from bitcoind v0.19.0, the MaxFeeRate field should be used. - case BitcoindPost19: + // + // When unified softforks format is supported, it's 0.19 and above. + if version.SupportUnifiedSoftForks() { // Using a 0 MaxFeeRate is interpreted as a maximum fee rate not // being enforced by bitcoind. - var maxFeeRate int32 + var maxFeeRate btcjson.BTCPerkvB if !allowHighFees { maxFeeRate = defaultMaxFeeRate } cmd = btcjson.NewBitcoindSendRawTransactionCmd(txHex, maxFeeRate) - - // Otherwise, use the AllowHighFees field. - default: + } else { + // Otherwise, use the AllowHighFees field. cmd = btcjson.NewSendRawTransactionCmd(txHex, &allowHighFees) } @@ -718,7 +719,7 @@ func (c *Client) SignRawTransactionWithWallet3Async(tx *wire.MsgTx, // // This function should only used if a non-default signature hash type is // desired. Otherwise, see SignRawTransactionWithWallet if the RPC server already -// knows the input transactions, or SignRawTransactionWihWallet2 if it does not. +// knows the input transactions, or SignRawTransactionWithWallet2 if it does not. func (c *Client) SignRawTransactionWithWallet3(tx *wire.MsgTx, inputs []btcjson.RawTxWitnessInput, hashType SigHashType) (*wire.MsgTx, bool, error) { @@ -882,3 +883,202 @@ func (c *Client) DecodeScriptAsync(serializedScript []byte) FutureDecodeScriptRe func (c *Client) DecodeScript(serializedScript []byte) (*btcjson.DecodeScriptResult, error) { return c.DecodeScriptAsync(serializedScript).Receive() } + +// FutureTestMempoolAcceptResult is a future promise to deliver the result +// of a TestMempoolAccept RPC invocation (or an applicable error). 
+type FutureTestMempoolAcceptResult chan *Response + +// Receive waits for the Response promised by the future and returns the +// response from TestMempoolAccept. +func (r FutureTestMempoolAcceptResult) Receive() ( + []*btcjson.TestMempoolAcceptResult, error) { + + response, err := ReceiveFuture(r) + if err != nil { + return nil, err + } + + // Unmarshal as an array of TestMempoolAcceptResult items. + var results []*btcjson.TestMempoolAcceptResult + + err = json.Unmarshal(response, &results) + if err != nil { + return nil, err + } + + return results, nil +} + +// TestMempoolAcceptAsync returns an instance of a type that can be used to get +// the result of the RPC at some future time by invoking the Receive function +// on the returned instance. +// +// See TestMempoolAccept for the blocking version and more details. +func (c *Client) TestMempoolAcceptAsync(txns []*wire.MsgTx, + maxFeeRate btcjson.BTCPerkvB) FutureTestMempoolAcceptResult { + + // Due to differences in the testmempoolaccept API for different + // backends, we'll need to inspect our version and construct the + // appropriate request. + version, err := c.BackendVersion() + if err != nil { + return newFutureError(err) + } + + log.Debugf("TestMempoolAcceptAsync: backend version %s", version) + + // Exit early if the version is below 22.0.0. + // + // Based on the history of `testmempoolaccept` in bitcoind, + // - introduced in 0.17.0 + // - unchanged in 0.18.0 + // - allowhighfees(bool) param is changed to maxfeerate(numeric) in + // 0.19.0 + // - unchanged in 0.20.0 + // - added fees and vsize fields in its response in 0.21.0 + // - allow more than one txes in param rawtx and added package-error + // and wtxid fields in its response in 0.22.0 + // - unchanged in 0.23.0 + // - unchanged in 0.24.0 + // - added effective-feerate and effective-includes fields in its + // response in 0.25.0 + // + // We decide to not support this call for versions below 22.0.0. 
as the + // request/response formats are very different. + if !version.SupportTestMempoolAccept() { + err := fmt.Errorf("%w: %v", ErrBackendVersion, version) + return newFutureError(err) + } + + // The maximum number of transactions allowed is 25. + if len(txns) > 25 { + err := fmt.Errorf("%w: too many transactions provided", + ErrInvalidParam) + return newFutureError(err) + } + + // Exit early if an empty array of transactions is provided. + if len(txns) == 0 { + err := fmt.Errorf("%w: no transactions provided", + ErrInvalidParam) + return newFutureError(err) + } + + // Iterate all the transactions and turn them into hex strings. + rawTxns := make([]string, 0, len(txns)) + for _, tx := range txns { + // Serialize the transaction and convert to hex string. + buf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize())) + + // TODO(yy): add similar checks found in `BtcDecode` to + // `BtcEncode` - atm it just serializes bytes without any + // bitcoin-specific checks. + if err := tx.Serialize(buf); err != nil { + err = fmt.Errorf("%w: %v", ErrInvalidParam, err) + return newFutureError(err) + } + + rawTx := hex.EncodeToString(buf.Bytes()) + rawTxns = append(rawTxns, rawTx) + + // Sanity check the provided tx is valid, which can be removed + // once we have similar checks added in `BtcEncode`. + // + // NOTE: must be performed after buf.Bytes is copied above. + // + // TODO(yy): remove it once the above TODO is addressed. + if err := tx.Deserialize(buf); err != nil { + err = fmt.Errorf("%w: %v", ErrInvalidParam, err) + return newFutureError(err) + } + } + + cmd := btcjson.NewTestMempoolAcceptCmd(rawTxns, maxFeeRate) + + return c.SendCmd(cmd) +} + +// TestMempoolAccept returns result of mempool acceptance tests indicating if +// raw transaction(s) would be accepted by mempool. +// +// If multiple transactions are passed in, parents must come before children +// and package policies apply: the transactions cannot conflict with any +// mempool transactions or each other. 
+// +// If one transaction fails, other transactions may not be fully validated (the +// 'allowed' key will be blank). +// +// The maximum number of transactions allowed is 25. +func (c *Client) TestMempoolAccept(txns []*wire.MsgTx, + maxFeeRate btcjson.BTCPerkvB) ([]*btcjson.TestMempoolAcceptResult, error) { + + return c.TestMempoolAcceptAsync(txns, maxFeeRate).Receive() +} + +// FutureGetTxSpendingPrevOut is a future promise to deliver the result of a +// GetTxSpendingPrevOut RPC invocation (or an applicable error). +type FutureGetTxSpendingPrevOut chan *Response + +// Receive waits for the Response promised by the future and returns the +// response from GetTxSpendingPrevOut. +func (r FutureGetTxSpendingPrevOut) Receive() ( + []*btcjson.GetTxSpendingPrevOutResult, error) { + + response, err := ReceiveFuture(r) + if err != nil { + return nil, err + } + + // Unmarshal as an array of GetTxSpendingPrevOutResult items. + var results []*btcjson.GetTxSpendingPrevOutResult + + err = json.Unmarshal(response, &results) + if err != nil { + return nil, err + } + + return results, nil +} + +// GetTxSpendingPrevOutAsync returns an instance of a type that can be used to +// get the result of the RPC at some future time by invoking the Receive +// function on the returned instance. +// +// See GetTxSpendingPrevOut for the blocking version and more details. +func (c *Client) GetTxSpendingPrevOutAsync( + outpoints []wire.OutPoint) FutureGetTxSpendingPrevOut { + + // Due to differences in the testmempoolaccept API for different + // backends, we'll need to inspect our version and construct the + // appropriate request. + version, err := c.BackendVersion() + if err != nil { + return newFutureError(err) + } + + log.Debugf("GetTxSpendingPrevOutAsync: backend version %s", version) + + // Exit early if the version is below 24.0.0. 
+ if !version.SupportGetTxSpendingPrevOut() { + err := fmt.Errorf("%w: %v", ErrBackendVersion, version) + return newFutureError(err) + } + + // Exit early if an empty array of outpoints is provided. + if len(outpoints) == 0 { + err := fmt.Errorf("%w: no outpoints provided", ErrInvalidParam) + return newFutureError(err) + } + + cmd := btcjson.NewGetTxSpendingPrevOutCmd(outpoints) + + return c.SendCmd(cmd) +} + +// GetTxSpendingPrevOut returns the result from calling `gettxspendingprevout` +// RPC. +func (c *Client) GetTxSpendingPrevOut(outpoints []wire.OutPoint) ( + []*btcjson.GetTxSpendingPrevOutResult, error) { + + return c.GetTxSpendingPrevOutAsync(outpoints).Receive() +} diff --git a/rpcclient/wallet.go b/rpcclient/wallet.go index 7b7e7212c9..f43c20074a 100644 --- a/rpcclient/wallet.go +++ b/rpcclient/wallet.go @@ -2610,7 +2610,7 @@ func (c *Client) GetInfo() (*btcjson.InfoWalletResult, error) { return c.GetInfoAsync().Receive() } -// FutureImportPubKeyResult is a future promise to deliver the result of an +// FutureWalletCreateFundedPsbtResult is a future promise to deliver the result of an // WalletCreateFundedPsbt RPC invocation (or an applicable error). type FutureWalletCreateFundedPsbtResult chan *Response @@ -2661,7 +2661,7 @@ func (c *Client) WalletCreateFundedPsbt( type FutureWalletProcessPsbtResult chan *Response // Receive waits for the Response promised by the future and returns an updated -// PSBT with signed inputs from the wallet and a boolen indicating if the +// PSBT with signed inputs from the wallet and a boolean indicating if the // transaction has a complete set of signatures. 
func (r FutureWalletProcessPsbtResult) Receive() (*btcjson.WalletProcessPsbtResult, error) { res, err := ReceiveFuture(r) diff --git a/rpcclient/zmq.go b/rpcclient/zmq.go index 1a5405eb77..52adeed76b 100644 --- a/rpcclient/zmq.go +++ b/rpcclient/zmq.go @@ -24,7 +24,7 @@ func (r FutureGetZmqNotificationsResult) Receive() (btcjson.GetZmqNotificationRe return notifications, nil } -// GetZmqNotificationsAsync returns an instance ofa type that can be used to get +// GetZmqNotificationsAsync returns an instance of a type that can be used to get // the result of a custom RPC request at some future time by invoking the Receive // function on the returned instance. // diff --git a/rpcserver.go b/rpcserver.go index 94980c463f..a3c4062bcc 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -75,6 +75,10 @@ const ( // maxProtocolVersion is the max protocol version the server supports. maxProtocolVersion = 70002 + + // defaultMaxFeeRate is the default value to use(0.1 BTC/kvB) when the + // `MaxFee` field is not set when calling `testmempoolaccept`. 
+ defaultMaxFeeRate = 0.1 ) var ( @@ -166,8 +170,10 @@ var rpcHandlersBeforeInit = map[string]commandHandler{ "getrawtransaction": handleGetRawTransaction, "gettxout": handleGetTxOut, "help": handleHelp, + "invalidateblock": handleInvalidateBlock, "node": handleNode, "ping": handlePing, + "reconsiderblock": handleReconsiderBlock, "searchrawtransactions": handleSearchRawTransactions, "sendrawtransaction": handleSendRawTransaction, "setgenerate": handleSetGenerate, @@ -179,6 +185,8 @@ var rpcHandlersBeforeInit = map[string]commandHandler{ "verifychain": handleVerifyChain, "verifymessage": handleVerifyMessage, "version": handleVersion, + "testmempoolaccept": handleTestMempoolAccept, + "gettxspendingprevout": handleGetTxSpendingPrevOut, } // list of commands that we recognize, but for which btcd has no support because @@ -235,9 +243,7 @@ var rpcUnimplemented = map[string]struct{}{ "getmempoolentry": {}, "getnetworkinfo": {}, "getwork": {}, - "invalidateblock": {}, "preciousblock": {}, - "reconsiderblock": {}, } // Commands that are available to a limited user @@ -278,6 +284,8 @@ var rpcLimited = map[string]struct{}{ "getrawmempool": {}, "getrawtransaction": {}, "gettxout": {}, + "invalidateblock": {}, + "reconsiderblock": {}, "searchrawtransactions": {}, "sendrawtransaction": {}, "submitblock": {}, @@ -640,23 +648,6 @@ func handleDebugLevel(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) return "Done.", nil } -// witnessToHex formats the passed witness stack as a slice of hex-encoded -// strings to be used in a JSON response. -func witnessToHex(witness wire.TxWitness) []string { - // Ensure nil is returned when there are no entries versus an empty - // slice so it can properly be omitted as necessary. 
- if len(witness) == 0 { - return nil - } - - result := make([]string, 0, len(witness)) - for _, wit := range witness { - result = append(result, hex.EncodeToString(wit)) - } - - return result -} - // createVinList returns a slice of JSON objects for the inputs of the passed // transaction. func createVinList(mtx *wire.MsgTx) []btcjson.Vin { @@ -666,7 +657,7 @@ func createVinList(mtx *wire.MsgTx) []btcjson.Vin { txIn := mtx.TxIn[0] vinList[0].Coinbase = hex.EncodeToString(txIn.SignatureScript) vinList[0].Sequence = txIn.Sequence - vinList[0].Witness = witnessToHex(txIn.Witness) + vinList[0].Witness = txIn.Witness.ToHexStrings() return vinList } @@ -686,7 +677,7 @@ func createVinList(mtx *wire.MsgTx) []btcjson.Vin { } if mtx.HasWitness() { - vinEntry.Witness = witnessToHex(txIn.Witness) + vinEntry.Witness = txIn.Witness.ToHexStrings() } } @@ -840,7 +831,7 @@ func handleDecodeScript(s *rpcServer, cmd interface{}, closeChan <-chan struct{} // Get information about the script. // Ignore the error here since an error means the script couldn't parse - // and there is no additinal information about it anyways. + // and there is no additional information about it anyways. scriptClass, addrs, reqSigs, _ := txscript.ExtractPkScriptAddrs(script, s.cfg.ChainParams) addresses := make([]string, len(addrs)) @@ -2861,6 +2852,23 @@ func handleGetTxOut(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (i return txOutReply, nil } +// handleInvalidateBlock implements the invalidateblock command. 
+func handleInvalidateBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*btcjson.InvalidateBlockCmd) + + invalidateHash, err := chainhash.NewHashFromStr(c.BlockHash) + if err != nil { + return nil, &btcjson.RPCError{ + Code: btcjson.ErrRPCDeserialization, + Message: fmt.Sprintf("Failed to deserialize blockhash from string of %s", + invalidateHash), + } + } + + err = s.cfg.Chain.InvalidateBlock(invalidateHash) + return nil, err +} + // handleHelp implements the help command. func handleHelp(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { c := cmd.(*btcjson.HelpCmd) @@ -3046,7 +3054,7 @@ func createVinListPrevOut(s *rpcServer, mtx *wire.MsgTx, chainParams *chaincfg.P } if len(txIn.Witness) != 0 { - vinEntry.Witness = witnessToHex(txIn.Witness) + vinEntry.Witness = txIn.Witness.ToHexStrings() } // Add the entry to the list now if it already passed the filter @@ -3134,6 +3142,23 @@ func fetchMempoolTxnsForAddress(s *rpcServer, addr btcutil.Address, numToSkip, n return mpTxns[numToSkip:rangeEnd], numToSkip } +// handleReconsiderBlock implements the reconsiderblock command. +func handleReconsiderBlock(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { + c := cmd.(*btcjson.ReconsiderBlockCmd) + + reconsiderHash, err := chainhash.NewHashFromStr(c.BlockHash) + if err != nil { + return nil, &btcjson.RPCError{ + Code: btcjson.ErrRPCDeserialization, + Message: fmt.Sprintf("Failed to deserialize blockhash from string of %s", + reconsiderHash), + } + } + + err = s.cfg.Chain.ReconsiderBlock(reconsiderHash) + return nil, err +} + // handleSearchRawTransactions implements the searchrawtransactions command. func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (interface{}, error) { // Respond with an error if the address index is not enabled. 
@@ -3214,7 +3239,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan addressTxns := make([]retrievedTx, 0, numRequested) if reverse { // Transactions in the mempool are not in a block header yet, - // so the block header field in the retieved transaction struct + // so the block header field in the retrieved transaction struct // is left nil. mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr, uint32(numToSkip), uint32(numRequested)) @@ -3268,7 +3293,7 @@ func handleSearchRawTransactions(s *rpcServer, cmd interface{}, closeChan <-chan // order and the number of results is still under the number requested. if !reverse && len(addressTxns) < numRequested { // Transactions in the mempool are not in a block header yet, - // so the block header field in the retieved transaction struct + // so the block header field in the retrieved transaction struct // is left nil. mpTxns, mpSkipped := fetchMempoolTxnsForAddress(s, addr, uint32(numToSkip)-numSkipped, uint32(numRequested- @@ -3806,6 +3831,168 @@ func handleVersion(s *rpcServer, cmd interface{}, closeChan <-chan struct{}) (in return result, nil } +// handleTestMempoolAccept implements the testmempoolaccept command. +func handleTestMempoolAccept(s *rpcServer, cmd interface{}, + closeChan <-chan struct{}) (interface{}, error) { + + c := cmd.(*btcjson.TestMempoolAcceptCmd) + + // Create txns to hold the decoded tx. + txns := make([]*btcutil.Tx, 0, len(c.RawTxns)) + + // Iterate the raw hex slice and decode them. 
+	for _, rawTx := range c.RawTxns {
+		rawBytes, err := hex.DecodeString(rawTx)
+		if err != nil {
+			return nil, rpcDecodeHexError(rawTx)
+		}
+
+		tx, err := btcutil.NewTxFromBytes(rawBytes)
+		if err != nil {
+			return nil, &btcjson.RPCError{
+				Code:    btcjson.ErrRPCDeserialization,
+				Message: "TX decode failed: " + err.Error(),
+			}
+		}
+
+		txns = append(txns, tx)
+	}
+
+	results := make([]*btcjson.TestMempoolAcceptResult, 0, len(txns))
+	for _, tx := range txns {
+		// Create a test result item.
+		item := &btcjson.TestMempoolAcceptResult{
+			Txid:  tx.Hash().String(),
+			Wtxid: tx.WitnessHash().String(),
+		}
+
+		// Check the mempool acceptance.
+		result, err := s.cfg.TxMemPool.CheckMempoolAcceptance(tx)
+
+		// If an error is returned, this tx is not allowed, hence we
+		// record the reason.
+		if err != nil {
+			item.Allowed = false
+
+			// TODO(yy): differentiate the errors and put package
+			// error in `PackageError` field.
+			item.RejectReason = err.Error()
+
+			results = append(results, item)
+
+			// Move to the next transaction.
+			continue
+		}
+
+		// If this transaction is an orphan, it's not allowed.
+		if result.MissingParents != nil {
+			item.Allowed = false
+
+			// NOTE: "missing-inputs" is what bitcoind returns
+			// here, so we mimic the same error message.
+			item.RejectReason = "missing-inputs"
+
+			results = append(results, item)
+
+			// Move to the next transaction.
+			continue
+		}
+
+		// Otherwise this tx is allowed if its fee rate is below the
+		// max fee rate, we now patch the fields in
+		// `TestMempoolAcceptItem` as much as possible.
+		//
+		// Calculate the fee field and validate its fee rate.
+		item.Fees, item.Allowed = validateFeeRate(
+			result.TxFee, result.TxSize, c.MaxFeeRate,
+		)
+
+		// If the fee rate check passed, assign the corresponding
+		// fields.
+		if item.Allowed {
+			item.Vsize = int32(result.TxSize)
+		} else {
+			// NOTE: "max-fee-exceeded" is what bitcoind returns
+			// here, so we mimic the same error message.
+ item.RejectReason = "max-fee-exceeded" + } + + results = append(results, item) + } + + return results, nil +} + +// handleGetTxSpendingPrevOut implements the gettxspendingprevout command. +func handleGetTxSpendingPrevOut(s *rpcServer, cmd interface{}, + closeChan <-chan struct{}) (interface{}, error) { + + c := cmd.(*btcjson.GetTxSpendingPrevOutCmd) + + // Convert the outpoints. + ops := make([]wire.OutPoint, 0, len(c.Outputs)) + for _, o := range c.Outputs { + hash, err := chainhash.NewHashFromStr(o.Txid) + if err != nil { + return nil, err + } + + ops = append(ops, wire.OutPoint{ + Hash: *hash, + Index: o.Vout, + }) + } + + // Check mempool spend for all the outpoints. + results := make([]*btcjson.GetTxSpendingPrevOutResult, 0, len(ops)) + for _, op := range ops { + // Create a result entry. + result := &btcjson.GetTxSpendingPrevOutResult{ + Txid: op.Hash.String(), + Vout: op.Index, + } + + // Check the mempool spend. + spendingTx := s.cfg.TxMemPool.CheckSpend(op) + + // Set the spending txid if found. + if spendingTx != nil { + result.SpendingTxid = spendingTx.Hash().String() + } + + results = append(results, result) + } + + return results, nil +} + +// validateFeeRate checks that the fee rate used by transaction doesn't exceed +// the max fee rate specified. +func validateFeeRate(feeSats btcutil.Amount, txSize int64, + maxFeeRate float64) (*btcjson.TestMempoolAcceptFees, bool) { + + // Calculate fee rate in sats/kvB. + feeRateSatsPerKVB := feeSats * 1e3 / btcutil.Amount(txSize) + + // Convert sats/vB to BTC/kvB. + feeRate := feeRateSatsPerKVB.ToBTC() + + // Get the max fee rate, if not provided, default to 0.1 BTC/kvB. + if maxFeeRate == 0 { + maxFeeRate = defaultMaxFeeRate + } + + // If the fee rate is above the max fee rate, this tx is not accepted. 
+ if feeRate > maxFeeRate { + return nil, false + } + + return &btcjson.TestMempoolAcceptFees{ + Base: feeSats.ToBTC(), + EffectiveFeeRate: feeRate, + }, true +} + // rpcServer provides a concurrent safe RPC server to a chain server. type rpcServer struct { started int32 @@ -4168,7 +4355,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin // change the read deadline for the new connection and having one breaks // long polling. However, not having a read deadline on the initial // connection would mean clients can connect and idle forever. Thus, - // hijack the connecton from the HTTP server, clear the read deadline, + // hijack the connection from the HTTP server, clear the read deadline, // and handle writing the response manually. hj, ok := w.(http.Hijacker) if !ok { @@ -4191,7 +4378,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin // Attempt to parse the raw body into a JSON-RPC request. // Setup a close notifier. Since the connection is hijacked, - // the CloseNotifer on the ResponseWriter is not available. + // the CloseNotifier on the ResponseWriter is not available. closeChan := make(chan struct{}, 1) go func() { _, err = conn.Read(make([]byte, 1)) @@ -4241,7 +4428,7 @@ func (s *rpcServer) jsonRPCRead(w http.ResponseWriter, r *http.Request, isAdmin // Btcd does not respond to any request without and "id" or "id":null, // regardless the indicated JSON-RPC protocol version unless RPC quirks // are enabled. With RPC quirks enabled, such requests will be responded - // to if the reqeust does not indicate JSON-RPC version. + // to if the request does not indicate JSON-RPC version. // // RPC quirks can be enabled by the user to avoid compatibility issues // with software relying on Core's behavior. @@ -4479,10 +4666,10 @@ func genCertPair(certFile, keyFile string) error { } // Write cert and key files. 
- if err = ioutil.WriteFile(certFile, cert, 0666); err != nil { + if err = os.WriteFile(certFile, cert, 0666); err != nil { return err } - if err = ioutil.WriteFile(keyFile, key, 0600); err != nil { + if err = os.WriteFile(keyFile, key, 0600); err != nil { os.Remove(certFile) return err } @@ -4633,7 +4820,7 @@ type rpcserverConfig struct { DB database.DB // TxMemPool defines the transaction memory pool to interact with. - TxMemPool *mempool.TxPool + TxMemPool mempool.TxMempool // These fields allow the RPC server to interface with mining. // diff --git a/rpcserver_test.go b/rpcserver_test.go new file mode 100644 index 0000000000..0aa9391321 --- /dev/null +++ b/rpcserver_test.go @@ -0,0 +1,497 @@ +package main + +import ( + "encoding/hex" + "errors" + "testing" + + "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/mempool" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" +) + +// TestHandleTestMempoolAcceptFailDecode checks that when invalid hex string is +// used as the raw txns, the corresponding error is returned. +func TestHandleTestMempoolAcceptFailDecode(t *testing.T) { + t.Parallel() + + require := require.New(t) + + // Create a testing server. + s := &rpcServer{} + + testCases := []struct { + name string + txns []string + expectedErrCode btcjson.RPCErrorCode + }{ + { + name: "hex decode fail", + txns: []string{"invalid"}, + expectedErrCode: btcjson.ErrRPCDecodeHexString, + }, + { + name: "tx decode fail", + txns: []string{"696e76616c6964"}, + expectedErrCode: btcjson.ErrRPCDeserialization, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Create a request that uses invalid raw txns. + cmd := btcjson.NewTestMempoolAcceptCmd(tc.txns, 0) + + // Call the method under test. 
+ closeChan := make(chan struct{}) + result, err := handleTestMempoolAccept( + s, cmd, closeChan, + ) + + // Ensure the expected error is returned. + require.Error(err) + rpcErr, ok := err.(*btcjson.RPCError) + require.True(ok) + require.Equal(tc.expectedErrCode, rpcErr.Code) + + // No result should be returned. + require.Nil(result) + }) + } +} + +var ( + // TODO(yy): make a `btctest` package and move these testing txns there + // so they be used in other tests. + // + // txHex1 is taken from `txscript/data/tx_valid.json`. + txHex1 = "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b" + + "49aa43ad90ba26000000000490047304402203f16c6f40162ab686621ef3" + + "000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507a" + + "c48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01ffffffff0" + + "140420f00000000001976a914660d4ef3a743e3e696ad990364e555c271a" + + "d504b88ac00000000" + + // txHex2 is taken from `txscript/data/tx_valid.json`. + txHex2 = "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b" + + "49aa43ad90ba260000000004a0048304402203f16c6f40162ab686621ef3" + + "000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc3507a" + + "c48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2bab01fffffff" + + "f0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c27" + + "1ad504b88ac00000000" + + // txHex3 is taken from `txscript/data/tx_valid.json`. + txHex3 = "0100000001b14bdcbc3e01bdaad36cc08e81e69c82e1060bc14e518db2b" + + "49aa43ad90ba260000000004a01ff47304402203f16c6f40162ab686621e" + + "f3000b04e75418a0c0cb2d8aebeac894ae360ac1e780220ddc15ecdfc350" + + "7ac48e1681a33eb60996631bf6bf5bc0a0682c4db743ce7ca2b01fffffff" + + "f0140420f00000000001976a914660d4ef3a743e3e696ad990364e555c27" + + "1ad504b88ac00000000" +) + +// decodeTxHex decodes the given hex string into a transaction. 
+func decodeTxHex(t *testing.T, txHex string) *btcutil.Tx {
+	rawBytes, err := hex.DecodeString(txHex)
+	require.NoError(t, err)
+	tx, err := btcutil.NewTxFromBytes(rawBytes)
+	require.NoError(t, err)
+
+	return tx
+}
+
+// TestHandleTestMempoolAcceptMixedResults checks that when different txns get
+// different responses from calling the mempool method `CheckMempoolAcceptance`
+// their results are correctly returned.
+func TestHandleTestMempoolAcceptMixedResults(t *testing.T) {
+	t.Parallel()
+
+	require := require.New(t)
+
+	// Create a mock mempool.
+	mm := &mempool.MockTxMempool{}
+
+	// Create a testing server with the mock mempool.
+	s := &rpcServer{cfg: rpcserverConfig{
+		TxMemPool: mm,
+	}}
+
+	// Decode the hex so we can assert the mock mempool is called with it.
+	tx1 := decodeTxHex(t, txHex1)
+	tx2 := decodeTxHex(t, txHex2)
+	tx3 := decodeTxHex(t, txHex3)
+
+	// Create a slice to hold the expected results. We will use three txns
+	// so we expect three results.
+	expectedResults := make([]*btcjson.TestMempoolAcceptResult, 3)
+
+	// We now mock the first call to `CheckMempoolAcceptance` to return an
+	// error.
+	dummyErr := errors.New("dummy error")
+	mm.On("CheckMempoolAcceptance", tx1).Return(nil, dummyErr).Once()
+
+	// Since the call failed, we expect the first result to give us the
+	// error.
+	expectedResults[0] = &btcjson.TestMempoolAcceptResult{
+		Txid:         tx1.Hash().String(),
+		Wtxid:        tx1.WitnessHash().String(),
+		Allowed:      false,
+		RejectReason: dummyErr.Error(),
+	}
+
+	// We mock the second call to `CheckMempoolAcceptance` to return a
+	// result saying the tx is missing inputs.
+	mm.On("CheckMempoolAcceptance", tx2).Return(
+		&mempool.MempoolAcceptResult{
+			MissingParents: []*chainhash.Hash{},
+		}, nil,
+	).Once()
+
+	// We expect the second result to give us the missing-inputs error.
+ expectedResults[1] = &btcjson.TestMempoolAcceptResult{ + Txid: tx2.Hash().String(), + Wtxid: tx2.WitnessHash().String(), + Allowed: false, + RejectReason: "missing-inputs", + } + + // We mock the third call to `CheckMempoolAcceptance` to return a + // result saying the tx is allowed. + const feeSats = btcutil.Amount(1000) + mm.On("CheckMempoolAcceptance", tx3).Return( + &mempool.MempoolAcceptResult{ + TxFee: feeSats, + TxSize: 100, + }, nil, + ).Once() + + // We expect the third result to give us the fee details. + expectedResults[2] = &btcjson.TestMempoolAcceptResult{ + Txid: tx3.Hash().String(), + Wtxid: tx3.WitnessHash().String(), + Allowed: true, + Vsize: 100, + Fees: &btcjson.TestMempoolAcceptFees{ + Base: feeSats.ToBTC(), + EffectiveFeeRate: feeSats.ToBTC() * 1e3 / 100, + }, + } + + // Create a mock request with default max fee rate of 0.1 BTC/KvB. + cmd := btcjson.NewTestMempoolAcceptCmd( + []string{txHex1, txHex2, txHex3}, 0.1, + ) + + // Call the method handler and assert the expected results are + // returned. + closeChan := make(chan struct{}) + results, err := handleTestMempoolAccept(s, cmd, closeChan) + require.NoError(err) + require.Equal(expectedResults, results) + + // Assert the mocked method is called as expected. + mm.AssertExpectations(t) +} + +// TestValidateFeeRate checks that `validateFeeRate` behaves as expected. +func TestValidateFeeRate(t *testing.T) { + t.Parallel() + + const ( + // testFeeRate is in BTC/kvB. + testFeeRate = 0.1 + + // testTxSize is in vb. + testTxSize = 100 + + // testFeeSats is in sats. + // We have 0.1BTC/kvB = + // 0.1 * 1e8 sats/kvB = + // 0.1 * 1e8 / 1e3 sats/vb = 0.1 * 1e5 sats/vb. + testFeeSats = btcutil.Amount(testFeeRate * 1e5 * testTxSize) + ) + + testCases := []struct { + name string + feeSats btcutil.Amount + txSize int64 + maxFeeRate float64 + expectedFees *btcjson.TestMempoolAcceptFees + allowed bool + }{ + { + // When the fee rate(0.1) is above the max fee + // rate(0.01), we expect a nil result and false. 
+ name: "fee rate above max", + feeSats: testFeeSats, + txSize: testTxSize, + maxFeeRate: testFeeRate / 10, + expectedFees: nil, + allowed: false, + }, + { + // When the fee rate(0.1) is no greater than the max + // fee rate(0.1), we expect a result and true. + name: "fee rate below max", + feeSats: testFeeSats, + txSize: testTxSize, + maxFeeRate: testFeeRate, + expectedFees: &btcjson.TestMempoolAcceptFees{ + Base: testFeeSats.ToBTC(), + EffectiveFeeRate: testFeeRate, + }, + allowed: true, + }, + { + // When the fee rate(1) is above the default max fee + // rate(0.1), we expect a nil result and false. + name: "fee rate above default max", + feeSats: testFeeSats, + txSize: testTxSize / 10, + expectedFees: nil, + allowed: false, + }, + { + // When the fee rate(0.1) is no greater than the + // default max fee rate(0.1), we expect a result and + // true. + name: "fee rate below default max", + feeSats: testFeeSats, + txSize: testTxSize, + expectedFees: &btcjson.TestMempoolAcceptFees{ + Base: testFeeSats.ToBTC(), + EffectiveFeeRate: testFeeRate, + }, + allowed: true, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + require := require.New(t) + + result, allowed := validateFeeRate( + tc.feeSats, tc.txSize, tc.maxFeeRate, + ) + + require.Equal(tc.expectedFees, result) + require.Equal(tc.allowed, allowed) + }) + } +} + +// TestHandleTestMempoolAcceptFees checks that the `Fees` field is correctly +// populated based on the max fee rate and the tx being checked. +func TestHandleTestMempoolAcceptFees(t *testing.T) { + t.Parallel() + + // Create a mock mempool. + mm := &mempool.MockTxMempool{} + + // Create a testing server with the mock mempool. + s := &rpcServer{cfg: rpcserverConfig{ + TxMemPool: mm, + }} + + const ( + // Set transaction's fee rate to be 0.2BTC/kvB. + feeRate = defaultMaxFeeRate * 2 + + // txSize is 100vb. + txSize = 100 + + // feeSats is 2e6 sats. 
+ feeSats = feeRate * 1e8 * txSize / 1e3 + ) + + testCases := []struct { + name string + maxFeeRate float64 + txHex string + rejectReason string + allowed bool + }{ + { + // When the fee rate(0.2) used by the tx is below the + // max fee rate(2) specified, the result should allow + // it. + name: "below max fee rate", + maxFeeRate: feeRate * 10, + txHex: txHex1, + allowed: true, + }, + { + // When the fee rate(0.2) used by the tx is above the + // max fee rate(0.02) specified, the result should + // disallow it. + name: "above max fee rate", + maxFeeRate: feeRate / 10, + txHex: txHex1, + allowed: false, + rejectReason: "max-fee-exceeded", + }, + { + // When the max fee rate is not set, the default + // 0.1BTC/kvB is used and the fee rate(0.2) used by the + // tx is above it, the result should disallow it. + name: "above default max fee rate", + txHex: txHex1, + allowed: false, + rejectReason: "max-fee-exceeded", + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + require := require.New(t) + + // Decode the hex so we can assert the mock mempool is + // called with it. + tx := decodeTxHex(t, txHex1) + + // We mock the call to `CheckMempoolAcceptance` to + // return the result. + mm.On("CheckMempoolAcceptance", tx).Return( + &mempool.MempoolAcceptResult{ + TxFee: feeSats, + TxSize: txSize, + }, nil, + ).Once() + + // We expect the result to give us the fee + // details. + expected := &btcjson.TestMempoolAcceptResult{ + Txid: tx.Hash().String(), + Wtxid: tx.WitnessHash().String(), + Allowed: tc.allowed, + } + + if tc.allowed { + expected.Vsize = txSize + expected.Fees = &btcjson.TestMempoolAcceptFees{ + Base: feeSats / 1e8, + EffectiveFeeRate: feeRate, + } + } else { + expected.RejectReason = tc.rejectReason + } + + // Create a mock request with specified max fee rate. 
+ cmd := btcjson.NewTestMempoolAcceptCmd( + []string{txHex1}, tc.maxFeeRate, + ) + + // Call the method handler and assert the expected + // result is returned. + closeChan := make(chan struct{}) + r, err := handleTestMempoolAccept(s, cmd, closeChan) + require.NoError(err) + + // Check the interface type. + results, ok := r.([]*btcjson.TestMempoolAcceptResult) + require.True(ok) + + // Expect exactly one result. + require.Len(results, 1) + + // Check the result is returned as expected. + require.Equal(expected, results[0]) + + // Assert the mocked method is called as expected. + mm.AssertExpectations(t) + }) + } +} + +// TestGetTxSpendingPrevOut checks that handleGetTxSpendingPrevOut handles the +// cmd as expected. +func TestGetTxSpendingPrevOut(t *testing.T) { + t.Parallel() + + require := require.New(t) + + // Create a mock mempool. + mm := &mempool.MockTxMempool{} + defer mm.AssertExpectations(t) + + // Create a testing server with the mock mempool. + s := &rpcServer{cfg: rpcserverConfig{ + TxMemPool: mm, + }} + + // First, check the error case. + // + // Create a request that will cause an error. + cmd := &btcjson.GetTxSpendingPrevOutCmd{ + Outputs: []*btcjson.GetTxSpendingPrevOutCmdOutput{ + {Txid: "invalid"}, + }, + } + + // Call the method handler and assert the error is returned. + closeChan := make(chan struct{}) + results, err := handleGetTxSpendingPrevOut(s, cmd, closeChan) + require.Error(err) + require.Nil(results) + + // We now check the normal case. Two outputs will be tested - one found + // in mempool and the other not. + // + // Decode the hex so we can assert the mock mempool is called with it. + tx := decodeTxHex(t, txHex1) + + // Create testing outpoints. + opInMempool := wire.OutPoint{Hash: chainhash.Hash{1}, Index: 1} + opNotInMempool := wire.OutPoint{Hash: chainhash.Hash{2}, Index: 1} + + // We only expect to see one output being found as spent in mempool. 
+ expectedResults := []*btcjson.GetTxSpendingPrevOutResult{ + { + Txid: opInMempool.Hash.String(), + Vout: opInMempool.Index, + SpendingTxid: tx.Hash().String(), + }, + { + Txid: opNotInMempool.Hash.String(), + Vout: opNotInMempool.Index, + }, + } + + // We mock the first call to `CheckSpend` to return a result saying the + // output is found. + mm.On("CheckSpend", opInMempool).Return(tx).Once() + + // We mock the second call to `CheckSpend` to return a result saying the + // output is NOT found. + mm.On("CheckSpend", opNotInMempool).Return(nil).Once() + + // Create a request with the above outputs. + cmd = &btcjson.GetTxSpendingPrevOutCmd{ + Outputs: []*btcjson.GetTxSpendingPrevOutCmdOutput{ + { + Txid: opInMempool.Hash.String(), + Vout: opInMempool.Index, + }, + { + Txid: opNotInMempool.Hash.String(), + Vout: opNotInMempool.Index, + }, + }, + } + + // Call the method handler and assert the expected result is returned. + closeChan = make(chan struct{}) + results, err = handleGetTxSpendingPrevOut(s, cmd, closeChan) + require.NoError(err) + require.Equal(expectedResults, results) +} diff --git a/rpcserverhelp.go b/rpcserverhelp.go index f1203de8d9..71f96e99fd 100644 --- a/rpcserverhelp.go +++ b/rpcserverhelp.go @@ -376,7 +376,7 @@ var helpDescsEnUS = map[string]string{ // GetCurrentNetCmd help. "getcurrentnet--synopsis": "Get bitcoin network the server is running on.", - "getcurrentnet--result0": "The network identifer", + "getcurrentnet--result0": "The network identifier", // GetDifficultyCmd help. "getdifficulty--synopsis": "Returns the proof-of-work difficulty as a multiple of the minimum difficulty.", @@ -544,6 +544,10 @@ var helpDescsEnUS = map[string]string{ "gettxout-vout": "The index of the output", "gettxout-includemempool": "Include the mempool when true", + // InvalidateBlockCmd help. + "invalidateblock--synopsis": "Invalidates the block of the given block hash. 
To re-validate the invalidated block, use the reconsiderblock rpc", + "invalidateblock-blockhash": "The block hash of the block to invalidate", + // HelpCmd help. "help--synopsis": "Returns a list of all commands or help for a specified command.", "help-command": "The command to retrieve help for", @@ -681,6 +685,10 @@ var helpDescsEnUS = map[string]string{ "loadtxfilter-addresses": "Array of addresses to add to the transaction filter", "loadtxfilter-outpoints": "Array of outpoints to add to the transaction filter", + // ReconsiderBlockCmd help. + "reconsiderblock--synopsis": "Reconsiders the block of the given block hash. Can be used to re-validate blocks invalidated with invalidateblock", + "reconsiderblock-blockhash": "The block hash of the block to reconsider", + // Rescan help. "rescan--synopsis": "Rescan block chain for transactions to addresses.\n" + "When the endblock parameter is omitted, the rescan continues through the best block in the main chain.\n" + @@ -717,6 +725,34 @@ var helpDescsEnUS = map[string]string{ "versionresult-patch": "The patch component of the JSON-RPC API version", "versionresult-prerelease": "Prerelease info about the current build", "versionresult-buildmetadata": "Metadata about the current build", + + // TestMempoolAcceptCmd help. + "testmempoolaccept--synopsis": "Returns result of mempool acceptance tests indicating if raw transaction(s) would be accepted by mempool.", + "testmempoolaccept-rawtxns": "Serialized transactions to test.", + "testmempoolaccept-maxfeerate": "Maximum acceptable fee rate in BTC/kB", + + // TestMempoolAcceptCmd result help. 
+ "testmempoolacceptresult-txid": "The transaction hash in hex.", + "testmempoolacceptresult-wtxid": "The transaction witness hash in hex.", + "testmempoolacceptresult-package-error": "Package validation error, if any (only possible if rawtxs had more than 1 transaction).", + "testmempoolacceptresult-allowed": "Whether the transaction would be accepted to the mempool.", + "testmempoolacceptresult-vsize": "Virtual transaction size as defined in BIP 141.(only present when 'allowed' is true)", + "testmempoolacceptresult-reject-reason": "Rejection string (only present when 'allowed' is false).", + "testmempoolacceptresult-fees": "Transaction fees (only present if 'allowed' is true).", + "testmempoolacceptfees-base": "Transaction fees (only present if 'allowed' is true).", + "testmempoolacceptfees-effective-feerate": "The effective feerate in BTC per KvB.", + "testmempoolacceptfees-effective-includes": "Transactions whose fees and vsizes are included in effective-feerate. Each item is a transaction wtxid in hex.", + + // GetTxSpendingPrevOutCmd help. + "gettxspendingprevout--synopsis": "Scans the mempool to find transactions spending any of the given outputs", + "gettxspendingprevout-outputs": "The transaction outputs that we want to check, and within each, the txid (string) vout (numeric).", + "gettxspendingprevout-txid": "The transaction id", + "gettxspendingprevout-vout": "The output number", + + // GetTxSpendingPrevOutCmd result help. + "gettxspendingprevoutresult-txid": "The transaction hash in hex.", + "gettxspendingprevoutresult-vout": "The output index.", + "gettxspendingprevoutresult-spendingtxid": "The hash of the transaction that spends the output.", } // rpcResultTypes specifies the result types that each RPC command can return. 
@@ -760,7 +796,9 @@ var rpcResultTypes = map[string][]interface{}{ "gettxout": {(*btcjson.GetTxOutResult)(nil)}, "node": nil, "help": {(*string)(nil), (*string)(nil)}, + "invalidateblock": nil, "ping": nil, + "reconsiderblock": nil, "searchrawtransactions": {(*string)(nil), (*[]btcjson.SearchRawTransactionsResult)(nil)}, "sendrawtransaction": {(*string)(nil)}, "setgenerate": nil, @@ -772,6 +810,8 @@ var rpcResultTypes = map[string][]interface{}{ "verifychain": {(*bool)(nil)}, "verifymessage": {(*bool)(nil)}, "version": {(*map[string]btcjson.VersionResult)(nil)}, + "testmempoolaccept": {(*[]btcjson.TestMempoolAcceptResult)(nil)}, + "gettxspendingprevout": {(*[]btcjson.GetTxSpendingPrevOutResult)(nil)}, // Websocket commands. "loadtxfilter": nil, diff --git a/rpcwebsocket.go b/rpcwebsocket.go index aedbcf90b6..02f59d58bf 100644 --- a/rpcwebsocket.go +++ b/rpcwebsocket.go @@ -132,8 +132,8 @@ type wsNotificationManager struct { queueNotification chan interface{} // notificationMsgs feeds notificationHandler with notifications - // and client (un)registeration requests from a queue as well as - // registeration and unregisteration requests from clients. + // and client (un)registration requests from a queue as well as + // registration and unregistration requests from clients. notificationMsgs chan interface{} // Access channel for current number of connected clients. @@ -228,7 +228,7 @@ func (m *wsNotificationManager) NotifyBlockDisconnected(block *btcutil.Block) { // NotifyMempoolTx passes a transaction accepted by mempool to the // notification manager for transaction notification processing. If -// isNew is true, the tx is is a new transaction, rather than one +// isNew is true, the tx is a new transaction, rather than one // added to the mempool during a reorg. 
func (m *wsNotificationManager) NotifyMempoolTx(tx *btcutil.Tx, isNew bool) { n := ¬ificationTxAcceptedByMempool{ @@ -1236,7 +1236,7 @@ type wsResponse struct { // requested notifications to all connected websocket clients. Inbound // messages are read via the inHandler goroutine and generally dispatched to // their own handler. However, certain potentially long-running operations such -// as rescans, are sent to the asyncHander goroutine and are limited to one at a +// as rescans, are sent to the asyncHandler goroutine and are limited to one at a // time. There are two outbound message types - one for responding to client // requests and another for async notifications. Responses to client requests // use SendMessage which employs a buffered channel thereby limiting the number @@ -2144,7 +2144,7 @@ func handleNotifySpent(wsc *wsClient, icmd interface{}) (interface{}, error) { return nil, nil } -// handleNotifyNewTransations implements the notifynewtransactions command +// handleNotifyNewTransactions implements the notifynewtransactions command // extension for websocket connections. func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, error) { cmd, ok := icmd.(*btcjson.NotifyNewTransactionsCmd) @@ -2157,7 +2157,7 @@ func handleNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, return nil, nil } -// handleStopNotifyNewTransations implements the stopnotifynewtransactions +// handleStopNotifyNewTransactions implements the stopnotifynewtransactions // command extension for websocket connections. func handleStopNotifyNewTransactions(wsc *wsClient, icmd interface{}) (interface{}, error) { wsc.server.ntfnMgr.UnregisterNewMempoolTxsUpdates(wsc) @@ -2724,7 +2724,7 @@ fetchRange: // was any) still exists in the database. If it // doesn't, we error. 
// - // A goto is used to branch executation back to + // A goto is used to branch execution back to // before the range was evaluated, as it must be // reevaluated for the new hashList. minBlock += int32(i) diff --git a/sample-btcd.conf b/sample-btcd.conf index 0a765fcabe..74b20e1660 100644 --- a/sample-btcd.conf +++ b/sample-btcd.conf @@ -48,7 +48,7 @@ ; Use Universal Plug and Play (UPnP) to automatically open the listen port ; and obtain the external IP address from supported devices. NOTE: This option -; will have no effect if exernal IP addresses are specified. +; will have no effect if external IP addresses are specified. ; upnp=1 ; Specify the external IP addresses your node is listening on. One address per diff --git a/scripts/tidy_modules.sh b/scripts/tidy_modules.sh new file mode 100755 index 0000000000..3fa5bfb252 --- /dev/null +++ b/scripts/tidy_modules.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +SUBMODULES=$(find . -mindepth 2 -name "go.mod" | cut -d'/' -f2) + + +# Run 'go mod tidy' for root. +go mod tidy + +# Run 'go mod tidy' for each module. +for submodule in $SUBMODULES +do + pushd $submodule + + go mod tidy + + popd +done diff --git a/server.go b/server.go index 356326ab1c..9fca2db20c 100644 --- a/server.go +++ b/server.go @@ -478,7 +478,7 @@ func (sp *serverPeer) OnVersion(_ *peer.Peer, msg *wire.MsgVersion) *wire.MsgRej addrManager.SetServices(remoteAddr, msg.Services) } - // Ignore peers that have a protcol version that is too old. The peer + // Ignore peers that have a protocol version that is too old. The peer // negotiation logic will disconnect it after this callback returns. 
if msg.ProtocolVersion < int32(peer.MinAcceptableProtocolVersion) { return nil @@ -2204,7 +2204,7 @@ func (s *server) outboundPeerConnected(c *connmgr.ConnReq, conn net.Conn) { go s.peerDoneHandler(sp) } -// peerDoneHandler handles peer disconnects by notifiying the server that it's +// peerDoneHandler handles peer disconnects by notifying the server that it's // done along with other performing other desirable cleanup. func (s *server) peerDoneHandler(sp *serverPeer) { sp.WaitForDisconnect() @@ -2231,7 +2231,7 @@ func (s *server) peerDoneHandler(sp *serverPeer) { func (s *server) peerHandler() { // Start the address manager and sync manager, both of which are needed // by peers. This is done here since their lifecycle is closely tied - // to this handler and rather than adding more channels to sychronize + // to this handler and rather than adding more channels to synchronize // things, it's easier and slightly faster to simply start and stop them // in this handler. s.addrManager.Start() @@ -2823,6 +2823,11 @@ func newServer(listenAddrs, agentBlacklist, agentWhitelist []string, checkpoints = mergeCheckpoints(s.chainParams.Checkpoints, cfg.addCheckpoints) } + // Log that the node is pruned. + if cfg.Prune != 0 { + btcdLog.Infof("Prune set to %d MiB", cfg.Prune) + } + // Create a new block chain instance with the appropriate configuration. var err error s.chain, err = blockchain.New(&blockchain.Config{ diff --git a/service_windows.go b/service_windows.go index 378c9204f8..01edf3db77 100644 --- a/service_windows.go +++ b/service_windows.go @@ -153,7 +153,7 @@ func installService() error { // Support events to the event log using the standard "standard" Windows // EventCreate.exe message file. This allows easy logging of custom - // messges instead of needing to create our own message catalog. + // messages instead of needing to create our own message catalog. 
eventlog.Remove(svcName) eventsSupported := uint32(eventlog.Error | eventlog.Warning | eventlog.Info) return eventlog.InstallAsEventCreate(svcName, eventsSupported) diff --git a/txscript/bench_test.go b/txscript/bench_test.go index 0d1aa91468..60b0d9e12e 100644 --- a/txscript/bench_test.go +++ b/txscript/bench_test.go @@ -7,7 +7,7 @@ package txscript import ( "bytes" "fmt" - "io/ioutil" + "os" "testing" "github.com/btcsuite/btcd/chaincfg" @@ -25,7 +25,7 @@ var ( func init() { // tx 620f57c92cf05a7f7e7f7d28255d5f7089437bc48e34dcfebf7751d08b7fb8f5 - txHex, err := ioutil.ReadFile("data/many_inputs_tx.hex") + txHex, err := os.ReadFile("data/many_inputs_tx.hex") if err != nil { panic(fmt.Sprintf("unable to read benchmark tx file: %v", err)) } diff --git a/txscript/data/script_tests.json b/txscript/data/script_tests.json index 5c054ed3e8..bd3b4e3125 100644 --- a/txscript/data/script_tests.json +++ b/txscript/data/script_tests.json @@ -666,7 +666,7 @@ ["0 0x02 0x0000 0", "CHECKMULTISIGVERIFY 1", "", "OK"], ["While not really correctly DER encoded, the empty signature is allowed by"], -["STRICTENC to provide a compact way to provide a delibrately invalid signature."], +["STRICTENC to provide a compact way to provide a deliberately invalid signature."], ["0", "0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 CHECKSIG NOT", "STRICTENC", "OK"], ["0 0", "1 0x21 0x02865c40293a680cb9c020e7b1e106d8c1916d3cef99aa431a56d253e69256dac0 1 CHECKMULTISIG NOT", "STRICTENC", "OK"], diff --git a/txscript/data/tx_invalid.json b/txscript/data/tx_invalid.json index db465109aa..9985014e9f 100644 --- a/txscript/data/tx_invalid.json +++ b/txscript/data/tx_invalid.json @@ -199,7 +199,7 @@ [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4259839 CHECKSEQUENCEVERIFY 1"]], "020000000100010000000000000000000000000000000000000000000000000000000000000000000000feff40000100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"], -["By-time locks, with 
argument just beyond txin.nSequence (but within numerical boundries)"], +["By-time locks, with argument just beyond txin.nSequence (but within numerical boundaries)"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4194305 CHECKSEQUENCEVERIFY 1"]], "020000000100010000000000000000000000000000000000000000000000000000000000000000000000000040000100000000000000000000000000", "P2SH,CHECKSEQUENCEVERIFY"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "4259839 CHECKSEQUENCEVERIFY 1"]], @@ -333,6 +333,32 @@ ["BIP143: wrong sighash (with FindAndDelete) = 17c50ec2181ecdfdc85ca081174b248199ba81fff730794d4f69b8ec031f2dce"], [[["9628667ad48219a169b41b020800162287d2c0f713c04157e95c484a8dcb7592", 7500, "0x00 0x20 0x9b66c15b4e0b4eb49fa877982cafded24859fe5b0e2dbfbe4f0df1de7743fd52", 200000]], "010000000001019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a6628964c1d000000ffffffff0101000000000000000007004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c03959601010221023cb6055f4b57a1580c5a753e19610cafaedf7e0ff377731c77837fd666eae1712102c1b1db303ac232ffa8e5e7cc2cf5f96c6e40d3e6914061204c0541cb2043a0969552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c039596017500000000", "P2SH,WITNESS"], +[[["bc7fd132fcf817918334822ee6d9bd95c889099c96e07ca2c1eb2cc70db63224", 0, "CODESEPARATOR 0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIG"]], 
+"01000000012432b60dc72cebc1a27ce0969c0989c895bdd9e62e8234839117f8fc32d17fbc000000004a493046022100a576b52051962c25e642c0fd3d77ee6c92487048e5d90818bcf5b51abaccd7900221008204f8fb121be4ec3b24483b1f92d89b1b0548513a134e345c5442e86e8617a501ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["83e194f90b6ef21fa2e3a365b63794fb5daa844bdc9b25de30899fcfe7b01047", 0, "CODESEPARATOR CODESEPARATOR 0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIG"]], +"01000000014710b0e7cf9f8930de259bdc4b84aa5dfb9437b665a3e3a21ff26e0bf994e183000000004a493046022100a166121a61b4eeb19d8f922b978ff6ab58ead8a5a5552bf9be73dc9c156873ea02210092ad9bc43ee647da4f6652c320800debcf08ec20a094a0aaf085f63ecb37a17201ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["326882a7f22b5191f1a0cc9962ca4b878cd969cf3b3a70887aece4d801a0ba5e", 0, "0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CODESEPARATOR CHECKSIG"]], +"01000000015ebaa001d8e4ec7a88703a3bcf69d98c874bca6299cca0f191512bf2a7826832000000004948304502203bf754d1c6732fbf87c5dcd81258aefd30f2060d7bd8ac4a5696f7927091dad1022100f5bcb726c4cf5ed0ed34cc13dadeedf628ae1045b7cb34421bc60b89f4cecae701ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIGVERIFY CODESEPARATOR 0x21 0x038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041 CHECKSIGVERIFY CODESEPARATOR 1"]], +"010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a900000000924830450221009c0a27f886a1d8cb87f6f595fbc3163d28f7a81ec3c4b252ee7f3ac77fd13ffa02203caa8dfa09713c8c4d7ef575c75ed97812072405d932bd11e6a1593a98b679370148304502201e3861ef39a526406bad1e20ecad06be7375ad40ddb582c9be42d26c3a0d7b240221009d0a3985e96522e59635d19cc4448547477396ce0ef17a58e7d74c3ef464292301ffffffff010000000000000000016a00000000", 
"P2SH,CONST_SCRIPTCODE"], +[[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "IF CODESEPARATOR ENDIF 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 CHECKSIGVERIFY CODESEPARATOR 1"]], +"010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a48304502207a6974a77c591fa13dff60cabbb85a0de9e025c09c65a4b2285e47ce8e22f761022100f0efaac9ff8ac36b10721e0aae1fb975c90500b50c56e8a0cc52b0403f0425dd0100ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["a955032f4d6b0c9bfe8cad8f00a8933790b9c1dc28c82e0f48e75b35da0e4944", 0, "IF CODESEPARATOR ENDIF 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 CHECKSIGVERIFY CODESEPARATOR 1"]], +"010000000144490eda355be7480f2ec828dcc1b9903793a8008fad8cfe9b0c6b4d2f0355a9000000004a483045022100fa4a74ba9fd59c59f46c3960cf90cbe0d2b743c471d24a3d5d6db6002af5eebb02204d70ec490fd0f7055a7c45f86514336e3a7f03503dacecabb247fc23f15c83510151ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["ccf7f4053a02e653c36ac75c891b7496d0dc5ce5214f6c913d9cf8f1329ebee0", 0, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], +"0100000001e0be9e32f1f89c3d916c4f21e55cdcd096741b895cc76ac353e6023a05f4f7cc00000000d86149304602210086e5f736a2c3622ebb62bd9d93d8e5d76508b98be922b97160edc3dcca6d8c47022100b23c312ac232a4473f19d2aeb95ab7bdf2b65518911a0d72d50e38b5dd31dc820121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac4730440220508fa761865c8abd81244a168392876ee1d94e8ed83897066b5e2df2400dad24022043f5ee7538e87e9c6aef7ef55133d3e51da7cc522830a9c4d736977a76ef755c0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["10c9f0effe83e97f80f067de2b11c6a00c3088a4bce42c5ae761519af9306f3c", 1, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], 
+"01000000013c6f30f99a5161e75a2ce4bca488300ca0c6112bde67f0807fe983feeff0c91001000000e608646561646265656675ab61493046022100ce18d384221a731c993939015e3d1bcebafb16e8c0b5b5d14097ec8177ae6f28022100bcab227af90bab33c3fe0a9abfee03ba976ee25dc6ce542526e9b2e56e14b7f10121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac493046022100c3b93edcc0fd6250eb32f2dd8a0bba1754b0f6c3be8ed4100ed582f3db73eba2022100bf75b5bd2eff4d6bf2bda2e34a40fcc07d4aa3cf862ceaa77b47b81eff829f9a01ab21038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["6056ebd549003b10cbbd915cea0d82209fe40b8617104be917a26fa92cbe3d6f", 0, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], +"01000000016f3dbe2ca96fa217e94b1017860be49f20820dea5c91bdcb103b0049d5eb566000000000fd1d0147304402203989ac8f9ad36b5d0919d97fa0a7f70c5272abee3b14477dc646288a8b976df5022027d19da84a066af9053ad3d1d7459d171b7e3a80bc6c4ef7a330677a6be548140147304402203989ac8f9ad36b5d0919d97fa0a7f70c5272abee3b14477dc646288a8b976df5022027d19da84a066af9053ad3d1d7459d171b7e3a80bc6c4ef7a330677a6be548140121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ac47304402203757e937ba807e4a5da8534c17f9d121176056406a6465054bdd260457515c1a02200f02eccf1bec0f3a0d65df37889143c2e88ab7acec61a7b6f5aa264139141a2b0121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["5a6b0021a6042a686b6b94abc36b387bef9109847774e8b1e51eb8cc55c53921", 1, "DUP HASH160 0x14 0xee5a6aa40facefb2655ac23c0c28c57c65c41f9b EQUALVERIFY CHECKSIG"]], 
+"01000000012139c555ccb81ee5b1e87477840991ef7b386bc3ab946b6b682a04a621006b5a01000000fdb40148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390121038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f2204148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a5800390175ac4830450220646b72c35beeec51f4d5bc1cbae01863825750d7f490864af354e6ea4f625e9c022100f04b98432df3a9641719dbced53393022e7249fb59db993af1118539830aab870148304502201723e692e5f409a7151db386291b63524c5eb2030df652b1f53022fd8207349f022100b90d9bbf2f3366ce176e5e780a00433da67d9e5c79312c6388312a296a580039017521038479a0fa998cd35259a2ef0a7a5c68662c1474f88ccb6d08a7677bbec7f22041ffffffff010000000000000000016a00000000", "P2SH,CONST_SCRIPTCODE"], +[[["b5b598de91787439afd5938116654e0b16b7a0d0f82742ba37564219c5afcbf9", 0, "DUP HASH160 0x14 0xf6f365c40f0739b61de827a44751e5e99032ed8f EQUALVERIFY CHECKSIG"], +["ab9805c6d57d7070d9a42c5176e47bb705023e6b67249fb6760880548298e742", 0, "HASH160 0x14 0xd8dacdadb7462ae15cd906f1878706d0da8660e6 EQUAL"]], 
+"0100000002f9cbafc519425637ba4227f8d0a0b7160b4e65168193d5af39747891de98b5b5000000006b4830450221008dd619c563e527c47d9bd53534a770b102e40faa87f61433580e04e271ef2f960220029886434e18122b53d5decd25f1f4acb2480659fea20aabd856987ba3c3907e0121022b78b756e2258af13779c1a1f37ea6800259716ca4b7f0b87610e0bf3ab52a01ffffffff42e7988254800876b69f24676b3e0205b77be476512ca4d970707dd5c60598ab00000000fd260100483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a53034930460221008431bdfa72bc67f9d41fe72e94c88fb8f359ffa30b33c72c121c5a877d922e1002210089ef5fc22dd8bfc6bf9ffdb01a9862d27687d424d1fefbab9e9c7176844a187a014c9052483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71210378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c7153aeffffffff01a08601000000000017a914d8dacdadb7462ae15cd906f1878706d0da8660e68700000000", "P2SH,CONST_SCRIPTCODE"], +[[["ceafe58e0f6e7d67c0409fbbf673c84c166e3c5d3c24af58f7175b18df3bb3db", 0, "DUP HASH160 0x14 0xf6f365c40f0739b61de827a44751e5e99032ed8f EQUALVERIFY CHECKSIG"], +["ceafe58e0f6e7d67c0409fbbf673c84c166e3c5d3c24af58f7175b18df3bb3db", 1, "2 0x48 0x3045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 0x21 0x0378d430274f8c5ec1321338151e9f27f4c676a008bdf8638d07c0b6be9ab35c71 3 CHECKMULTISIG"]], 
+"0100000002dbb33bdf185b17f758af243c5d3c6e164cc873f6bb9f40c0677d6e0f8ee5afce000000006b4830450221009627444320dc5ef8d7f68f35010b4c050a6ed0d96b67a84db99fda9c9de58b1e02203e4b4aaa019e012e65d69b487fdf8719df72f488fa91506a80c49a33929f1fd50121022b78b756e2258af13779c1a1f37ea6800259716ca4b7f0b87610e0bf3ab52a01ffffffffdbb33bdf185b17f758af243c5d3c6e164cc873f6bb9f40c0677d6e0f8ee5afce010000009300483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303483045022015bd0139bcccf990a6af6ec5c1c52ed8222e03a0d51c334df139968525d2fcd20221009f9efe325476eb64c3958e4713e9eefe49bf1d820ed58d2112721b134e2a1a5303ffffffff01a0860100000000001976a9149bc0bbdd3024da4d0c38ed1aecf5c68dd1d3fa1288ac00000000", "P2SH,CONST_SCRIPTCODE"], ["Make diffs cleaner by leaving a comment here without comma at the end"] ] diff --git a/txscript/data/tx_valid.json b/txscript/data/tx_valid.json index d70fa54333..a2de155383 100644 --- a/txscript/data/tx_valid.json +++ b/txscript/data/tx_valid.json @@ -471,17 +471,17 @@ ["BIP143 example: P2WSH with OP_CODESEPARATOR and out-of-range SIGHASH_SINGLE."], [[["6eb316926b1c5d567cd6f5e6a84fec606fc53d7b474526d1fff3948020c93dfe", 0, "0x21 0x036d5c20fa14fb2f635474c1dc4ef5909d4568e5569b79fc94d3448486e14685f8 CHECKSIG", 156250000], ["f825690aee1b3dc247da796cacb12687a5e802429fd291cfd63e010f02cf1508", 0, "0x00 0x20 0x5d1b56b63d714eebe542309525f484b7e9d6f686b3781b6f61ef925d66d6f6a0", 4900000000]], 
-"01000000000102fe3dc9208094f3ffd12645477b3dc56f60ec4fa8e6f5d67c565d1c6b9216b36e000000004847304402200af4e47c9b9629dbecc21f73af989bdaa911f7e6f6c2e9394588a3aa68f81e9902204f3fcf6ade7e5abb1295b6774c8e0abd94ae62217367096bc02ee5e435b67da201ffffffff0815cf020f013ed6cf91d29f4202e8a58726b1ac6c79da47c23d1bee0a6925f80000000000ffffffff0100f2052a010000001976a914a30741f8145e5acadf23f751864167f32e0963f788ac000347304402200de66acf4527789bfda55fc5459e214fa6083f936b430a762c629656216805ac0220396f550692cd347171cbc1ef1f51e15282e837bb2b30860dc77c8f78bc8501e503473044022027dc95ad6b740fe5129e7e62a75dd00f291a2aeb1200b84b09d9e3789406b6c002201a9ecd315dd6a0e632ab20bbb98948bc0c6fb204f2c286963bb48517a7058e27034721026dccc749adc2a9d0d89497ac511f760f45c47dc5ed9cf352a58ac706453880aeadab210255a9626aebf5e29c0e6538428ba0d1dcf6ca98ffdf086aa8ced5e0d0215ea465ac00000000", "P2SH,WITNESS"], +"01000000000102fe3dc9208094f3ffd12645477b3dc56f60ec4fa8e6f5d67c565d1c6b9216b36e000000004847304402200af4e47c9b9629dbecc21f73af989bdaa911f7e6f6c2e9394588a3aa68f81e9902204f3fcf6ade7e5abb1295b6774c8e0abd94ae62217367096bc02ee5e435b67da201ffffffff0815cf020f013ed6cf91d29f4202e8a58726b1ac6c79da47c23d1bee0a6925f80000000000ffffffff0100f2052a010000001976a914a30741f8145e5acadf23f751864167f32e0963f788ac000347304402200de66acf4527789bfda55fc5459e214fa6083f936b430a762c629656216805ac0220396f550692cd347171cbc1ef1f51e15282e837bb2b30860dc77c8f78bc8501e503473044022027dc95ad6b740fe5129e7e62a75dd00f291a2aeb1200b84b09d9e3789406b6c002201a9ecd315dd6a0e632ab20bbb98948bc0c6fb204f2c286963bb48517a7058e27034721026dccc749adc2a9d0d89497ac511f760f45c47dc5ed9cf352a58ac706453880aeadab210255a9626aebf5e29c0e6538428ba0d1dcf6ca98ffdf086aa8ced5e0d0215ea465ac00000000", "P2SH,WITNESS,CONST_SCRIPTCODE"], ["BIP143 example: P2WSH with unexecuted OP_CODESEPARATOR and SINGLE|ANYONECANPAY"], [[["01c0cf7fba650638e55eb91261b183251fbb466f90dff17f10086817c542b5e9", 0, "0x00 0x20 0xba468eea561b26301e4cf69fa34bde4ad60c81e70f059f045ca9a79931004a4d", 16777215], 
["1b2a9a426ba603ba357ce7773cb5805cb9c7c2b386d100d1fc9263513188e680", 0, "0x00 0x20 0xd9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537", 16777215]], -"01000000000102e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS"], +"01000000000102e9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff80e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffff0280969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac80969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS,CONST_SCRIPTCODE"], ["BIP143 example: Same as the previous example with input-output pairs swapped"], [[["1b2a9a426ba603ba357ce7773cb5805cb9c7c2b386d100d1fc9263513188e680", 0, "0x00 0x20 
0xd9bbfbe56af7c4b7f960a70d7ea107156913d9e5a26b0a71429df5e097ca6537", 16777215], ["01c0cf7fba650638e55eb91261b183251fbb466f90dff17f10086817c542b5e9", 0, "0x00 0x20 0xba468eea561b26301e4cf69fa34bde4ad60c81e70f059f045ca9a79931004a4d", 16777215]], -"0100000000010280e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffffe9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff0280969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac80969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS"], +"0100000000010280e68831516392fcd100d186b3c2c7b95c80b53c77e77c35ba03a66b429a2a1b0000000000ffffffffe9b542c5176808107ff1df906f46bb1f2583b16112b95ee5380665ba7fcfc0010000000000ffffffff0280969800000000001976a9146648a8cd4531e1ec47f35916de8e259237294d1e88ac80969800000000001976a914de4b231626ef508c9a74a8517e6783c0546d6b2888ac024730440220032521802a76ad7bf74d0e2c218b72cf0cbc867066e2e53db905ba37f130397e02207709e2188ed7f08f4c952d9d13986da504502b8c3be59617e043552f506c46ff83275163ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac02483045022100f6a10b8604e6dc910194b79ccfc93e1bc0ec7c03453caaa8987f7d6c3413566002206216229ede9b4d6ec2d325be245c5b508ff0339bf1794078e20bfe0babc7ffe683270063ab68210392972e2eb617b2388771abe27235fd5ac44af8e61693261550447a4c3e39da98ac00000000", "P2SH,WITNESS,CONST_SCRIPTCODE"], ["BIP143 example: P2SH-P2WSH 6-of-6 multisig signed with 6 different SIGHASH types"], 
[[["6eb98797a21c6c10aa74edf29d618be109f48a8e94c694f3701e08ca69186436", 1, "HASH160 0x14 0x9993a429037b5d912407a71c252019287b8d27a5 EQUAL", 987654321]], @@ -498,7 +498,7 @@ "010000000169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f1581b0000b64830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0121037a3fb04bcdb09eba90f69961ba1692a3528e45e67c85b200df820212d7594d334aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e01ffffffff0101000000000000000000000000", "P2SH,WITNESS"], ["BIP143: correct sighash (without FindAndDelete) = 71c9cd9b2869b9c70b01b1f0360c148f42dee72297db312638df136f43311f23"], [[["f18783ace138abac5d3a7a5cf08e88fe6912f267ef936452e0c27d090621c169", 7500, "0x00 0x20 0x9e1be07558ea5cc8e02ed1d80c0911048afad949affa36d5c3951e3159dbea19", 200000]], -"0100000000010169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f14c1d000000ffffffff01010000000000000000034830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012102a9781d66b61fb5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c4aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0100000000", "P2SH,WITNESS"], +"0100000000010169c12106097dc2e0526493ef67f21269fe888ef05c7a3a5dacab38e1ac8387f14c1d000000ffffffff01010000000000000000034830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e012102a9781d66b61fb5a7ef00ac5ad5bc6ffc78be7b44a566e3c87870e1079368df4c4aad4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0100000000", 
"P2SH,WITNESS,CONST_SCRIPTCODE"], ["This is multisig version of the FindAndDelete tests"], ["Script is 2 CHECKMULTISIGVERIFY DROP"], ["52af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175"], @@ -508,7 +508,9 @@ "01000000019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a662896581b0000fd6f01004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c03959601522102cd74a2809ffeeed0092bc124fd79836706e41f048db3f6ae9df8708cefb83a1c2102e615999372426e46fd107b76eaf007156a507584aa2cc21de9eee3bdbd26d36c4c9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960175ffffffff0101000000000000000000000000", "P2SH,WITNESS"], ["BIP143: correct sighash (without FindAndDelete) = c1628a1e7c67f14ca0c27c06e4fdeec2e6d1a73c7a91d7c046ff83e835aebb72"], [[["9628667ad48219a169b41b020800162287d2c0f713c04157e95c484a8dcb7592", 7500, "0x00 0x20 0x9b66c15b4e0b4eb49fa877982cafded24859fe5b0e2dbfbe4f0df1de7743fd52", 200000]], 
-"010000000001019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a6628964c1d000000ffffffff0101000000000000000007004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960101022102966f109c54e85d3aee8321301136cedeb9fc710fdef58a9de8a73942f8e567c021034ffc99dd9a79dd3cb31e2ab3e0b09e0e67db41ac068c625cd1f491576016c84e9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c039596017500000000", "P2SH,WITNESS"], +"010000000001019275cb8d4a485ce95741c013f7c0d28722160008021bb469a11982d47a6628964c1d000000ffffffff0101000000000000000007004830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c0395960101022102966f109c54e85d3aee8321301136cedeb9fc710fdef58a9de8a73942f8e567c021034ffc99dd9a79dd3cb31e2ab3e0b09e0e67db41ac068c625cd1f491576016c84e9552af4830450220487fb382c4974de3f7d834c1b617fe15860828c7f96454490edd6d891556dcc9022100baf95feb48f845d5bfc9882eb6aeefa1bc3790e39f59eaa46ff7f15ae626c53e0148304502205286f726690b2e9b0207f0345711e63fa7012045b9eb0f19c2458ce1db90cf43022100e89f17f86abc5b149eba4115d4f128bcf45d77fb3ecdd34f594091340c039596017500000000", "P2SH,WITNESS,CONST_SCRIPTCODE"], +[[["7a554c397846f025738965683b8448d79458c54b869f6391ece95145c962e65f", 0, "OP_HASH160 0x149512447916448e4193c321f2d599dff2538973f3 OP_EQUAL", 0]], 
+"02000000015fe662c94551e9ec91639f864bc55894d748843b6865897325f04678394c557a0000000039093006020101020101012103f0665be3ccc59a592608790e84bcf117349fc76c77d06cd3fb323548c310ff340cad0a09300602010102010101ffffffff010000000000000000015100000000", "CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,CLEANSTACK,DERSIG,DISCOURAGE_UPGRADABLE_NOPS,LOW_S,MINIMALDATA,NULLDUMMY,NULLFAIL,P2SH,SIGPUSHONLY,STRICTENC,WITNESS,DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM,MINIMALIF,WITNESS_PUBKEYTYPE,TAPROOT"], ["Make diffs cleaner by leaving a comment here without comma at the end"] ] diff --git a/txscript/doc.go b/txscript/doc.go index d6eddd5a65..957c91947c 100644 --- a/txscript/doc.go +++ b/txscript/doc.go @@ -17,7 +17,7 @@ bitcoin transaction scripts. Bitcoin transaction scripts are written in a stack-base, FORTH-like language. The bitcoin script language consists of a number of opcodes which fall into -several categories such pushing and popping data to and from the stack, +several categories such as pushing and popping data to and from the stack, performing basic and bitwise arithmetic, conditional branching, comparing hashes, and checking cryptographic signatures. Scripts are processed from left to right and intentionally do not provide loops. diff --git a/txscript/engine.go b/txscript/engine.go index 30206152b8..0cc3d96caa 100644 --- a/txscript/engine.go +++ b/txscript/engine.go @@ -114,6 +114,10 @@ const ( // ScriptVerifyDiscourageUpgradeablePubkeyType defines if unknown // public key versions (during tapscript execution) is non-standard. ScriptVerifyDiscourageUpgradeablePubkeyType + + // ScriptVerifyConstScriptCode fails non-segwit scripts if a signature + // match is found in the script code or if OP_CODESEPARATOR is used. + ScriptVerifyConstScriptCode ) const ( @@ -450,7 +454,7 @@ func checkMinimalDataPush(op *opcode, data []byte) error { return nil } -// executeOpcode peforms execution on the passed opcode. It takes into account +// executeOpcode performs execution on the passed opcode. 
It takes into account // whether or not it is hidden by conditionals, but some rules still must be // tested in this case. func (vm *Engine) executeOpcode(op *opcode, data []byte) error { @@ -605,7 +609,7 @@ func (vm *Engine) verifyWitnessProgram(witness wire.TxWitness) error { return scriptError(ErrWitnessProgramWrongLength, errStr) } - // We're attempting to to verify a taproot input, and the witness + // We're attempting to verify a taproot input, and the witness // program data push is of the expected size, so we'll be looking for a // normal key-path spend, or a merkle proof for a tapscript with // execution afterwards. @@ -1414,7 +1418,7 @@ func (vm *Engine) checkSignatureEncoding(sig []byte) error { func getStack(stack *stack) [][]byte { array := make([][]byte, stack.Depth()) for i := range array { - // PeekByteArry can't fail due to overflow, already checked + // PeekByteArray can't fail due to overflow, already checked array[len(array)-i-1], _ = stack.PeekByteArray(int32(i)) } return array diff --git a/txscript/engine_debug_test.go b/txscript/engine_debug_test.go index 5ebfe3f3cf..aa7283e22f 100644 --- a/txscript/engine_debug_test.go +++ b/txscript/engine_debug_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -// TestDebugEngine checks that the StepCallbck called during debug script +// TestDebugEngine checks that the StepCallback called during debug script // execution contains the expected data. 
func TestDebugEngine(t *testing.T) { t.Parallel() diff --git a/txscript/engine_test.go b/txscript/engine_test.go index 51a899be33..c88d27a60e 100644 --- a/txscript/engine_test.go +++ b/txscript/engine_test.go @@ -123,12 +123,12 @@ func TestCheckErrorCondition(t *testing.T) { t.Fatalf("failed to step %dth time: %v", i, err) } if done { - t.Fatalf("finshed early on %dth time", i) + t.Fatalf("finished early on %dth time", i) } err = vm.CheckErrorCondition(false) if !IsErrorCode(err, ErrScriptUnfinished) { - t.Fatalf("got unexepected error %v on %dth iteration", + t.Fatalf("got unexpected error %v on %dth iteration", err, i) } } diff --git a/txscript/error.go b/txscript/error.go index 1f046b9612..a5aaab1571 100644 --- a/txscript/error.go +++ b/txscript/error.go @@ -267,7 +267,7 @@ const ( ErrPubKeyType // ErrCleanStack is returned when the ScriptVerifyCleanStack flag - // is set, and after evalution, the stack does not contain only a + // is set, and after evaluation, the stack does not contain only a // single element. ErrCleanStack @@ -309,9 +309,9 @@ const ( ErrMinimalIf // ErrDiscourageUpgradableWitnessProgram is returned if - // ScriptVerifyWitness is set and the versino of an executing witness + // ScriptVerifyWitness is set and the version of an executing witness // program is outside the set of currently defined witness program - // vesions. + // versions. ErrDiscourageUpgradableWitnessProgram // ---------------------------------------- @@ -363,7 +363,7 @@ const ( ErrTapscriptCheckMultisig // ErrDiscourageUpgradeableTaprootVersion is returned if during - // tapscript execution, we encoutner a public key that isn't 0 or 32 + // tapscript execution, we encounter a public key that isn't 0 or 32 // bytes. ErrDiscourageUpgradeablePubKeyType @@ -408,6 +408,14 @@ const ( // is exceeded during taproot execution. ErrTaprootMaxSigOps + // ErrNonConstScriptCode is returned when a signature match is found when + // calling removeOpcodeByData in a non-segwit script. 
+ ErrNonConstScriptCode + + // ErrCodeSeparator is returned when OP_CODESEPARATOR is used in a + // non-segwit script. + ErrCodeSeparator + // numErrorCodes is the maximum error code number used in tests. This // entry MUST be the last entry in the enum. numErrorCodes @@ -494,6 +502,8 @@ var errorCodeStrings = map[ErrorCode]string{ ErrInvalidTaprootSigLen: "ErrInvalidTaprootSigLen", ErrTaprootPubkeyIsEmpty: "ErrTaprootPubkeyIsEmpty", ErrTaprootMaxSigOps: "ErrTaprootMaxSigOps", + ErrNonConstScriptCode: "ErrNonConstScriptCode", + ErrCodeSeparator: "ErrCodeSeparator", } // String returns the ErrorCode as a human-readable name. diff --git a/txscript/error_test.go b/txscript/error_test.go index accdf11a8c..bb1f73e92e 100644 --- a/txscript/error_test.go +++ b/txscript/error_test.go @@ -96,6 +96,8 @@ func TestErrorCodeStringer(t *testing.T) { {ErrInvalidTaprootSigLen, "ErrInvalidTaprootSigLen"}, {ErrTaprootPubkeyIsEmpty, "ErrTaprootPubkeyIsEmpty"}, {ErrTaprootMaxSigOps, "ErrTaprootMaxSigOps"}, + {ErrNonConstScriptCode, "ErrNonConstScriptCode"}, + {ErrCodeSeparator, "ErrCodeSeparator"}, {0xffff, "Unknown ErrorCode (65535)"}, } diff --git a/txscript/opcode.go b/txscript/opcode.go index 4918b991c5..1cd3ba24fb 100644 --- a/txscript/opcode.go +++ b/txscript/opcode.go @@ -1953,6 +1953,12 @@ func opcodeCodeSeparator(op *opcode, data []byte, vm *Engine) error { if vm.taprootCtx != nil { vm.taprootCtx.codeSepPos = uint32(vm.tokenizer.OpcodePosition()) + } else if vm.witnessProgram == nil && + vm.hasFlag(ScriptVerifyConstScriptCode) { + + // Disable OP_CODESEPARATOR for non-segwit scripts. + str := "OP_CODESEPARATOR used in non-segwit script" + return scriptError(ErrCodeSeparator, str) } return nil @@ -2073,7 +2079,13 @@ func opcodeCheckSig(op *opcode, data []byte, vm *Engine) error { // TODO(roasbeef): return an error? 
} - valid := sigVerifier.Verify() + result := sigVerifier.Verify() + valid := result.sigValid + + if vm.hasFlag(ScriptVerifyConstScriptCode) && result.sigMatch { + str := "non-const script code" + return scriptError(ErrNonConstScriptCode, str) + } switch { // For tapscript, and prior execution with null fail active, if the @@ -2166,11 +2178,11 @@ func opcodeCheckSigAdd(op *opcode, data []byte, vm *Engine) error { return err } - valid := sigVerifier.Verify() + result := sigVerifier.Verify() // If the signature is invalid, this we fail execution, as it should // have been an empty signature. - if !valid { + if !result.sigValid { str := "signature not empty on failed checksig" return scriptError(ErrNullFail, str) } @@ -2303,7 +2315,13 @@ func opcodeCheckMultiSig(op *opcode, data []byte, vm *Engine) error { // no way for a signature to sign itself. if !vm.isWitnessVersionActive(0) { for _, sigInfo := range signatures { - script = removeOpcodeByData(script, sigInfo.signature) + var match bool + script, match = removeOpcodeByData(script, sigInfo.signature) + if vm.hasFlag(ScriptVerifyConstScriptCode) && match { + str := fmt.Sprintf("got match of %v in %v", sigInfo.signature, + script) + return scriptError(ErrNonConstScriptCode, str) + } } } diff --git a/txscript/reference_test.go b/txscript/reference_test.go index 59acdb8da7..b3f90cd5ba 100644 --- a/txscript/reference_test.go +++ b/txscript/reference_test.go @@ -11,7 +11,7 @@ import ( "errors" "fmt" "io/fs" - "io/ioutil" + "os" "path/filepath" "strconv" "strings" @@ -196,6 +196,8 @@ func parseScriptFlags(flagStr string) (ScriptFlags, error) { flags |= ScriptVerifyWitnessPubKeyType case "TAPROOT": flags |= ScriptVerifyTaproot + case "CONST_SCRIPTCODE": + flags |= ScriptVerifyConstScriptCode default: return flags, fmt.Errorf("invalid flag: %s", flag) } @@ -490,7 +492,7 @@ func testScripts(t *testing.T, tests [][]interface{}, useSigCache bool) { // TestScripts ensures all of the tests in script_tests.json execute with the 
// expected results as defined in the test data. func TestScripts(t *testing.T) { - file, err := ioutil.ReadFile("data/script_tests.json") + file, err := os.ReadFile("data/script_tests.json") if err != nil { t.Fatalf("TestScripts: %v\n", err) } @@ -521,7 +523,7 @@ func testVecF64ToUint32(f float64) uint32 { // TestTxInvalidTests ensures all of the tests in tx_invalid.json fail as // expected. func TestTxInvalidTests(t *testing.T) { - file, err := ioutil.ReadFile("data/tx_invalid.json") + file, err := os.ReadFile("data/tx_invalid.json") if err != nil { t.Fatalf("TestTxInvalidTests: %v\n", err) } @@ -679,7 +681,7 @@ testloop: // TestTxValidTests ensures all of the tests in tx_valid.json pass as expected. func TestTxValidTests(t *testing.T) { - file, err := ioutil.ReadFile("data/tx_valid.json") + file, err := os.ReadFile("data/tx_valid.json") if err != nil { t.Fatalf("TestTxValidTests: %v\n", err) } @@ -836,7 +838,7 @@ testloop: // in sighash.json. // https://github.com/bitcoin/bitcoin/blob/master/src/test/data/sighash.json func TestCalcSignatureHash(t *testing.T) { - file, err := ioutil.ReadFile("data/sighash.json") + file, err := os.ReadFile("data/sighash.json") if err != nil { t.Fatalf("TestCalcSignatureHash: %v\n", err) } @@ -1044,7 +1046,7 @@ func TestTaprootReferenceTests(t *testing.T) { return nil } - testJson, err := ioutil.ReadFile(path) + testJson, err := os.ReadFile(path) if err != nil { return fmt.Errorf("unable to read file: %v", err) } diff --git a/txscript/script.go b/txscript/script.go index 18723067ee..6d16f74f95 100644 --- a/txscript/script.go +++ b/txscript/script.go @@ -178,7 +178,7 @@ func DisasmString(script []byte) (string, error) { // removeOpcodeRaw will return the script after removing any opcodes that match // `opcode`. If the opcode does not appear in script, the original script will // be returned unmodified. Otherwise, a new script will be allocated to contain -// the filtered script. 
This metehod assumes that the script parses +// the filtered script. This method assumes that the script parses // successfully. // // NOTE: This function is only valid for version 0 scripts. Since the function @@ -244,7 +244,7 @@ func isCanonicalPush(opcode byte, data []byte) bool { // removeOpcodeByData will return the script minus any opcodes that perform a // canonical push of data that contains the passed data to remove. This // function assumes it is provided a version 0 script as any future version of -// script should avoid this functionality since it is unncessary due to the +// script should avoid this functionality since it is unnecessary due to the // signature scripts not being part of the witness-free transaction hash. // // WARNING: This will return the passed script unmodified unless a modification @@ -255,10 +255,10 @@ func isCanonicalPush(opcode byte, data []byte) bool { // NOTE: This function is only valid for version 0 scripts. Since the function // does not accept a script version, the results are undefined for other script // versions. -func removeOpcodeByData(script []byte, dataToRemove []byte) []byte { +func removeOpcodeByData(script []byte, dataToRemove []byte) ([]byte, bool) { // Avoid work when possible. if len(script) == 0 || len(dataToRemove) == 0 { - return script + return script, false } // Parse through the script looking for a canonical data push that contains @@ -266,32 +266,48 @@ func removeOpcodeByData(script []byte, dataToRemove []byte) []byte { const scriptVersion = 0 var result []byte var prevOffset int32 + var match bool tokenizer := MakeScriptTokenizer(scriptVersion, script) for tokenizer.Next() { - // In practice, the script will basically never actually contain the - // data since this function is only used during signature verification - // to remove the signature itself which would require some incredibly - // non-standard code to create. 
- // - // Thus, as an optimization, avoid allocating a new script unless there - // is actually a match that needs to be removed. - op, data := tokenizer.Opcode(), tokenizer.Data() - if isCanonicalPush(op, data) && bytes.Contains(data, dataToRemove) { - if result == nil { - fullPushLen := tokenizer.ByteIndex() - prevOffset - result = make([]byte, 0, int32(len(script))-fullPushLen) - result = append(result, script[0:prevOffset]...) - } - } else if result != nil { - result = append(result, script[prevOffset:tokenizer.ByteIndex()]...) + var found bool + result, prevOffset, found = removeOpcodeCanonical( + &tokenizer, script, dataToRemove, prevOffset, result, + ) + if found { + match = true } - - prevOffset = tokenizer.ByteIndex() } if result == nil { result = script } - return result + return result, match +} + +func removeOpcodeCanonical(t *ScriptTokenizer, script, dataToRemove []byte, + prevOffset int32, result []byte) ([]byte, int32, bool) { + + var found bool + + // In practice, the script will basically never actually contain the + // data since this function is only used during signature verification + // to remove the signature itself which would require some incredibly + // non-standard code to create. + // + // Thus, as an optimization, avoid allocating a new script unless there + // is actually a match that needs to be removed. + op, data := t.Opcode(), t.Data() + if isCanonicalPush(op, data) && bytes.Equal(data, dataToRemove) { + if result == nil { + fullPushLen := t.ByteIndex() - prevOffset + result = make([]byte, 0, int32(len(script))-fullPushLen) + result = append(result, script[0:prevOffset]...) + } + found = true + } else if result != nil { + result = append(result, script[prevOffset:t.ByteIndex()]...) 
+ } + + return result, t.ByteIndex(), found } // AsSmallInt returns the passed opcode, which must be true according to diff --git a/txscript/script_test.go b/txscript/script_test.go index a90e1940e5..7842565c6c 100644 --- a/txscript/script_test.go +++ b/txscript/script_test.go @@ -357,6 +357,12 @@ func TestRemoveOpcodeByData(t *testing.T) { remove: []byte{1, 2, 3, 4}, after: []byte{OP_NOP}, }, + { + name: "", + before: []byte{OP_NOP, OP_DATA_8, 1, 2, 3, 4, 5, 6, 7, 8, OP_DATA_4, 1, 2, 3, 4}, + remove: []byte{1, 2, 3, 4}, + after: []byte{OP_NOP, OP_DATA_8, 1, 2, 3, 4, 5, 6, 7, 8}, + }, { name: "simple case", before: []byte{OP_DATA_4, 1, 2, 3, 4}, @@ -376,7 +382,9 @@ func TestRemoveOpcodeByData(t *testing.T) { bytes.Repeat([]byte{0}, 72)...), []byte{1, 2, 3, 4}...), remove: []byte{1, 2, 3, 4}, - after: nil, + after: append(append([]byte{OP_PUSHDATA1, 76}, + bytes.Repeat([]byte{0}, 72)...), + []byte{1, 2, 3, 4}...), }, { name: "simple case (pushdata1 miss)", @@ -400,7 +408,9 @@ func TestRemoveOpcodeByData(t *testing.T) { bytes.Repeat([]byte{0}, 252)...), []byte{1, 2, 3, 4}...), remove: []byte{1, 2, 3, 4}, - after: nil, + after: append(append([]byte{OP_PUSHDATA2, 0, 1}, + bytes.Repeat([]byte{0}, 252)...), + []byte{1, 2, 3, 4}...), }, { name: "simple case (pushdata2 miss)", @@ -425,7 +435,9 @@ func TestRemoveOpcodeByData(t *testing.T) { bytes.Repeat([]byte{0}, 65532)...), []byte{1, 2, 3, 4}...), remove: []byte{1, 2, 3, 4}, - after: nil, + after: append(append([]byte{OP_PUSHDATA4, 0, 0, 1, 0}, + bytes.Repeat([]byte{0}, 65532)...), + []byte{1, 2, 3, 4}...), }, { name: "simple case (pushdata4 miss noncanonical)", @@ -465,16 +477,17 @@ func TestRemoveOpcodeByData(t *testing.T) { // tstRemoveOpcodeByData is a convenience function to ensure the provided // script parses before attempting to remove the passed data. 
const scriptVersion = 0 - tstRemoveOpcodeByData := func(script []byte, data []byte) ([]byte, error) { + tstRemoveOpcodeByData := func(script []byte, data []byte) ([]byte, bool, error) { if err := checkScriptParses(scriptVersion, script); err != nil { - return nil, err + return nil, false, err } - return removeOpcodeByData(script, data), nil + result, match := removeOpcodeByData(script, data) + return result, match, nil } for _, test := range tests { - result, err := tstRemoveOpcodeByData(test.before, test.remove) + result, _, err := tstRemoveOpcodeByData(test.before, test.remove) if e := tstCheckScriptError(err, test.err); e != nil { t.Errorf("%s: %v", test.name, e) continue diff --git a/txscript/sigvalidate.go b/txscript/sigvalidate.go index 0bd00c326d..3cfe792638 100644 --- a/txscript/sigvalidate.go +++ b/txscript/sigvalidate.go @@ -20,9 +20,14 @@ import ( // pre-segwit, segwit v0, segwit v1 (taproot key spend validation), and the // base tapscript verification. type signatureVerifier interface { - // Verify returns true if the signature verifier context deems the + // Verify returns whether or not the signature verifier context deems the // signature to be valid for the given context. - Verify() bool + Verify() verifyResult +} + +type verifyResult struct { + sigValid bool + sigMatch bool } // baseSigVerifier is used to verify signatures for the _base_ system, meaning @@ -147,20 +152,23 @@ func (b *baseSigVerifier) verifySig(sigHash []byte) bool { return valid } -// Verify returns true if the signature verifier context deems the signature to -// be valid for the given context. +// Verify returns whether or not the signature verifier context deems the +// signature to be valid for the given context. // // NOTE: This is part of the baseSigVerifier interface. -func (b *baseSigVerifier) Verify() bool { +func (b *baseSigVerifier) Verify() verifyResult { // Remove the signature since there is no way for a signature // to sign itself. 
- subScript := removeOpcodeByData(b.subScript, b.fullSigBytes) + subScript, match := removeOpcodeByData(b.subScript, b.fullSigBytes) sigHash := calcSignatureHash( subScript, b.hashType, &b.vm.tx, b.vm.txIdx, ) - return b.verifySig(sigHash) + return verifyResult{ + sigValid: b.verifySig(sigHash), + sigMatch: match, + } } // A compile-time assertion to ensure baseSigVerifier implements the @@ -192,7 +200,7 @@ func newBaseSegwitSigVerifier(pkBytes, fullSigBytes []byte, // be valid for the given context. // // NOTE: This is part of the baseSigVerifier interface. -func (s *baseSegwitSigVerifier) Verify() bool { +func (s *baseSegwitSigVerifier) Verify() verifyResult { var sigHashes *TxSigHashes if s.vm.hashCache != nil { sigHashes = s.vm.hashCache @@ -208,10 +216,12 @@ func (s *baseSegwitSigVerifier) Verify() bool { // TODO(roasbeef): this doesn't need to return an error, should // instead be further up the stack? this only returns an error // if the input index is greater than the number of inputs - return false + return verifyResult{} } - return s.verifySig(sigHash) + return verifyResult{ + sigValid: s.verifySig(sigHash), + } } // A compile-time assertion to ensure baseSegwitSigVerifier implements the @@ -331,7 +341,7 @@ func newTaprootSigVerifier(pkBytes []byte, fullSigBytes []byte, // key and signature, and the passed sigHash as the message digest. func (t *taprootSigVerifier) verifySig(sigHash []byte) bool { // At this point, we can check to see if this signature is already - // included in the sigCcahe and is valid or not (if one was passed in). + // included in the sigCache and is valid or not (if one was passed in). cacheKey, _ := chainhash.NewHash(sigHash) if t.sigCache != nil { if t.sigCache.Exists(*cacheKey, t.fullSigBytes, t.pkBytes) { @@ -356,11 +366,11 @@ func (t *taprootSigVerifier) verifySig(sigHash []byte) bool { return false } -// Verify returns true if the signature verifier context deems the signature to -// be valid for the given context. 
+// Verify returns whether or not the signature verifier context deems the +// signature to be valid for the given context. // // NOTE: This is part of the baseSigVerifier interface. -func (t *taprootSigVerifier) Verify() bool { +func (t *taprootSigVerifier) Verify() verifyResult { var opts []TaprootSigHashOption if t.annex != nil { opts = append(opts, WithAnnex(t.annex)) @@ -374,10 +384,12 @@ func (t *taprootSigVerifier) Verify() bool { ) if err != nil { // TODO(roasbeef): propagate the error here? - return false + return verifyResult{} } - return t.verifySig(sigHash) + return verifyResult{ + sigValid: t.verifySig(sigHash), + } } // A compile-time assertion to ensure taprootSigVerifier implements the @@ -385,7 +397,7 @@ func (t *taprootSigVerifier) Verify() bool { var _ signatureVerifier = (*taprootSigVerifier)(nil) // baseTapscriptSigVerifier verifies a signature for an input spending a -// tapscript leaf from the prevoous output. +// tapscript leaf from the previous output. type baseTapscriptSigVerifier struct { *taprootSigVerifier @@ -439,16 +451,18 @@ func newBaseTapscriptSigVerifier(pkBytes, rawSig []byte, } } -// Verify returns true if the signature verifier context deems the signature to -// be valid for the given context. +// Verify returns whether or not the signature verifier context deems the +// signature to be valid for the given context. // // NOTE: This is part of the baseSigVerifier interface. -func (b *baseTapscriptSigVerifier) Verify() bool { +func (b *baseTapscriptSigVerifier) Verify() verifyResult { // If the public key is blank, then that means it wasn't 0 or 32 bytes, // so we'll treat this as an unknown public key version and return - // true. + // that it's valid. if b.pubKey == nil { - return true + return verifyResult{ + sigValid: true, + } } var opts []TaprootSigHashOption @@ -468,10 +482,12 @@ func (b *baseTapscriptSigVerifier) Verify() bool { ) if err != nil { // TODO(roasbeef): propagate the error here? 
- return false + return verifyResult{} } - return b.verifySig(sigHash) + return verifyResult{ + sigValid: b.verifySig(sigHash), + } } // A compile-time assertion to ensure baseTapscriptSigVerifier implements the diff --git a/txscript/standard.go b/txscript/standard.go index 5ef2ad167f..809a900a2a 100644 --- a/txscript/standard.go +++ b/txscript/standard.go @@ -46,7 +46,8 @@ const ( ScriptVerifyTaproot | ScriptVerifyDiscourageUpgradeableTaprootVersion | ScriptVerifyDiscourageOpSuccess | - ScriptVerifyDiscourageUpgradeablePubkeyType + ScriptVerifyDiscourageUpgradeablePubkeyType | + ScriptVerifyConstScriptCode ) // ScriptClass is an enumeration for the list of standard types of script. @@ -153,7 +154,7 @@ func isPubKeyScript(script []byte) bool { // is a standard pay-to-pubkey-hash script. It will return nil otherwise. func extractPubKeyHash(script []byte) []byte { // A pay-to-pubkey-hash script is of the form: - // OP_DUP OP_HASH160 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG + // OP_DUP OP_HASH160 OP_DATA_20 <20-byte hash> OP_EQUALVERIFY OP_CHECKSIG if len(script) == 25 && script[0] == OP_DUP && script[1] == OP_HASH160 && @@ -181,7 +182,7 @@ func isPubKeyHashScript(script []byte) bool { // versions. func extractScriptHash(script []byte) []byte { // A pay-to-script-hash script is of the form: - // OP_HASH160 <20-byte scripthash> OP_EQUAL + // OP_HASH160 OP_DATA_20 <20-byte scripthash> OP_EQUAL if len(script) == 23 && script[0] == OP_HASH160 && script[1] == OP_DATA_20 && diff --git a/txscript/standard_test.go b/txscript/standard_test.go index 283e2ccb7b..4993a65260 100644 --- a/txscript/standard_test.go +++ b/txscript/standard_test.go @@ -884,7 +884,7 @@ func TestMultiSigScript(t *testing.T) { } } -// TestCalcMultiSigStats ensures the CalcMutliSigStats function returns the +// TestCalcMultiSigStats ensures the CalcMultiSigStats function returns the // expected errors. 
func TestCalcMultiSigStats(t *testing.T) { t.Parallel() diff --git a/txscript/taproot.go b/txscript/taproot.go index 003eb19ae3..ee26cae967 100644 --- a/txscript/taproot.go +++ b/txscript/taproot.go @@ -65,7 +65,7 @@ func VerifyTaprootKeySpend(witnessProgram []byte, rawSig []byte, tx *wire.MsgTx, // program. rawKey := witnessProgram - // Extract the annex if it exists, so we can compute the proper proper + // Extract the annex if it exists, so we can compute the proper // sighash below. var annex []byte witness := tx.TxIn[inputIndex].Witness @@ -84,8 +84,8 @@ func VerifyTaprootKeySpend(witnessProgram []byte, rawSig []byte, tx *wire.MsgTx, return err } - valid := keySpendVerifier.Verify() - if valid { + result := keySpendVerifier.Verify() + if result.sigValid { return nil } @@ -255,7 +255,7 @@ func ComputeTaprootOutputKey(pubKey *btcec.PublicKey, scriptRoot, ) - // With the tap tweek computed, we'll need to convert the merkle root + // With the tap tweak computed, we'll need to convert the merkle root // into something in the domain we can manipulate: a scalar value mod // N. var tweakScalar btcec.ModNScalar diff --git a/txscript/taproot_test.go b/txscript/taproot_test.go index 01b3780e9c..9c5bb573a4 100644 --- a/txscript/taproot_test.go +++ b/txscript/taproot_test.go @@ -224,7 +224,7 @@ func TestTaprootTweakNoMutation(t *testing.T) { return false } - // We shuold be able to re-derive the private key from raw + // We should be able to re-derive the private key from raw // bytes and have that match up again. 
privKeyCopy, _ := btcec.PrivKeyFromBytes(privBytes[:]) if *privKey != *privKeyCopy { diff --git a/version.go b/version.go index d7835910f8..e02315d434 100644 --- a/version.go +++ b/version.go @@ -18,7 +18,7 @@ const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqr const ( appMajor uint = 0 appMinor uint = 24 - appPatch uint = 0 + appPatch uint = 2 // appPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. diff --git a/wire/bench_test.go b/wire/bench_test.go index d19dd775f2..2f63fa30a6 100644 --- a/wire/bench_test.go +++ b/wire/bench_test.go @@ -549,7 +549,7 @@ func BenchmarkDeserializeTxSmall(b *testing.B) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0xff, 0xff, 0xff, 0xff, // Previous output index 0x07, // Varint for length of signature script 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script 0xff, 0xff, 0xff, 0xff, // Sequence @@ -671,7 +671,7 @@ func BenchmarkSerializeTxSmall(b *testing.B) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0xff, 0xff, 0xff, 0xff, // Previous output index 0x07, // Varint for length of signature script 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script 0xff, 0xff, 0xff, 0xff, // Sequence diff --git a/wire/fixedIO_test.go b/wire/fixedIO_test.go index ccd67ae411..0952a9b476 100644 --- a/wire/fixedIO_test.go +++ b/wire/fixedIO_test.go @@ -9,7 +9,7 @@ import ( "io" ) -// fixedWriter implements the io.Writer interface and intentially allows +// fixedWriter implements the io.Writer interface and intentionally allows // testing of error paths by forcing short writes. 
type fixedWriter struct { b []byte @@ -44,7 +44,7 @@ func newFixedWriter(max int) io.Writer { return &fw } -// fixedReader implements the io.Reader interface and intentially allows +// fixedReader implements the io.Reader interface and intentionally allows // testing of error paths by forcing short reads. type fixedReader struct { buf []byte diff --git a/wire/msgalert.go b/wire/msgalert.go index 71c4e220fe..b99ac89de9 100644 --- a/wire/msgalert.go +++ b/wire/msgalert.go @@ -83,7 +83,7 @@ const maxAlertSize = MaxMessagePayload - maxSignatureSize - MaxVarIntPayload - 1 // fit into a maximum size alert. // // maxAlertSize = fixedAlertSize + max(SetCancel) + max(SetSubVer) + 3*(string) -// for caculating maximum number of cancel IDs, set all other var sizes to 0 +// for calculating maximum number of cancel IDs, set all other var sizes to 0 // maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(int32) // x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 4 const maxCountSetCancel = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / 4 @@ -92,7 +92,7 @@ const maxCountSetCancel = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) // fit into a maximum size alert. // // maxAlertSize = fixedAlertSize + max(SetCancel) + max(SetSubVer) + 3*(string) -// for caculating maximum number of subversions, set all other var sizes to 0 +// for calculating maximum number of subversions, set all other var sizes to 0 // maxAlertSize = fixedAlertSize + (MaxVarIntPayload-1) + x*sizeOf(string) // x = (maxAlertSize - fixedAlertSize - MaxVarIntPayload + 1) / sizeOf(string) // subversion would typically be something like "/Satoshi:0.7.2/" (15 bytes) diff --git a/wire/msgblock.go b/wire/msgblock.go index 77585e3fb6..59dbbb1c06 100644 --- a/wire/msgblock.go +++ b/wire/msgblock.go @@ -45,6 +45,20 @@ type MsgBlock struct { Transactions []*MsgTx } +// Copy creates a deep copy of MsgBlock. 
+func (msg *MsgBlock) Copy() *MsgBlock { + block := &MsgBlock{ + Header: msg.Header, + Transactions: make([]*MsgTx, len(msg.Transactions)), + } + + for i, tx := range msg.Transactions { + block.Transactions[i] = tx.Copy() + } + + return block +} + // AddTransaction adds a transaction to the message. func (msg *MsgBlock) AddTransaction(tx *MsgTx) error { msg.Transactions = append(msg.Transactions, tx) @@ -231,7 +245,7 @@ func (msg *MsgBlock) Serialize(w io.Writer) error { // SerializeNoWitness encodes a block to w using an identical format to // Serialize, with all (if any) witness data stripped from all transactions. -// This method is provided in additon to the regular Serialize, in order to +// This method is provided in addition to the regular Serialize, in order to // allow one to selectively encode transaction witness data to non-upgraded // peers which are unaware of the new encoding. func (msg *MsgBlock) SerializeNoWitness(w io.Writer) error { diff --git a/wire/msgblock_test.go b/wire/msgblock_test.go index 2a861b208b..f0e938697c 100644 --- a/wire/msgblock_test.go +++ b/wire/msgblock_test.go @@ -562,7 +562,7 @@ var blockOneBytes = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0xff, 0xff, 0xff, 0xff, // Previous output index 0x07, // Varint for length of signature script 0x04, 0xff, 0xff, 0x00, 0x1d, 0x01, 0x04, // Signature script (coinbase) 0xff, 0xff, 0xff, 0xff, // Sequence diff --git a/wire/msggetheaders.go b/wire/msggetheaders.go index f49e4c0dd4..38e5c6bfcd 100644 --- a/wire/msggetheaders.go +++ b/wire/msggetheaders.go @@ -23,7 +23,7 @@ import ( // // The algorithm for building the block locator hashes should be to add the // hashes in reverse order until you reach the genesis block. 
In order to keep -// the list of locator hashes to a resonable number of entries, first add the +// the list of locator hashes to a reasonable number of entries, first add the // most recent 10 block hashes, then double the step each loop iteration to // exponentially decrease the number of hashes the further away from head and // closer to the genesis block you get. diff --git a/wire/msgtx.go b/wire/msgtx.go index eab265c35d..1864ec6e36 100644 --- a/wire/msgtx.go +++ b/wire/msgtx.go @@ -5,6 +5,7 @@ package wire import ( + "encoding/hex" "errors" "fmt" "io" @@ -111,6 +112,14 @@ const ( maxWitnessItemSize = 4_000_000 ) +var ( + // errSuperfluousWitnessRecord is returned during tx deserialization when + // a tx has the witness marker flag set but has no witnesses. + errSuperfluousWitnessRecord = fmt.Errorf( + "witness flag set but tx has no witnesses", + ) +) + // TxFlagMarker is the first byte of the FLAG field in a bitcoin tx // message. It allows decoders to distinguish a regular serialized // transaction from one that would require a different parsing logic. @@ -302,6 +311,22 @@ func (t TxWitness) SerializeSize() int { return n } +// ToHexStrings formats the witness stack as a slice of hex-encoded strings. +func (t TxWitness) ToHexStrings() []string { + // Ensure nil is returned when there are no entries versus an empty + // slice so it can properly be omitted as necessary. + if len(t) == 0 { + return nil + } + + result := make([]string, len(t)) + for idx, wit := range t { + result[idx] = hex.EncodeToString(wit) + } + + return result +} + // TxOut defines a bitcoin transaction output. type TxOut struct { Value int64 @@ -353,6 +378,11 @@ func (msg *MsgTx) TxHash() chainhash.Hash { return chainhash.DoubleHashRaw(msg.SerializeNoWitness) } +// TxID generates the transaction ID of the transaction. 
+func (msg *MsgTx) TxID() string { + return msg.TxHash().String() +} + // WitnessHash generates the hash of the transaction serialized according to // the new witness serialization defined in BIP0141 and BIP0144. The final // output is used within the Segregated Witness commitment of all the witnesses @@ -579,8 +609,7 @@ func (msg *MsgTx) btcDecode(r io.Reader, pver uint32, enc MessageEncoding, txin.Witness = make([][]byte, witCount) for j := uint64(0); j < witCount; j++ { txin.Witness[j], err = readScriptBuf( - r, pver, buf, sbuf, maxWitnessItemSize, - "script witness item", + r, pver, buf, sbuf, "script witness item", ) if err != nil { return err @@ -589,6 +618,12 @@ func (msg *MsgTx) btcDecode(r io.Reader, pver uint32, enc MessageEncoding, sbuf = sbuf[len(txin.Witness[j]):] } } + + // Check that if the witness flag is set that we actually have + // witnesses. This check is also done by bitcoind. + if !msg.HasWitness() { + return errSuperfluousWitnessRecord + } } if _, err := io.ReadFull(r, buf[:4]); err != nil { @@ -982,7 +1017,7 @@ func writeOutPointBuf(w io.Writer, pver uint32, version int32, op *OutPoint, // // NOTE: b MUST either be nil or at least an 8-byte slice. func readScriptBuf(r io.Reader, pver uint32, buf, s []byte, - maxAllowed uint32, fieldName string) ([]byte, error) { + fieldName string) ([]byte, error) { count, err := ReadVarIntBuf(r, pver, buf) if err != nil { @@ -992,9 +1027,9 @@ func readScriptBuf(r io.Reader, pver uint32, buf, s []byte, // Prevent byte array larger than the max message size. It would // be possible to cause memory exhaustion and panics without a sane // upper bound on this count. 
- if count > uint64(maxAllowed) { + if count > maxWitnessItemSize { str := fmt.Sprintf("%s is larger than the max allowed size "+ - "[count %d, max %d]", fieldName, count, maxAllowed) + "[count %d, max %d]", fieldName, count, maxWitnessItemSize) return nil, messageError("readScript", str) } @@ -1021,8 +1056,9 @@ func readTxInBuf(r io.Reader, pver uint32, version int32, ti *TxIn, return err } - ti.SignatureScript, err = readScriptBuf(r, pver, buf, s, MaxMessagePayload, - "transaction input signature script") + ti.SignatureScript, err = readScriptBuf( + r, pver, buf, s, "transaction input signature script", + ) if err != nil { return err } @@ -1085,8 +1121,7 @@ func readTxOutBuf(r io.Reader, pver uint32, version int32, to *TxOut, to.Value = int64(littleEndian.Uint64(buf)) to.PkScript, err = readScriptBuf( - r, pver, buf, s, MaxMessagePayload, - "transaction output public key script", + r, pver, buf, s, "transaction output public key script", ) return err } diff --git a/wire/msgtx_test.go b/wire/msgtx_test.go index 5ec753b62d..e6b7bae44c 100644 --- a/wire/msgtx_test.go +++ b/wire/msgtx_test.go @@ -6,6 +6,7 @@ package wire import ( "bytes" + "errors" "fmt" "io" "reflect" @@ -672,7 +673,7 @@ func TestTxOverflowErrors(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0xff, 0xff, 0xff, 0xff, // Previous output index 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // Varint for length of signature script }, pver, BaseEncoding, txVer, &MessageError{}, @@ -688,7 +689,7 @@ func TestTxOverflowErrors(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0xff, 0xff, 0xff, 0xff, // Previous output index 0x00, // 
Varint for length of signature script 0xff, 0xff, 0xff, 0xff, // Sequence 0x01, // Varint for number of output transactions @@ -733,7 +734,7 @@ func TestTxSerializeSizeStripped(t *testing.T) { in *MsgTx // Tx to encode size int // Expected serialized size }{ - // No inputs or outpus. + // No inputs or outputs. {noTx, 10}, // Transcaction with an input and an output. @@ -756,6 +757,34 @@ func TestTxSerializeSizeStripped(t *testing.T) { } } +// TestTxID performs tests to ensure the transaction ID for various transactions +// is accurate. +func TestTxID(t *testing.T) { + // Empty tx message. + noTx := NewMsgTx(1) + noTx.Version = 1 + + tests := []struct { + in *MsgTx // Tx to encode. + txid string // Expected transaction ID. + }{ + // No inputs or outputs. + {noTx, "d21633ba23f70118185227be58a63527675641ad37967e2aa461559f577aec43"}, + + // Transaction with an input and an output. + {multiTx, "0100d15a522ff38de05c164ca0a56379a1b77dd1e4805a6534dc9b3d88290e9d"}, + + // Transaction with an input which includes witness data, and + // one output. + {multiWitnessTx, "0f167d1385a84d1518cfee208b653fc9163b605ccf1b75347e2850b3e2eb19f3"}, + } + + for i, test := range tests { + txid := test.in.TxID() + require.Equal(t, test.txid, txid, "test #%d", i) + } +} + // TestTxWitnessSize performs tests to ensure that the serialized size for // various types of transactions that include witness data is accurate. func TestTxWitnessSize(t *testing.T) { @@ -849,6 +878,17 @@ func TestTxOutPointFromString(t *testing.T) { } } +// TestTxSuperfluousWitnessRecord ensures that btcd fails to parse a tx with +// the witness marker flag set but without any actual witnesses.
+func TestTxSuperfluousWitnessRecord(t *testing.T) { + m := &MsgTx{} + rbuf := bytes.NewReader(multiWitnessFlagNoWitness) + err := m.BtcDecode(rbuf, ProtocolVersion, WitnessEncoding) + if !errors.Is(err, errSuperfluousWitnessRecord) { + t.Fatalf("should have failed with %v", errSuperfluousWitnessRecord) + } +} + // multiTx is a MsgTx with an input and output and used in various tests. var multiTx = &MsgTx{ Version: 1, @@ -910,7 +950,7 @@ var multiTxEncoded = []byte{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash - 0xff, 0xff, 0xff, 0xff, // Prevous output index + 0xff, 0xff, 0xff, 0xff, // Previous output index 0x07, // Varint for length of signature script 0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62, // Signature script 0xff, 0xff, 0xff, 0xff, // Sequence @@ -1001,6 +1041,33 @@ var multiWitnessTx = &MsgTx{ }, } +// multiWitnessFlagNoWitness is the wire encoded bytes for multiWitnessTx with +// the witness flag set but with witnesses omitted. 
+var multiWitnessFlagNoWitness = []byte{ + 0x1, 0x0, 0x0, 0x0, // Version + TxFlagMarker, // Marker byte indicating 0 inputs, or a segwit encoded tx + WitnessFlag, // Flag byte + 0x1, // Varint for number of inputs + 0xa5, 0x33, 0x52, 0xd5, 0x13, 0x57, 0x66, 0xf0, + 0x30, 0x76, 0x59, 0x74, 0x18, 0x26, 0x3d, 0xa2, + 0xd9, 0xc9, 0x58, 0x31, 0x59, 0x68, 0xfe, 0xa8, + 0x23, 0x52, 0x94, 0x67, 0x48, 0x1f, 0xf9, 0xcd, // Previous output hash + 0x13, 0x0, 0x0, 0x0, // Little endian previous output index + 0x0, // No sig script (this is a witness input) + 0xff, 0xff, 0xff, 0xff, // Sequence + 0x1, // Varint for number of outputs + 0xb, 0x7, 0x6, 0x0, 0x0, 0x0, 0x0, 0x0, // Output amount + 0x16, // Varint for length of pk script + 0x0, // Version 0 witness program + 0x14, // OP_DATA_20 + 0x9d, 0xda, 0xc6, 0xf3, 0x9d, 0x51, 0xe0, 0x39, + 0x8e, 0x53, 0x2a, 0x22, 0xc4, 0x1b, 0xa1, 0x89, + 0x40, 0x6a, 0x85, 0x23, // 20-byte pub key hash + 0x00, // No item on the witness stack for the first input + 0x00, // No item on the witness stack for the second input + 0x0, 0x0, 0x0, 0x0, // Lock time +} + // multiWitnessTxEncoded is the wire encoded bytes for multiWitnessTx including inputs // with witness data using protocol version 70012 and is used in the various // tests. diff --git a/wire/msgversion.go b/wire/msgversion.go index 3077f12760..957bae395a 100644 --- a/wire/msgversion.go +++ b/wire/msgversion.go @@ -46,7 +46,7 @@ type MsgVersion struct { // connections. Nonce uint64 - // The user agent that generated messsage. This is a encoded as a varString + // The user agent that generated message. This is a encoded as a varString // on the wire. This has a max length of MaxUserAgentLen. UserAgent string