diff --git a/contrib/linearize/example-linearize.cfg b/contrib/linearize/example-linearize.cfg index 3082d42964..e89f57f4c2 100644 --- a/contrib/linearize/example-linearize.cfg +++ b/contrib/linearize/example-linearize.cfg @@ -19,8 +19,8 @@ max_height=313000 # bootstrap.dat input/output settings (linearize-data) # mainnet -netmagic=f9beb4d9 -genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f +netmagic=43524f57 +genesis=0b2c703dc93bb63a36c4e33b85be4855ddbca2ac951a7a0a29b8de0408200a3c input=/home/example/.raven/blocks # testnet diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 53b80342eb..02facde42c 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -13,8 +13,8 @@ import re import os import os.path +import subprocess import sys -import hashlib import datetime import time from collections import namedtuple @@ -49,23 +49,11 @@ def wordreverse(in_buf): out_words.reverse() return b''.join(out_words) -def calc_hdr_hash(blk_hdr): - hash1 = hashlib.sha256() - hash1.update(blk_hdr) - hash1_o = hash1.digest() - - hash2 = hashlib.sha256() - hash2.update(hash1_o) - hash2_o = hash2.digest() - - return hash2_o - def calc_hash_str(blk_hdr): - hash = calc_hdr_hash(blk_hdr) - hash = bufreverse(hash) - hash = wordreverse(hash) - hash_str = hexlify(hash).decode('utf-8') - return hash_str + x16r_hash_cmd = os.path.dirname(os.path.realpath(__file__)) + "/../../src/test/test_raven_hash" + cmd = [x16r_hash_cmd, hexlify(blk_hdr).decode('utf-8'), "2"] + blk_hash = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout.decode('ascii') + return blk_hash def get_blk_dt(blk_hdr): members = struct.unpack(" #include #include @@ -39,6 +40,37 @@ static RPCTimerInterface* timerInterface = nullptr; /* Map of name to timer. 
*/ static std::map > deadlineTimers; +struct RPCCommandExecutionInfo +{ + std::string method; + int64_t start; +}; + +struct RPCServerInfo +{ + std::mutex mtx; + std::list active_commands GUARDED_BY(mtx); +}; + +static RPCServerInfo g_rpc_server_info; + +struct RPCCommandExecution +{ + std::list::iterator it; + explicit RPCCommandExecution(const std::string& method) + { + g_rpc_server_info.mtx.lock(); + it = g_rpc_server_info.active_commands.insert(g_rpc_server_info.active_commands.end(), {method, GetTimeMicros()}); + g_rpc_server_info.mtx.unlock(); + } + ~RPCCommandExecution() + { + g_rpc_server_info.mtx.lock(); + g_rpc_server_info.active_commands.erase(it); + g_rpc_server_info.mtx.unlock(); + } +}; + static struct CRPCSignals { boost::signals2::signal Started; @@ -263,6 +295,44 @@ UniValue uptime(const JSONRPCRequest& jsonRequest) return GetTime() - GetStartupTime(); } +UniValue getrpcinfo(const JSONRPCRequest& jsonRequest) +{ + + if (jsonRequest.fHelp || jsonRequest.params.size() > 0) + throw std::runtime_error( + "getrpcinfo\n" + "Returns details of the RPC server.\n" + "\nResult:\n" + "{\n" + " \"active_commands\" (array) All active commands\n" + " [\n" + " { (object) Information about an active command\n" + " \"method\" (string) The name of the RPC command \n" + " \"duration\" (numeric) The running time in microseconds\n" + " },...\n" + " ],\n" + "}\n" + + HelpExampleCli("getrpcinfo", "") + + HelpExampleRpc("getrpcinfo", "") + ); + + g_rpc_server_info.mtx.lock(); + UniValue active_commands(UniValue::VARR); + for (const RPCCommandExecutionInfo& info : g_rpc_server_info.active_commands) { + UniValue entry(UniValue::VOBJ); + entry.pushKV("method", info.method); + entry.pushKV("duration", GetTimeMicros() - info.start); + active_commands.push_back(entry); + } + + UniValue result(UniValue::VOBJ); + result.pushKV("active_commands", active_commands); + g_rpc_server_info.mtx.unlock(); + + return result; +} + + /** * Call Table */ @@ -270,6 +340,7 @@ static const 
CRPCCommand vRPCCommands[] = { // category name actor (function) argNames // --------------------- ------------------------ ----------------------- ---------- /* Overall control/query calls */ + { "control", "getrpcinfo", &getrpcinfo, {} }, { "control", "help", &help, {"command"} }, { "control", "stop", &stop, {} }, { "control", "uptime", &uptime, {} }, @@ -496,6 +567,7 @@ UniValue CRPCTable::execute(const JSONRPCRequest &request) const try { + RPCCommandExecution execution(request.strMethod); // Execute, convert arguments to array if necessary if (request.params.isObject()) { return pcmd->actor(transformNamedArguments(request, pcmd->argNames)); diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index 0186f5f20e..c1990df3de 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1502,9 +1502,9 @@ BOOST_FIXTURE_TEST_SUITE(script_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(script_FindAndDelete_test) { - BOOST_TEST_MESSAGE("Running script FindAndDelete Test"); + BOOST_TEST_MESSAGE("Running script find_and_delete Test"); - // Exercise the FindAndDelete functionality + // Exercise the find_and_delete functionality CScript s; CScript d; CScript expect; @@ -1541,7 +1541,7 @@ BOOST_FIXTURE_TEST_SUITE(script_tests, BasicTestingSetup) s = ScriptFromHex("0302ff030302ff03"); d = ScriptFromHex("02"); - expect = s; // FindAndDelete matches entire opcodes + expect = s; // find_and_delete matches entire opcodes BOOST_CHECK_EQUAL(s.FindAndDelete(d), 0); BOOST_CHECK(s == expect); @@ -1586,13 +1586,13 @@ BOOST_FIXTURE_TEST_SUITE(script_tests, BasicTestingSetup) s = CScript() << OP_0 << OP_0 << OP_1 << OP_1; d = CScript() << OP_0 << OP_1; - expect = CScript() << OP_0 << OP_1; // FindAndDelete is single-pass + expect = CScript() << OP_0 << OP_1; // find_and_delete is single-pass BOOST_CHECK_EQUAL(s.FindAndDelete(d), 1); BOOST_CHECK(s == expect); s = CScript() << OP_0 << OP_0 << OP_1 << OP_0 << OP_1 << OP_1; d = CScript() << OP_0 << OP_1; - expect = 
CScript() << OP_0 << OP_1; // FindAndDelete is single-pass + expect = CScript() << OP_0 << OP_1; // find_and_delete is single-pass BOOST_CHECK_EQUAL(s.FindAndDelete(d), 2); BOOST_CHECK(s == expect); diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index 65506cdc1b..d04453f906 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -24,7 +24,7 @@ extern UniValue read_json(const std::string &jsondata); -// Old script.cpp SignatureHash function +// Old script.cpp signature_hash function uint256 static SignatureHashOld(CScript scriptCode, const CTransaction &txTo, unsigned int nIn, int nHashType) { static const uint256 one(uint256S("0000000000000000000000000000000000000000000000000000000000000001")); @@ -168,7 +168,7 @@ BOOST_FIXTURE_TEST_SUITE(sighash_tests, BasicTestingSetup) #endif } - // Goal: check that SignatureHash generates correct hash + // Goal: check that signature_hash generates correct hash BOOST_AUTO_TEST_CASE(sighash_from_data_test) { BOOST_TEST_MESSAGE("Running SigHas From Data Test"); diff --git a/src/test/sigopcount_tests.cpp b/src/test/sigopcount_tests.cpp index b8e334e3c9..86703265a7 100644 --- a/src/test/sigopcount_tests.cpp +++ b/src/test/sigopcount_tests.cpp @@ -28,9 +28,9 @@ BOOST_FIXTURE_TEST_SUITE(sigopcount_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(GetSigOpCount_test) { - BOOST_TEST_MESSAGE("Running GetSigOpCount Test"); + BOOST_TEST_MESSAGE("Running get_sig_op_count Test"); - // Test CScript::GetSigOpCount() + // Test CScript::get_sig_op_count() CScript s1; BOOST_CHECK_EQUAL(s1.GetSigOpCount(false), 0U); BOOST_CHECK_EQUAL(s1.GetSigOpCount(true), 0U); diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py index 9eb58f4958..03203ada6c 100755 --- a/test/functional/combine_logs.py +++ b/test/functional/combine_logs.py @@ -3,13 +3,16 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. -"""Combine logs from multiple raven nodes as well as the test_framework log. + +""" +Combine logs from multiple raven nodes as well as the test_framework log. This streams the combined log output to stdout. Use combine_logs.py > outputfile -to write to an outputfile.""" +to write to an outputfile. +""" import argparse -from collections import (defaultdict, namedtuple) +from collections import defaultdict, namedtuple import heapq import itertools import os @@ -89,6 +92,8 @@ def get_log_events(source, logfile): except FileNotFoundError: print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr) + +# noinspection PyProtectedMember def print_logs(log_events, color=False, html=False): """Renders the iterator of log events into text or html.""" if not html: diff --git a/test/functional/create_cache.py b/test/functional/create_cache.py index 5e6454d66b..917ada3c54 100755 --- a/test/functional/create_cache.py +++ b/test/functional/create_cache.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Create a blockchain cache. + +""" +Create a blockchain cache. Creating a cache of the blockchain speeds up test execution when running multiple functional tests. This helper script is executed by test_runner when multiple diff --git a/test/functional/example_test.py b/test/functional/example_test.py index fc2b846dff..69ae43dc18 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -3,35 +3,25 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""An example functional test + +""" +An example functional test The module-level docstring should include a high-level description of what the test is doing. It's the first thing people see when they open the file and should give the reader information about *what* the test is testing and *how* it's being tested """ + # Imports should be in PEP8 ordering (std library first, then third party # libraries then local imports). + from collections import defaultdict # Avoid wildcard * imports if possible -from test_framework.blocktools import (create_block, create_coinbase) -from test_framework.mininode import ( - CInv, - NetworkThread, - NodeConn, - NodeConnCB, - mininode_lock, - msg_block, - msg_getdata, -) +from test_framework.mininode import CInv, NetworkThread, NodeConn, NodeConnCB, mininode_lock, MsgGetdata from test_framework.test_framework import RavenTestFramework -from test_framework.util import ( - assert_equal, - connect_nodes, - p2p_port, - wait_until, -) +from test_framework.util import assert_equal, connect_nodes, p2p_port # NodeConnCB is a class containing callbacks to be executed when a P2P # message is received from the node-under-test. Subclass NodeConnCB and @@ -40,7 +30,7 @@ class BaseNode(NodeConnCB): def __init__(self): """Initialize the NodeConnCB - Used to inialize custom properties for the Node that aren't + Used to initialize custom properties for the Node that aren't included by default in the base class. 
Be aware that the NodeConnCB base class already stores a counter for each P2P message type and the last received message of each type, which should be sufficient for the @@ -56,8 +46,8 @@ def on_block(self, conn, message): """Override the standard on_block callback Store the hash of a received block in the dictionary.""" - message.block.calc_sha256() - self.block_receive_map[message.block.sha256] += 1 + message.block.calc_x16r() + self.block_receive_map[message.block.calc_x16r] += 1 def on_inv(self, conn, message): """Override the standard on_inv callback""" @@ -80,7 +70,7 @@ class ExampleTest(RavenTestFramework): def set_test_params(self): """Override test parameters for your individual test. - This method must be overridden and num_nodes must be exlicitly set.""" + This method must be overridden and num_nodes must be explicitly set.""" self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes @@ -136,8 +126,7 @@ def run_test(self): # Create a P2P connection to one of the nodes node0 = BaseNode() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)] node0.add_connection(connections[0]) # Start up network handling in another thread. This needs to be called @@ -171,21 +160,7 @@ def run_test(self): self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1 - height = 1 - - for i in range(10): - # Use the mininode and blocktools functionality to manually build a block - # Calling the generate() rpc is easier, but this allows us to exactly - # control the blocks and transactions. 
- block = create_block(self.tip, create_coinbase(height), self.block_time) - block.solve() - block_message = msg_block(block) - # Send message is used to send a P2P message to the node over our NodeConn connection - node0.send_message(block_message) - self.tip = block.sha256 - blocks.append(self.tip) - self.block_time += 1 - height += 1 + self.nodes[0].generate(10) self.log.info("Wait for node1 to reach current tip (height 11) using RPC") self.nodes[1].waitforblockheight(11) @@ -201,14 +176,11 @@ def run_test(self): self.log.info("Wait for node2 reach current tip. Test that it has propagated all the blocks to us") - getdata_request = msg_getdata() + getdata_request = MsgGetdata() for block in blocks: getdata_request.inv.append(CInv(2, block)) node2.send_message(getdata_request) - - # wait_until() will loop until a predicate condition is met. Use it to test properties of the - # NodeConnCB objects. - wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5, lock=mininode_lock) + self.sync_all([self.nodes[1:2]]) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving diff --git a/test/functional/feature_assets.py b/test/functional/feature_assets.py index 482df7ec08..fa065b36f5 100755 --- a/test/functional/feature_assets.py +++ b/test/functional/feature_assets.py @@ -3,12 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Testing asset use cases -""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_is_hash_string, assert_does_not_contain_key, assert_raises_rpc_error, JSONRPCException, Decimal) +"""Testing asset use cases""" +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, assert_is_hash_string, assert_does_not_contain_key, assert_raises_rpc_error, JSONRPCException, Decimal import string @@ -35,7 +34,7 @@ def big_test(self): self.log.info("Calling issue()...") address0 = n0.getnewaddress() ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8" - n0.issue(asset_name="MY_ASSET", qty=1000, to_address=address0, change_address="", \ + n0.issue(asset_name="MY_ASSET", qty=1000, to_address=address0, change_address="", units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) self.log.info("Waiting for ten confirmations after issue...") @@ -62,8 +61,7 @@ def big_test(self): assert_equal(len(myassets["MY_ASSET"]["outpoints"]), 1) assert_equal(len(myassets["MY_ASSET!"]["outpoints"]), 1) assert_is_hash_string(myassets["MY_ASSET"]["outpoints"][0]["txid"]) - assert_equal(myassets["MY_ASSET"]["outpoints"][0]["txid"], \ - myassets["MY_ASSET!"]["outpoints"][0]["txid"]) + assert_equal(myassets["MY_ASSET"]["outpoints"][0]["txid"], myassets["MY_ASSET!"]["outpoints"][0]["txid"]) assert(int(myassets["MY_ASSET"]["outpoints"][0]["vout"]) >= 0) assert(int(myassets["MY_ASSET!"]["outpoints"][0]["vout"]) >= 0) assert_equal(myassets["MY_ASSET"]["outpoints"][0]["amount"], 1000) @@ -103,10 +101,10 @@ def big_test(self): assert_equal(sum(n0.listaddressesbyasset("MY_ASSET").values()), 1000) assert_equal(sum(n1.listaddressesbyasset("MY_ASSET").values()), 1000) for assaddr in n0.listaddressesbyasset("MY_ASSET").keys(): - if n0.validateaddress(assaddr)["ismine"] == True: + if n0.validateaddress(assaddr)["ismine"]: changeaddress = assaddr 
assert_equal(n0.listassetbalancesbyaddress(changeaddress)["MY_ASSET"], 800) - assert(changeaddress != None) + assert(changeaddress is not None) assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET!"], 1) self.log.info("Burning all units to test reissue on zero units...") @@ -117,8 +115,7 @@ def big_test(self): self.log.info("Calling reissue()...") address1 = n0.getnewaddress() ipfs_hash2 = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8" - n0.reissue(asset_name="MY_ASSET", qty=2000, to_address=address0, change_address=address1, \ - reissuable=False, new_unit=-1, new_ipfs=ipfs_hash2) + n0.reissue(asset_name="MY_ASSET", qty=2000, to_address=address0, change_address=address1, reissuable=False, new_unit=-1, new_ipfs=ipfs_hash2) self.log.info("Waiting for ten confirmations after reissue...") self.sync_all() @@ -147,8 +144,7 @@ def big_test(self): n0.listassets(asset="RAVEN*", verbose=False, count=2, start=-2) self.log.info("Creating some sub-assets...") - n0.issue(asset_name="MY_ASSET/SUB1", qty=1000, to_address=address0, change_address=address0,\ - units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) + n0.issue(asset_name="MY_ASSET/SUB1", qty=1000, to_address=address0, change_address=address0, units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) self.sync_all() self.log.info("Waiting for ten confirmations after issuesubasset...") @@ -175,22 +171,16 @@ def issue_param_checks(self): n0 = self.nodes[0] # just plain bad asset name - assert_raises_rpc_error(-8, "Invalid asset name: bad-asset-name", \ - n0.issue, "bad-asset-name") + assert_raises_rpc_error(-8, "Invalid asset name: bad-asset-name", n0.issue, "bad-asset-name") # trying to issue things that can't be issued - assert_raises_rpc_error(-8, "Unsupported asset type: OWNER", \ - n0.issue, "AN_OWNER!") - assert_raises_rpc_error(-8, "Unsupported asset type: VOTE", \ - n0.issue, "A_VOTE^PEDRO") + assert_raises_rpc_error(-8, "Unsupported asset type: OWNER", n0.issue, "AN_OWNER!") + 
assert_raises_rpc_error(-8, "Unsupported asset type: VOTE", n0.issue, "A_VOTE^PEDRO") # check bad unique params - assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \ - n0.issue, "A_UNIQUE#ASSET", 2) - assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \ - n0.issue, "A_UNIQUE#ASSET", 1, "", "", 1) - assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \ - n0.issue, "A_UNIQUE#ASSET", 1, "", "", 0, True) + assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", n0.issue, "A_UNIQUE#ASSET", 2) + assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", n0.issue, "A_UNIQUE#ASSET", 1, "", "", 1) + assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", n0.issue, "A_UNIQUE#ASSET", 1, "", "", 0, True) def chain_assets(self): self.log.info("Issuing chained assets in depth issue()...") @@ -199,15 +189,13 @@ def chain_assets(self): chain_address = n0.getnewaddress() ipfs_hash = "QmacSRmrkVmvJfbCpmU6pK72furJ8E8fbKHindrLxmYMQo" chain_string = "CHAIN1" - n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \ - units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) + n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) for c in string.ascii_uppercase: chain_string += '/' + c if len(chain_string) > 30: break - n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \ - units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) + n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) n0.generate(1) self.sync_all() @@ -218,14 +206,12 @@ def chain_assets(self): self.log.info("Issuing chained assets in width issue()...") chain_address = n0.getnewaddress() chain_string = 
"CHAIN2" - n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \ - units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) + n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) for c in string.ascii_uppercase: asset_name = chain_string + '/' + c - n0.issue(asset_name=asset_name, qty=1000, to_address=chain_address, change_address="", \ - units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) + n0.issue(asset_name=asset_name, qty=1000, to_address=chain_address, change_address="", units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) n0.generate(1) self.sync_all() @@ -235,21 +221,18 @@ def chain_assets(self): self.log.info("Chaining reissue transactions...") address0 = n0.getnewaddress() - n0.issue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(1) self.sync_all() - n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \ - reissuable=True) + n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", reissuable=True) assert_raises_rpc_error(-4, "Error: The transaction was rejected! 
Reason given: bad-tx-reissue-chaining-not-allowed", n0.reissue, "CHAIN_REISSUE", 1000, address0, "", True) n0.generate(1) self.sync_all() - n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \ - reissuable=True) + n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", reissuable=True) n0.generate(1) self.sync_all() @@ -276,8 +259,7 @@ def ipfs_state(self): # bad hash (isn't a valid multihash sha2-256) self.log.info("Testing issue asset with invalid IPFS...") try: - n0.issue(asset_name=asset_name1, qty=1000, to_address=address1, change_address=address2, \ - units=0, reissuable=True, has_ipfs=True, ipfs_hash=bad_hash) + n0.issue(asset_name=asset_name1, qty=1000, to_address=address1, change_address=address2, units=0, reissuable=True, has_ipfs=True, ipfs_hash=bad_hash) except JSONRPCException as e: if "Invalid IPFS/Txid hash" not in e.error['message']: raise AssertionError("Expected substring not found:" + e.error['message']) @@ -289,8 +271,7 @@ def ipfs_state(self): ######################################## # no hash self.log.info("Testing issue asset with no IPFS...") - n0.issue(asset_name=asset_name2, qty=1000, to_address=address1, change_address=address2, \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name=asset_name2, qty=1000, to_address=address1, change_address=address2, units=0, reissuable=True, has_ipfs=False) n0.generate(1) ad = n0.getassetdata(asset_name2) assert_equal(0, ad['has_ipfs']) @@ -300,8 +281,7 @@ def ipfs_state(self): # reissue w/ bad hash self.log.info("Testing re-issue asset with invalid IPFS...") try: - n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, \ - reissuable=True, new_unit=-1, new_ipfs=bad_hash) + n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, reissuable=True, new_unit=-1, new_ipfs=bad_hash) except JSONRPCException as e: if "Invalid IPFS/Txid hash" not in 
e.error['message']: raise AssertionError("Expected substring not found:" + e.error['message']) @@ -313,8 +293,7 @@ def ipfs_state(self): ######################################## # reissue w/ hash self.log.info("Testing re-issue asset with valid IPFS...") - n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, \ - reissuable=True, new_unit=-1, new_ipfs=ipfs_hash) + n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, reissuable=True, new_unit=-1, new_ipfs=ipfs_hash) n0.generate(1) ad = n0.getassetdata(asset_name2) assert_equal(1, ad['has_ipfs']) @@ -342,6 +321,7 @@ def db_corruption_regression(self): a = n0.generate(1)[0] n0.reissue(asset_name, 500, n0.getnewaddress()) + # noinspection PyStatementEffect n0.generate(1)[0] self.log.info(f"Invalidating {a}...") @@ -365,41 +345,13 @@ def reissue_prec_change(self): n0.reissue(asset_name, 10.0**(-i), address, "", True, i+1) n0.generate(1) assert_equal(i+1, n0.listassets("*", True)[asset_name]["units"]) - assert_raises_rpc_error(-25, "Error: Unable to reissue asset: unit must be larger than current unit selection", \ - n0.reissue, asset_name, 10.0**(-i), address, "", True, i) + assert_raises_rpc_error(-25, "Error: Unable to reissue asset: unit must be larger than current unit selection", n0.reissue, asset_name, 10.0**(-i), address, "", True, i) n0.reissue(asset_name, 0.00000001, address) n0.generate(1) assert_equal(Decimal('11.11111111'), n0.listassets("*", True)[asset_name]["amount"]) - def issue_transfer_change(self): - self.log.info("Testing specified RVN and asset change on issue and transfer...") - n0 = self.nodes[0] - - asset_name = "TRC" - issue_qty = 50 - issue_address = n0.getnewaddress() - issue_rvn_change = n0.getnewaddress() - - assert_equal(0, n0.getreceivedbyaddress(issue_rvn_change)) - n0.issue(asset_name, issue_qty, issue_address, issue_rvn_change) - n0.generate(1) - assert(n0.getreceivedbyaddress(issue_rvn_change) > 0) - - 
transfer_address = n0.getnewaddress() - transfer_asset_change = n0.getnewaddress() - transfer_rvn_change = n0.getnewaddress() - transfer_qty = 5 - change_qty = issue_qty - transfer_qty - - assert_equal(0, n0.getreceivedbyaddress(transfer_rvn_change)) - n0.transfer(asset_name, 5, transfer_address, "", 0, transfer_rvn_change, transfer_asset_change) - n0.generate(1) - assert(n0.getreceivedbyaddress(transfer_rvn_change) > 0) - assert_equal(transfer_qty, n0.listassetbalancesbyaddress(transfer_address)[asset_name]) - assert_equal(change_qty, n0.listassetbalancesbyaddress(transfer_asset_change)[asset_name]) - def run_test(self): self.activate_assets() self.big_test() @@ -408,7 +360,6 @@ def run_test(self): self.ipfs_state() self.db_corruption_regression() self.reissue_prec_change() - self.issue_transfer_change() if __name__ == '__main__': diff --git a/test/functional/feature_assets_mempool.py b/test/functional/feature_assets_mempool.py index 8a4e5de59c..d2b22ba6e3 100755 --- a/test/functional/feature_assets_mempool.py +++ b/test/functional/feature_assets_mempool.py @@ -3,15 +3,12 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Testing asset mempool use cases -""" +"""Testing asset mempool use cases""" + from test_framework.test_framework import RavenTestFramework from test_framework.util import assert_equal, disconnect_all_nodes, connect_all_nodes_bi - -import string - class AssetMempoolTest(RavenTestFramework): def set_test_params(self): self.setup_clean_chain = True @@ -68,8 +65,7 @@ def issue_mempool_test_extended(self): # Reissue that asset address1 = n0.getnewaddress() - n0.reissue(asset_name=asset_name, qty=2000, to_address=address1, change_address='', \ - reissuable=True, new_unit=-1) + n0.reissue(asset_name=asset_name, qty=2000, to_address=address1, change_address='', reissuable=True, new_unit=-1) n0.generate(15) # Get a transfer address @@ -91,7 +87,7 @@ def issue_mempool_test_extended(self): # Connect the nodes, a reorg should occur connect_all_nodes_bi(self.nodes, True) - # Asset the reorg occured + # Asset the reorg occurred assert_equal(n0.getblockcount(), n1.getblockcount()) assert_equal(n0.getbestblockhash(), n1.getbestblockhash()) diff --git a/test/functional/feature_assets_reorg.py b/test/functional/feature_assets_reorg.py index f02c839c1d..a7123f4265 100755 --- a/test/functional/feature_assets_reorg.py +++ b/test/functional/feature_assets_reorg.py @@ -3,14 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Testing asset reorg use cases - -""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, disconnect_all_nodes, connect_all_nodes_bi) +"""Testing asset reorg use cases""" -import string +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, disconnect_all_nodes, connect_all_nodes_bi class AssetReorgTest(RavenTestFramework): def set_test_params(self): @@ -115,7 +112,6 @@ def reorg_chain_state_test(self): # Mine 44 blocks on chain 2 n1.generate(20) node_1_hash_20 = n1.getbestblockhash() - node_1_height_20 = n1.getblockcount() n1.generate(24) node_1_hash_44 = n1.getbestblockhash() diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index cc706789ba..cd81760c46 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test logic for skipping signature validation on old blocks. + +""" +Test logic for skipping signature validation on old blocks. Test logic for skipping signature validation on blocks which we've assumed valid (https://github.com/RavenProject/Ravencoin/pull/9484) @@ -30,27 +32,18 @@ block 200. node2 will reject block 102 since it's assumed valid, but it isn't buried by at least two weeks' work. 
""" -import time -from test_framework.blocktools import (create_block, create_coinbase) +import time +from test_framework.blocktools import create_block, create_coinbase from test_framework.key import CECKey -from test_framework.mininode import (CBlockHeader, - COutPoint, - CTransaction, - CTxIn, - CTxOut, - NetworkThread, - NodeConn, - NodeConnCB, - msg_block, - msg_headers) -from test_framework.script import (CScript, OP_TRUE) +from test_framework.mininode import CBlockHeader, COutPoint, CTransaction, CTxIn, CTxOut, NetworkThread, NodeConn, NodeConnCB, MsgBlock, MsgHeaders +from test_framework.script import CScript, OP_TRUE from test_framework.test_framework import RavenTestFramework -from test_framework.util import (p2p_port, assert_equal) +from test_framework.util import p2p_port, assert_equal class BaseNode(NodeConnCB): def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() + headers_message = MsgHeaders() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) @@ -72,12 +65,13 @@ def send_blocks_until_disconnected(self, node): if not node.connection: break try: - node.send_message(msg_block(self.blocks[i])) + node.send_message(MsgBlock(self.blocks[i])) except IOError as e: assert str(e) == 'Not connected, no pushbuf' break - def assert_blockchain_height(self, node, height): + @staticmethod + def assert_blockchain_height(node, height): """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" last_height = node.getblock(node.getbestblockhash())['height'] timeout = 10 @@ -88,7 +82,7 @@ def assert_blockchain_height(self, node, height): last_height = current_height if timeout < 0: assert False, "blockchain too short after timeout: %d" % current_height - timeout - 0.25 + timeout = timeout - 0.25 continue elif current_height > height: assert False, "blockchain too long: %d" % current_height @@ -99,8 +93,7 @@ def run_test(self): # Connect to node0 node0 = 
BaseNode() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)] node0.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread @@ -139,9 +132,9 @@ def run_test(self): # Create a transaction spending the coinbase output with an invalid (null) signature tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) + tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), script_sig=b"")) tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) - tx.calc_sha256() + tx.calc_x16r() block102 = create_block(self.tip, create_coinbase(height), self.block_time) self.block_time += 1 @@ -190,7 +183,7 @@ def run_test(self): # Send all blocks to node1. All blocks will be accepted. for i in range(2202): - node1.send_message(msg_block(self.blocks[i])) + node1.send_message(MsgBlock(self.blocks[i])) # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. node1.sync_with_ping(120) assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202) diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 99d71fdb33..7a30deeda8 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -3,24 +3,15 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test BIP68 implementation.""" +import time +import random from test_framework.test_framework import RavenTestFramework -from test_framework.util import ( satoshi_round, - assert_raises_rpc_error, - get_bip9_status, - assert_equal, - assert_greater_than, - sync_blocks) -from test_framework.blocktools import ( CTransaction, - COIN, - CTxIn, - COutPoint, - CTxOut, - CScript, - create_block, - create_coinbase) -from test_framework.mininode import (ToHex, from_hex) +from test_framework.util import satoshi_round, assert_raises_rpc_error, get_bip9_status, assert_equal,assert_greater_than, sync_blocks +from test_framework.blocktools import CTransaction, COIN, CTxIn, COutPoint, CTxOut, CScript, create_block, create_coinbase +from test_framework.mininode import to_hex, from_hex SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31) SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height) @@ -66,10 +57,10 @@ def test_disable_flag(self): # If sequence locks were used, this would require 1 block for the # input to mature. 
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 - tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] + tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), n_sequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] - tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] + tx1_signed = self.nodes[0].signrawtransaction(to_hex(tx1))["hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) @@ -78,17 +69,17 @@ def test_disable_flag(self): tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff - tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] + tx2.vin = [CTxIn(COutPoint(tx1_id, 0), n_sequence=sequence_value)] tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))] tx2.rehash() - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, to_hex(tx2)) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. tx2.nVersion = 1 - self.nodes[0].sendrawtransaction(ToHex(tx2)) + self.nodes[0].sendrawtransaction(to_hex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). 
@@ -105,7 +96,6 @@ def test_sequence_lock_confirmed_inputs(self): while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: - import random random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} @@ -167,17 +157,17 @@ def test_sequence_lock_confirmed_inputs(self): time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) - elif (not input_will_pass and time_delta <= cur_time - orig_time): + elif not input_will_pass and time_delta <= cur_time - orig_time: sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG - tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) + tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), n_sequence=sequence_value)) value += utxos[j]["amount"]*COIN # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output - tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 + tx_size = len(to_hex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a']))) - rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] + rawtx = self.nodes[0].signrawtransaction(to_hex(tx))["hex"] - if (using_sequence_locks and not should_pass): + if using_sequence_locks and not should_pass: # This transaction should be rejected assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) else: @@ -202,9 +192,9 @@ def test_sequence_lock_unconfirmed_inputs(self): # Sequence lock of 0 should pass. 
tx2 = CTransaction() tx2.nVersion = 2 - tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] + tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), n_sequence=0)] tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] - tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] + tx2_raw = self.nodes[0].signrawtransaction(to_hex(tx2))["hex"] tx2 = from_hex(tx2, tx2_raw) tx2.rehash() @@ -220,16 +210,16 @@ def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): tx = CTransaction() tx.nVersion = 2 - tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] + tx.vin = [CTxIn(COutPoint(orig_tx.x16r, 0), n_sequence=sequence_value)] tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))] tx.rehash() - if (orig_tx.hash in node.getrawmempool()): + if orig_tx.hash in node.getrawmempool(): # sendrawtransaction should fail if the tx is in the mempool - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, to_hex(tx)) else: # sendrawtransaction should succeed if the tx is not in the mempool - node.sendrawtransaction(ToHex(tx)) + node.sendrawtransaction(to_hex(tx)) return tx @@ -275,9 +265,9 @@ def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): assert(tx5.hash not in self.nodes[0].getrawmempool()) utxos = self.nodes[0].listunspent() - tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) + tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), n_sequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) - raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] + raw_tx5 = self.nodes[0].signrawtransaction(to_hex(tx5))["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) @@ -307,7 +297,7 @@ def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): block.solve() tip = 
block.sha256 height += 1 - self.nodes[0].submitblock(ToHex(block)) + self.nodes[0].submitblock(to_hex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() @@ -333,26 +323,26 @@ def test_bip68_not_consensus(self): # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 - tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] + tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), n_sequence=0)] tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] # sign tx2 - tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] + tx2_raw = self.nodes[0].signrawtransaction(to_hex(tx2))["hex"] tx2 = from_hex(tx2, tx2_raw) tx2.rehash() - self.nodes[0].sendrawtransaction(ToHex(tx2)) + self.nodes[0].sendrawtransaction(to_hex(tx2)) # Now make an invalid spend of tx2 according to BIP68 sequence_value = 100 # 100 block relative locktime tx3 = CTransaction() tx3.nVersion = 2 - tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] + tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), n_sequence=sequence_value)] tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))] tx3.rehash() - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, to_hex(tx3)) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) @@ -363,10 +353,10 @@ def test_bip68_not_consensus(self): block.rehash() block.solve() - self.nodes[0].submitblock(ToHex(block)) + self.nodes[0].submitblock(to_hex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) - def activateCSV(self): + def activate_csv(self): # activation should happen at block height 432 (3 periods) # getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block. 
min_activation_height = 432 @@ -386,7 +376,7 @@ def test_version2_relay(self): rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = from_hex(CTransaction(), rawtxfund) tx.nVersion = 2 - tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"] + tx_signed = self.nodes[1].signrawtransaction(to_hex(tx))["hex"] self.nodes[1].sendrawtransaction(tx_signed) if __name__ == '__main__': diff --git a/test/functional/feature_bip_softforks.py b/test/functional/feature_bip_softforks.py index 7cf5abb9c2..bf64a03004 100755 --- a/test/functional/feature_bip_softforks.py +++ b/test/functional/feature_bip_softforks.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP 9 soft forks. + +""" +Test BIP 9 soft forks. Connect to a single node. regtest lock-in with 108/144 block signalling @@ -16,11 +18,11 @@ test that enforcement has not triggered (which triggers ACTIVE) test that enforcement has triggered """ + from io import BytesIO import shutil import time import itertools - from test_framework.test_framework import ComparisonTestFramework from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, assert_equal from test_framework.mininode import CTransaction, NetworkThread @@ -40,7 +42,8 @@ def run_test(self): NetworkThread().start() # Start up network handling in another thread self.test.run() - def create_transaction(self, node, coinbase, to_address, amount): + @staticmethod + def create_transaction(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{ "txid" : from_txid, "vout" : 0}] outputs = { to_address : amount } @@ -51,14 +54,17 @@ def create_transaction(self, node, coinbase, to_address, amount): tx.nVersion = 2 return tx - def sign_transaction(self, node, tx): + @staticmethod + def sign_transaction(node, tx): signresult = 
node.signrawtransaction(bytes_to_hex_str(tx.serialize())) tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx - def generate_blocks(self, number, version, test_blocks = []): + def generate_blocks(self, number, version, test_blocks=None): + if test_blocks is None: + test_blocks = [] for _ in range(number): block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = version @@ -74,9 +80,9 @@ def get_bip9_status(self, key): info = self.nodes[0].getblockchaininfo() return info['bip9_softforks'][key] - def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno): - assert_equal(self.get_bip9_status(bipName)['status'], 'defined') - assert_equal(self.get_bip9_status(bipName)['since'], 0) + def test_bip(self, bip_name, activated_version, invalidate, invalidate_post_signature, bit_no): + assert_equal(self.get_bip9_status(bip_name)['status'], 'defined') + assert_equal(self.get_bip9_status(bip_name)['since'], 0) # generate some coins for later self.coinbase_blocks = self.nodes[0].generate(2) @@ -85,11 +91,11 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu self.nodeaddress = self.nodes[0].getnewaddress() self.last_block_time = int(time.time()) - assert_equal(self.get_bip9_status(bipName)['status'], 'defined') - assert_equal(self.get_bip9_status(bipName)['since'], 0) + assert_equal(self.get_bip9_status(bip_name)['status'], 'defined') + assert_equal(self.get_bip9_status(bip_name)['since'], 0) tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert(bipName not in tmpl['vbavailable']) + assert(bip_name not in tmpl['rules']) + assert(bip_name not in tmpl['vbavailable']) assert_equal(tmpl['vbrequired'], 0) assert_equal(tmpl['version'], 0x20000000) @@ -98,44 +104,44 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu test_blocks = self.generate_blocks(141, 4) yield 
TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - assert_equal(self.get_bip9_status(bipName)['since'], 144) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) + assert_equal(self.get_bip9_status(bip_name)['status'], 'started') + assert_equal(self.get_bip9_status(bip_name)['since'], 144) + assert_equal(self.get_bip9_status(bip_name)['statistics']['elapsed'], 0) + assert_equal(self.get_bip9_status(bip_name)['statistics']['count'], 0) tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert_equal(tmpl['vbavailable'][bipName], bitno) + assert(bip_name not in tmpl['rules']) + assert_equal(tmpl['vbavailable'][bip_name], bit_no) assert_equal(tmpl['vbrequired'], 0) assert(tmpl['version'] & activated_version) # Test 1-A # check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period - test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not) + self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not) test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready) yield TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) + assert_equal(self.get_bip9_status(bip_name)['statistics']['elapsed'], 46) + assert_equal(self.get_bip9_status(bip_name)['statistics']['count'], 10) + assert_equal(self.get_bip9_status(bip_name)['statistics']['possible'], True) # Test 1-B # check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not) yield 
TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False) + assert_equal(self.get_bip9_status(bip_name)['statistics']['elapsed'], 47) + assert_equal(self.get_bip9_status(bip_name)['statistics']['count'], 10) + assert_equal(self.get_bip9_status(bip_name)['statistics']['possible'], False) # Test 1-C # finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready) yield TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') + assert_equal(self.get_bip9_status(bip_name)['statistics']['elapsed'], 0) + assert_equal(self.get_bip9_status(bip_name)['statistics']['count'], 0) + assert_equal(self.get_bip9_status(bip_name)['statistics']['possible'], True) + assert_equal(self.get_bip9_status(bip_name)['status'], 'started') # Test 2 # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1 @@ -146,13 +152,13 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not) yield TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') - assert_equal(self.get_bip9_status(bipName)['since'], 144) - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0) + assert_equal(self.get_bip9_status(bip_name)['status'], 'started') + 
assert_equal(self.get_bip9_status(bip_name)['since'], 144) + assert_equal(self.get_bip9_status(bip_name)['statistics']['elapsed'], 0) + assert_equal(self.get_bip9_status(bip_name)['statistics']['count'], 0) tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) - assert_equal(tmpl['vbavailable'][bipName], bitno) + assert(bip_name not in tmpl['rules']) + assert_equal(tmpl['vbavailable'][bip_name], bit_no) assert_equal(tmpl['vbrequired'], 0) assert(tmpl['version'] & activated_version) @@ -166,29 +172,29 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu yield TestInstance(test_blocks, sync_every_block=False) # check counting stats and "possible" flag before last block of this period achieves LOCKED_IN... - assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143) - assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107) - assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True) - assert_equal(self.get_bip9_status(bipName)['status'], 'started') + assert_equal(self.get_bip9_status(bip_name)['statistics']['elapsed'], 143) + assert_equal(self.get_bip9_status(bip_name)['statistics']['count'], 107) + assert_equal(self.get_bip9_status(bip_name)['statistics']['possible'], True) + assert_equal(self.get_bip9_status(bip_name)['status'], 'started') # ...continue with Test 3 test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready) yield TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') - assert_equal(self.get_bip9_status(bipName)['since'], 576) + assert_equal(self.get_bip9_status(bip_name)['status'], 'locked_in') + assert_equal(self.get_bip9_status(bip_name)['since'], 576) tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) + assert(bip_name not in tmpl['rules']) # Test 4 # 143 more version 536870913 blocks (waiting period-1) test_blocks = 
self.generate_blocks(143, 4) yield TestInstance(test_blocks, sync_every_block=False) - assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') - assert_equal(self.get_bip9_status(bipName)['since'], 576) + assert_equal(self.get_bip9_status(bip_name)['status'], 'locked_in') + assert_equal(self.get_bip9_status(bip_name)['since'], 576) tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName not in tmpl['rules']) + assert(bip_name not in tmpl['rules']) # Test 5 # Check that the new rule is enforced @@ -197,7 +203,7 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu invalidate(spendtx) spendtx = self.sign_transaction(self.nodes[0], spendtx) spendtx.rehash() - invalidatePostSignature(spendtx) + invalidate_post_signature(spendtx) spendtx.rehash() block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = activated_version @@ -211,13 +217,13 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu self.height += 1 yield TestInstance([[block, True]]) - assert_equal(self.get_bip9_status(bipName)['status'], 'active') - assert_equal(self.get_bip9_status(bipName)['since'], 720) + assert_equal(self.get_bip9_status(bip_name)['status'], 'active') + assert_equal(self.get_bip9_status(bip_name)['since'], 720) tmpl = self.nodes[0].getblocktemplate({}) - assert(bipName in tmpl['rules']) - assert(bipName not in tmpl['vbavailable']) + assert(bip_name in tmpl['rules']) + assert(bip_name not in tmpl['vbavailable']) assert_equal(tmpl['vbrequired'], 0) - assert(not (tmpl['version'] & (1 << bitno))) + assert(not (tmpl['version'] & (1 << bit_no))) # Test 6 # Check that the new sequence lock rules are enforced @@ -226,7 +232,7 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu invalidate(spendtx) spendtx = self.sign_transaction(self.nodes[0], spendtx) spendtx.rehash() - invalidatePostSignature(spendtx) + invalidate_post_signature(spendtx) 
spendtx.rehash() block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1) @@ -251,23 +257,25 @@ def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignatu def get_tests(self): for test in itertools.chain( - self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0), - self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0), - self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0) + self.test_bip('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0), + self.test_bip('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0), + self.test_bip('csv', 0x20000001, self.donothing, self.csv_invalidate, 0) ): yield test def donothing(self, tx): return - def csv_invalidate(self, tx): + @staticmethod + def csv_invalidate(tx): """Modify the signature in vin 0 of the tx to fail CSV Prepends -1 CSV DROP in the scriptSig itself. """ tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig))) - def sequence_lock_invalidate(self, tx): + @staticmethod + def sequence_lock_invalidate(tx): """Modify the nSequence to make it fails once sequence lock rule is activated (high timespan). """ diff --git a/test/functional/p2p_fullblock.py b/test/functional/feature_block.py similarity index 87% rename from test/functional/p2p_fullblock.py rename to test/functional/feature_block.py index 4fb203baa2..444e972cc2 100755 --- a/test/functional/p2p_fullblock.py +++ b/test/functional/feature_block.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test block processing. + +""" +Test block processing. This reimplements tests from the ravenj/FullBlockTestGenerator used by the pull-tester. @@ -12,56 +14,25 @@ each test. 
""" +import time +import copy +import struct from test_framework.test_framework import ComparisonTestFramework from test_framework.util import assert_equal -from test_framework.comptool import (TestManager, TestInstance, RejectResult) -from test_framework.blocktools import (CBlock, - copy, - NetworkThread, - create_transaction, - create_coinbase, - create_block, - CBlockHeader, - MAX_BLOCK_BASE_SIZE, - CTxIn, - CTxOut, - COutPoint, - get_legacy_sigopcount_block, - uint256_from_compact, - COIN) -import time +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.mininode import uint256_from_compact, NetworkThread, MAX_BLOCK_BASE_SIZE, CBlockHeader +from test_framework.blocktools import CBlock, create_transaction, create_coinbase, create_block, CTxIn, CTxOut, COutPoint, get_legacy_sigopcount_block, COIN from test_framework.key import CECKey -from test_framework.script import (CTransaction, - CScript, - OP_TRUE, - SignatureHash, - SIGHASH_ALL, - OP_CHECKSIG, - OP_CHECKMULTISIG, - OP_CHECKMULTISIGVERIFY, - OP_CHECKSIGVERIFY, - OP_2DUP, - hash160, - OP_HASH160, - OP_EQUAL, - ser_uint256, - uint256_from_str, - MAX_SCRIPT_ELEMENT_SIZE, - OP_IF, - OP_INVALIDOPCODE, - OP_ELSE, - OP_ENDIF, - OP_FALSE, - OP_RETURN) -import struct +from test_framework.script import (CTransaction, CScript, OP_TRUE, signature_hash, SIGHASH_ALL, OP_CHECKSIG, OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_CHECKSIGVERIFY, OP_2DUP, + hash160, OP_HASH160, OP_EQUAL, ser_uint256, uint256_from_str, MAX_SCRIPT_ELEMENT_SIZE, OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_ENDIF, OP_FALSE, OP_RETURN) -class PreviousSpendableOutput(): +class PreviousSpendableOutput: def __init__(self, tx = CTransaction(), n = -1): self.tx = tx self.n = n # the output we're spending # Use this class for tests that require behavior other than normal "mininode" behavior. -# For now, it is used to serialize a bloated varint (b64). +# For now, it is used to serialize a bloated variant (b64). 
class CBrokenBlock(CBlock): def __init__(self, header=None): super(CBrokenBlock, self).__init__(header) @@ -70,7 +41,7 @@ def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() - def serialize(self): + def serialize(self, **kwargs): r = b"" r += super(CBlock, self).serialize() r += struct.pack(" b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) # Test that a block with a lot of checksigs is okay - lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1)) + lots_of_check_sigs = CScript([OP_CHECKSIG] * (max_block_sigops - 1)) tip(13) - block(15, spend=out[5], script=lots_of_checksigs) + block(15, spend=out[5], script=lots_of_check_sigs) yield accepted() save_spendable_output() # Test that a block with too many checksigs is rejected - too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) + too_many_checksigs = CScript([OP_CHECKSIG] * max_block_sigops) block(16, spend=out[6], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) @@ -427,7 +402,7 @@ def update_block(block_number, new_transactions): b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. 
- b26 = update_block(26, []) + update_block(26, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b26 chain to make sure ravend isn't accepting b26 @@ -439,7 +414,7 @@ def update_block(block_number, new_transactions): b28 = block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() - b28 = update_block(28, []) + update_block(28, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b28 chain to make sure ravend isn't accepting b28 @@ -451,7 +426,7 @@ def update_block(block_number, new_transactions): b30 = block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() - b30 = update_block(30, []) + update_block(30, []) yield accepted() save_spendable_output() @@ -464,39 +439,39 @@ def update_block(block_number, new_transactions): # # MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end. - lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) + lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((max_block_sigops-1) // 20) + [OP_CHECKSIG] * 19) b31 = block(31, spend=out[8], script=lots_of_multisigs) - assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS) + assert_equal(get_legacy_sigopcount_block(b31), max_block_sigops) yield accepted() save_spendable_output() # this goes over the limit because the coinbase has one sigop - too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20)) + too_many_multisigs = CScript([OP_CHECKMULTISIG] * (max_block_sigops // 20)) b32 = block(32, spend=out[9], script=too_many_multisigs) - assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1) + assert_equal(get_legacy_sigopcount_block(b32), max_block_sigops + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKMULTISIGVERIFY tip(31) - lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19) + lots_of_multisigs = 
CScript([OP_CHECKMULTISIGVERIFY] * ((max_block_sigops-1) // 20) + [OP_CHECKSIG] * 19) block(33, spend=out[9], script=lots_of_multisigs) yield accepted() save_spendable_output() - too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20)) + too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (max_block_sigops // 20)) block(34, spend=out[10], script=too_many_multisigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKSIGVERIFY tip(33) - lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1)) - b35 = block(35, spend=out[10], script=lots_of_checksigs) + lots_of_check_sigs = CScript([OP_CHECKSIGVERIFY] * (max_block_sigops - 1)) + b35 = block(35, spend=out[10], script=lots_of_check_sigs) yield accepted() save_spendable_output() - too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS)) + too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * max_block_sigops) block(36, spend=out[11], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) @@ -514,7 +489,7 @@ def update_block(block_number, new_transactions): b37 = block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) tx = create_and_sign_tx(out[11].tx, out[11].n, 0) - b37 = update_block(37, [tx]) + update_block(37, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid @@ -534,7 +509,7 @@ def update_block(block_number, new_transactions): # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # tip(35) - b39 = block(39) + block(39) b39_outputs = 0 b39_sigops_per_output = 6 @@ -554,10 +529,9 @@ def update_block(block_number, new_transactions): b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE - tx_new = None tx_last = tx total_size=len(b39.serialize()) - while(total_size < MAX_BLOCK_BASE_SIZE): + while total_size < MAX_BLOCK_BASE_SIZE: tx_new = 
create_tx(tx_last, 1, 1, p2sh_script) tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() @@ -583,7 +557,7 @@ def update_block(block_number, new_transactions): tip(39) b40 = block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) - numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output + numTxes = (max_block_sigops - sigops) // b39_sigops_per_output assert_equal(numTxes <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) @@ -595,16 +569,16 @@ def update_block(block_number, new_transactions): # second input is corresponding P2SH output from b39 tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) # Note: must pass the redeem_script (not p2sh_script) to the signature hash function - (sighash, _) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL) + (sighash, _) = signature_hash(redeem_script, tx, 1, SIGHASH_ALL) sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL])) scriptSig = CScript([sig, redeem_script]) tx.vin[1].scriptSig = scriptSig tx.rehash() new_txs.append(tx) - lastOutpoint = COutPoint(tx.sha256, 0) + lastOutpoint = COutPoint(tx.x16r, 0) - b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1 + b40_sigops_to_fill = max_block_sigops - (numTxes * b39_sigops_per_output + sigops) + 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) @@ -647,17 +621,17 @@ def update_block(block_number, new_transactions): # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works. 
- height = self.block_heights[self.tip.sha256] + 1 + height = self.block_heights[self.tip.x16r] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.tip.nTime + 1 - b44.hashPrevBlock = self.tip.sha256 + b44.hashPrevBlock = self.tip.x16r b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 - self.block_heights[b44.sha256] = height + self.block_heights[b44.x16r] = height self.blocks[44] = b44 yield accepted() @@ -665,13 +639,13 @@ def update_block(block_number, new_transactions): non_coinbase = create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 - b45.hashPrevBlock = self.tip.sha256 + b45.hashPrevBlock = self.tip.x16r b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() - b45.calc_sha256() + b45.calc_x16r() b45.solve() - self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1 + self.block_heights[b45.x16r] = self.block_heights[self.tip.x16r]+1 self.tip = b45 self.blocks[45] = b45 yield rejected(RejectResult(16, b'bad-cb-missing')) @@ -680,12 +654,12 @@ def update_block(block_number, new_transactions): tip(44) b46 = CBlock() b46.nTime = b44.nTime+1 - b46.hashPrevBlock = b44.sha256 + b46.hashPrevBlock = b44.x16r b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() - self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1 + self.block_heights[b46.x16r] = self.block_heights[b44.x16r]+1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 @@ -734,7 +708,7 @@ def update_block(block_number, new_transactions): tip(44) b52 = block(52, spend=out[15]) tx = create_tx(b52.vtx[1], 0, 1) - b52 = update_block(52, [tx, tx]) + update_block(52, [tx, tx]) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # Test block timestamps @@ -790,7 +764,7 @@ def update_block(block_number, new_transactions): # b57 - a good block with 2 txs, don't submit until 
end tip(55) - b57 = block(57) + block(57) tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) b57 = update_block(57, [tx, tx1]) @@ -806,7 +780,7 @@ def update_block(block_number, new_transactions): # b57p2 - a good block with 6 tx'es, don't submit until end tip(55) - b57p2 = block("57p2") + block("57p2") tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) tx2 = create_tx(tx1, 0, 1) @@ -820,7 +794,7 @@ def update_block(block_number, new_transactions): self.blocks["b56p2"] = b56p2 assert_equal(b56p2.hash, b57p2.hash) assert_equal(len(b56p2.vtx),6) - b56p2 = update_block("b56p2", [tx3, tx4]) + update_block("b56p2", [tx3, tx4]) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip("57p2") @@ -841,9 +815,9 @@ def update_block(block_number, new_transactions): block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].tx.vout) < 42) - tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) + tx.vin.append(CTxIn(COutPoint(out[17].tx.x16r, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) - tx.calc_sha256() + tx.calc_x16r() update_block(58, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) @@ -888,10 +862,10 @@ def update_block(block_number, new_transactions): tx = CTransaction() tx.nLockTime = 0xffffffff #this locktime is non-final assert(out[18].n < len(out[18].tx.vout)) - tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence + tx.vin.append(CTxIn(COutPoint(out[18].tx.x16r, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert(tx.vin[0].nSequence < 0xffffffff) - tx.calc_sha256() + tx.calc_x16r() update_block(62, [tx]) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) @@ -906,7 +880,7 @@ def update_block(block_number, new_transactions): b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() - b63 = update_block(63, []) + 
update_block(63, []) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) @@ -1043,7 +1017,7 @@ def update_block(block_number, new_transactions): # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b71. # tip(69) - b72 = block(72) + block(72) tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) tx2 = create_and_sign_tx(tx1, 0, 1) b72 = update_block(72, [tx1, tx2]) # now tip is 72 @@ -1063,7 +1037,7 @@ def update_block(block_number, new_transactions): save_spendable_output() - # Test some invalid scripts and MAX_BLOCK_SIGOPS + # Test some invalid scripts and max_block_sigops # # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b** (22) @@ -1081,23 +1055,23 @@ def update_block(block_number, new_transactions): # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit) # tip(72) - b73 = block(73) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1 + block(73) + size = max_block_sigops - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4 + a[max_block_sigops - 1] = int("4e",16) # OP_PUSHDATA4 element_size = MAX_SCRIPT_ELEMENT_SIZE + 1 - a[MAX_BLOCK_SIGOPS] = element_size % 256 - a[MAX_BLOCK_SIGOPS+1] = element_size // 256 - a[MAX_BLOCK_SIGOPS+2] = 0 - a[MAX_BLOCK_SIGOPS+3] = 0 + a[max_block_sigops] = element_size % 256 + a[max_block_sigops+1] = element_size // 256 + a[max_block_sigops+2] = 0 + a[max_block_sigops+3] = 0 tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b73 = update_block(73, [tx]) - assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1) + assert_equal(get_legacy_sigopcount_block(b73), max_block_sigops+1) yield rejected(RejectResult(16, b'bad-blk-sigops')) - # b74/75 - if we push an invalid script element, all prevous sigops are counted, + # b74/75 - if we push an invalid script element, all previous sigops are counted, # but sigops after the element are not 
counted. # # The invalid script element is that the push_data indicates that @@ -1105,32 +1079,32 @@ def update_block(block_number, new_transactions): # provide a much smaller number. These bytes are CHECKSIGS so they would # cause b75 to fail for excessive sigops, if those bytes were counted. # - # b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element - # b75 succeeds because we put MAX_BLOCK_SIGOPS before the element + # b74 fails because we put max_block_sigops+1 before the element + # b75 succeeds because we put max_block_sigops before the element # # tip(72) block(74) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 + size = max_block_sigops - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS] = 0x4e - a[MAX_BLOCK_SIGOPS+1] = 0xfe - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff - a[MAX_BLOCK_SIGOPS+4] = 0xff + a[max_block_sigops] = 0x4e + a[max_block_sigops+1] = 0xfe + a[max_block_sigops+2] = 0xff + a[max_block_sigops+3] = 0xff + a[max_block_sigops+4] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) update_block(74, [tx]) yield rejected(RejectResult(16, b'bad-blk-sigops')) tip(72) block(75) - size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 + size = max_block_sigops - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e - a[MAX_BLOCK_SIGOPS] = 0xff - a[MAX_BLOCK_SIGOPS+1] = 0xff - a[MAX_BLOCK_SIGOPS+2] = 0xff - a[MAX_BLOCK_SIGOPS+3] = 0xff + a[max_block_sigops-1] = 0x4e + a[max_block_sigops] = 0xff + a[max_block_sigops+1] = 0xff + a[max_block_sigops+2] = 0xff + a[max_block_sigops+3] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) update_block(75, [tx]) yield accepted() @@ -1139,9 +1113,9 @@ def update_block(block_number, new_transactions): # Check that if we push an element filled with CHECKSIGs, they are not counted tip(75) block(76) - size = MAX_BLOCK_SIGOPS - 1 + 
MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + size = max_block_sigops - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 a = bytearray([OP_CHECKSIG] * size) - a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs + a[max_block_sigops-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) update_block(76, [tx]) yield accepted() @@ -1235,7 +1209,7 @@ def update_block(block_number, new_transactions): tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) - tx1.calc_sha256() + tx1.calc_x16r() self.sign_tx(tx1, out[29].tx, out[29].n) tx1.rehash() tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN])) @@ -1278,10 +1252,11 @@ def update_block(block_number, new_transactions): # if self.options.runbarelyexpensive: tip(88) - LARGE_REORG_SIZE = 1088 + large_reorg_size = 1088 test1 = TestInstance(sync_every_block=False) spend=out[32] - for i in range(89, LARGE_REORG_SIZE + 89): + i = 0 + for i in range(89, large_reorg_size + 89): b = block(i, spend) tx = CTransaction() script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 @@ -1300,7 +1275,7 @@ def update_block(block_number, new_transactions): # now create alt chain of same length tip(88) test2 = TestInstance(sync_every_block=False) - for i in range(89, LARGE_REORG_SIZE + 89): + for i in range(89, large_reorg_size + 89): block("alt"+str(i)) test2.blocks_and_transactions.append([self.tip, False]) yield test2 diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 8bed121bdf..754cfaea3d 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -3,27 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP65 (CHECKLOCKTIMEVERIFY). 
+""" +Test BIP65 (CHECKLOCKTIMEVERIFY). Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height 1351. """ +from io import BytesIO from test_framework.test_framework import RavenTestFramework -from test_framework.util import (p2p_port, assert_equal) -from test_framework.mininode import (ToHex, - CTransaction, - hex_str_to_bytes, - NodeConn, - NodeConnCB, - NetworkThread, - msg_block, - wait_until, - mininode_lock, - msg_tx) +from test_framework.util import p2p_port, assert_equal +from test_framework.mininode import to_hex, CTransaction, hex_str_to_bytes, NodeConn, NodeConnCB, NetworkThread, MsgBlock, wait_until, mininode_lock, MsgTx from test_framework.blocktools import create_coinbase, create_block -from test_framework.script import (CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum) -from io import BytesIO +from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, CScriptNum CLTV_HEIGHT = 1351 @@ -33,25 +25,25 @@ REJECT_NONSTANDARD = 64 def cltv_invalidate(tx): - '''Modify the signature in vin 0 of the tx to fail CLTV + """Modify the signature in vin 0 of the tx to fail CLTV Prepends -1 CLTV DROP in the scriptSig itself. TODO: test more ways that transactions using CLTV could be invalid (eg locktime requirements fail, sequence time requirements fail, etc). 
- ''' + """ tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig))) def cltv_validate(node, tx, height): - '''Modify the signature in vin 0 of the tx to pass CLTV + """Modify the signature in vin 0 of the tx to pass CLTV Prepends CLTV DROP in the scriptSig, and sets - the locktime to height''' + the locktime to height""" tx.vin[0].nSequence = 0 tx.nLockTime = height # Need to re-sign, since nSequence and nLockTime changed - signed_result = node.signrawtransaction(ToHex(tx)) + signed_result = node.signrawtransaction(to_hex(tx)) new_tx = CTransaction() new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex']))) @@ -77,8 +69,7 @@ def set_test_params(self): def run_test(self): node0 = NodeConnCB() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)] node0.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread @@ -105,7 +96,7 @@ def run_test(self): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 4") @@ -114,10 +105,10 @@ def run_test(self): block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) block.nVersion = 3 block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock, err_msg="last_message") with mininode_lock: assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE) assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000003)') @@ -135,7 +126,7 
@@ def run_test(self): # First we show that this tx is valid except for CLTV by getting it # accepted to the mempool (which we can achieve with # -promiscuousmempoolflags). - node0.send_and_ping(msg_tx(spendtx)) + node0.send_and_ping(MsgTx(spendtx)) assert spendtx.hash in self.nodes[0].getrawmempool() # Now we verify that a block with this transaction is invalid. @@ -143,10 +134,10 @@ def run_test(self): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock, err_msg="last_message") with mininode_lock: assert node0.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD] assert_equal(node0.last_message["reject"].data, block.sha256) @@ -165,7 +156,7 @@ def run_test(self): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index a9753f5f49..6237bc5172 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test activation of the first version bits soft fork. + +""" +Test activation of the first version bits soft fork. 
This soft fork will activate the following BIPS: BIP 68 - nSequence relative lock times @@ -44,14 +46,15 @@ bip112tx_special - test negative argument to OP_CSV """ -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import (Decimal, hex_str_to_bytes, assert_equal, get_bip9_status) -from test_framework.mininode import (ToHex, CTransaction, NetworkThread) -from test_framework.blocktools import (create_coinbase, create_block) -from test_framework.comptool import (TestInstance, TestManager) -from test_framework.script import (OP_DROP, CScript, OP_CHECKSEQUENCEVERIFY) from io import BytesIO import time +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import Decimal, hex_str_to_bytes, assert_equal, get_bip9_status +from test_framework.mininode import to_hex, CTransaction, NetworkThread +from test_framework.blocktools import create_coinbase, create_block +from test_framework.comptool import TestInstance, TestManager +from test_framework.script import OP_DROP, CScript, OP_CHECKSEQUENCEVERIFY + base_relative_locktime = 10 seq_disable_flag = 1<<31 @@ -70,19 +73,21 @@ b18times = [] for b18 in range(2): rlt = base_relative_locktime - if (b31): + if b31: rlt = rlt | seq_disable_flag - if (b25): + if b25: rlt = rlt | seq_random_high_bit - if (b22): + if b22: rlt = rlt | seq_type_flag - if (b18): + if b18: rlt = rlt | seq_random_low_bit b18times.append(rlt) b22times.append(b18times) b25times.append(b22times) relative_locktimes.append(b25times) + +# noinspection PyShadowingNames def all_rlt_txs(txarray): txs = [] for b31 in range(2): @@ -92,6 +97,8 @@ def all_rlt_txs(txarray): txs.append(txarray[b31][b25][b22][b18]) return txs + +# noinspection PyPep8Naming class BIP68_112_113Test(ComparisonTestFramework): def set_test_params(self): self.num_nodes = 1 @@ -106,9 +113,10 @@ def run_test(self): def send_generic_input_tx(self, node, coinbases): amount = Decimal("49.99") - return 
node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) + return node.sendrawtransaction(to_hex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) - def create_transaction(self, node, txid, to_address, amount): + @staticmethod + def create_transaction(node, txid, to_address, amount): inputs = [{ "txid" : txid, "vout" : 0}] outputs = { to_address : amount } rawtx = node.createrawtransaction(inputs, outputs) @@ -117,15 +125,18 @@ def create_transaction(self, node, txid, to_address, amount): tx.deserialize(f) return tx - def sign_transaction(self, node, unsignedtx): - rawtx = ToHex(unsignedtx) + @staticmethod + def sign_transaction(node, unsignedtx): + rawtx = to_hex(unsignedtx) signresult = node.signrawtransaction(rawtx) tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx - def generate_blocks(self, number, version, test_blocks = []): + def generate_blocks(self, number, version, test_blocks=None): + if test_blocks is None: + test_blocks = [] for _ in range(number): block = self.create_test_block([], version) test_blocks.append([block, True]) @@ -143,6 +154,7 @@ def create_test_block(self, txs, version = 536870912): block.solve() return block + # noinspection PyShadowingNames def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): txs = [] assert(len(bip68inputs) >= 16) @@ -164,14 +176,15 @@ def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0): txs.append(b25txs) return txs - def create_bip112special(self, input, txversion): - tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98")) + def create_bip112special(self, input_data, txversion): + tx = self.create_transaction(self.nodes[0], input_data, self.nodeaddress, Decimal("49.98")) tx.nVersion = txversion signtx = 
self.sign_transaction(self.nodes[0], tx) signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) return signtx - def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0): + # noinspection PyShadowingNames + def create_bip112txs(self, bip112inputs, vary_op_csv, txversion, locktime_delta = 0): txs = [] assert(len(bip112inputs) >= 16) i = 0 @@ -184,13 +197,13 @@ def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = for b18 in range(2): tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) i += 1 - if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed + if vary_op_csv: # if varying OP_CSV, nSequence is fixed tx.vin[0].nSequence = base_relative_locktime + locktime_delta else: # vary nSequence instead, OP_CSV is fixed tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta tx.nVersion = txversion signtx = self.sign_transaction(self.nodes[0], tx) - if (varyOP_CSV): + if vary_op_csv: signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) else: signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) @@ -200,6 +213,7 @@ def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = txs.append(b25txs) return txs + # noinspection PyShadowingNames def get_tests(self): long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index bfd9137bb2..7c1cb51960 100755 --- a/test/functional/feature_dbcrash.py +++ 
b/test/functional/feature_dbcrash.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test recovery from a crash during chainstate writing. + +""" +Test recovery from a crash during chainstate writing. - 4 nodes * node0, node1, and node2 will have different dbcrash ratios, and different @@ -24,17 +26,17 @@ * submit block to node * if node crashed on/after submitting: - restart until recovery succeeds - - check that utxo matches node3 using gettxoutsetinfo""" + - check that utxo matches node3 using gettxoutsetinfo +""" import errno import http.client import random import time - -from test_framework.mininode import (CTxIn, COutPoint, COIN, ToHex) -from test_framework.script import (CTransaction, CTxOut) +from test_framework.mininode import CTxIn, COutPoint, COIN, to_hex +from test_framework.script import CTransaction, CTxOut from test_framework.test_framework import RavenTestFramework -from test_framework.util import (create_confirmed_utxos, hex_str_to_bytes, assert_equal) +from test_framework.util import create_confirmed_utxos, hex_str_to_bytes, assert_equal class ChainstateWriteCrashTest(RavenTestFramework): @@ -64,6 +66,7 @@ def setup_network(self): self.start_nodes() # Leave them unconnected, we'll use submitblock directly in this test + # noinspection PyMethodOverriding def restart_node(self, node_index, expected_tip): """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash. @@ -71,6 +74,7 @@ def restart_node(self, node_index, expected_tip): after 60 seconds. 
Returns the utxo hash of the given node.""" time_start = time.time() while time.time() - time_start < 120: + # noinspection PyBroadException try: # Any of these RPC calls could throw due to node crash self.start_node(node_index) @@ -175,10 +179,12 @@ def verify_utxo_hash(self): nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash()) assert_equal(nodei_utxo_hash, node3_utxo_hash) - def generate_small_transactions(self, node, count, utxo_list): + @staticmethod + def generate_small_transactions(node, count, utxo_list): fee = 100000000 # TODO: replace this with node relay fee based calculation num_transactions = 0 random.shuffle(utxo_list) + utxo = None while len(utxo_list) >= 2 and num_transactions < count: tx = CTransaction() input_amount = 0 @@ -193,10 +199,11 @@ def generate_small_transactions(self, node, count, utxo_list): continue for _ in range(3): - tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) + if utxo is not None: + tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool - tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex'] + tx_signed_hex = node.signrawtransaction(to_hex(tx))['hex'] node.sendrawtransaction(tx_signed_hex) num_transactions += 1 diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py index 7e95169bd0..9f18e52b5f 100755 --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -3,25 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test BIP66 (DER SIG). + +""" +Test BIP66 (DER SIG). Test that the DERSIG soft-fork activates at (regtest) height 1251. 
""" +from io import BytesIO from test_framework.test_framework import RavenTestFramework -from test_framework.util import (p2p_port, assert_equal) -from test_framework.mininode import (CTransaction, - hex_str_to_bytes, - NodeConnCB, - NodeConn, - NetworkThread, - msg_block, - wait_until, - mininode_lock, - msg_tx) -from test_framework.blocktools import (create_coinbase, create_block) +from test_framework.util import p2p_port, assert_equal, hex_str_to_bytes +from test_framework.mininode import CTransaction, NodeConnCB, NodeConn, NetworkThread, MsgBlock, wait_until, mininode_lock, MsgTx +from test_framework.blocktools import create_coinbase, create_block from test_framework.script import CScript -from io import BytesIO DERSIG_HEIGHT = 1251 @@ -32,15 +26,15 @@ # A canonical signature consists of: # <30> <02> <02> -def unDERify(tx): +def un_der_ify(tx): """ Make the signature in vin 0 of a tx non-DER-compliant, by adding padding after the S-value. """ - scriptSig = CScript(tx.vin[0].scriptSig) + script_sig = CScript(tx.vin[0].scriptSig) newscript = [] - for i in scriptSig: - if (len(newscript) == 0): + for i in script_sig: + if len(newscript) == 0: newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) @@ -64,8 +58,7 @@ def set_test_params(self): def run_test(self): node0 = NodeConnCB() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)] node0.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread @@ -78,9 +71,8 @@ def run_test(self): self.log.info("Test that a transaction with non-DER signature can still appear in a block") - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], - self.nodeaddress, 1.0) - unDERify(spendtx) + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0], self.nodeaddress, 1.0) + un_der_ify(spendtx) spendtx.rehash() tip = 
self.nodes[0].getbestblockhash() @@ -92,7 +84,7 @@ def run_test(self): block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 3") @@ -102,10 +94,10 @@ def run_test(self): block.nVersion = 2 block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock, err_msg="last_message") with mininode_lock: assert_equal(node0.last_message["reject"].code, REJECT_OBSOLETE) assert_equal(node0.last_message["reject"].reason, b'bad-version(0x00000002)') @@ -115,15 +107,14 @@ def run_test(self): self.log.info("Test that transactions with non-DER signatures cannot appear in a block") block.nVersion = 3 - spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], - self.nodeaddress, 1.0) - unDERify(spendtx) + spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) + un_der_ify(spendtx) spendtx.rehash() # First we show that this tx is valid except for DERSIG by getting it # accepted to the mempool (which we can achieve with # -promiscuousmempoolflags). - node0.send_and_ping(msg_tx(spendtx)) + node0.send_and_ping(MsgTx(spendtx)) assert spendtx.hash in self.nodes[0].getrawmempool() # Now we verify that a block with this transaction is invalid. 
@@ -132,10 +123,10 @@ def run_test(self): block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) - wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock) + wait_until(lambda: "reject" in node0.last_message.keys(), lock=mininode_lock, err_msg="last_message") with mininode_lock: # We can receive different reject messages depending on whether # ravend is running with multiple script check threads. If script @@ -157,7 +148,7 @@ def run_test(self): block.rehash() block.solve() - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py index 1315b57280..819fa3a75a 100755 --- a/test/functional/feature_fee_estimation.py +++ b/test/functional/feature_fee_estimation.py @@ -3,12 +3,13 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test fee estimation code.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (satoshi_round, Decimal, connect_nodes, random, sync_mempools, sync_blocks) -from test_framework.script import (CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE) -from test_framework.mininode import (CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN) +from test_framework.util import satoshi_round, Decimal, connect_nodes, random, sync_mempools, sync_blocks +from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE +from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint, to_hex, COIN # Construct 2 trivial P2SH's and the ScriptSigs that spend them # So we can create many transactions without needing to spend @@ -21,9 +22,10 @@ # Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2 SCRIPT_SIG = [CScript([OP_TRUE, redeem_script_1]), CScript([OP_TRUE, redeem_script_2])] +# noinspection PyGlobalUndefined global log -def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment): +def small_tx_puzzle_rand_fee(from_node, conflist, unconflist, amount, min_fee, fee_increment): """ Create and send a transaction with a random fee. The transaction pays to a trivial P2SH script, and assumes that its inputs @@ -58,11 +60,11 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee # the ScriptSig that will satisfy the ScriptPubKey. 
for inp in tx.vin: inp.scriptSig = SCRIPT_SIG[inp.prevout.n] - txid = from_node.sendrawtransaction(ToHex(tx), True) + txid = from_node.sendrawtransaction(to_hex(tx), True) unconflist.append({ "txid" : txid, "vout" : 0 , "amount" : total_in - amount - fee}) unconflist.append({ "txid" : txid, "vout" : 1 , "amount" : amount}) - return (ToHex(tx), fee) + return to_hex(tx), fee def split_inputs(from_node, txins, txouts, initial_split = False): """ @@ -83,11 +85,11 @@ def split_inputs(from_node, txins, txouts, initial_split = False): # If this is the initial split we actually need to sign the transaction # Otherwise we just need to insert the proper ScriptSig - if (initial_split) : - completetx = from_node.signrawtransaction(ToHex(tx))["hex"] + if initial_split: + completetx = from_node.signrawtransaction(to_hex(tx))["hex"] else : tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]] - completetx = ToHex(tx) + completetx = to_hex(tx) txid = from_node.sendrawtransaction(completetx, True) txouts.append({ "txid" : txid, "vout" : 0 , "amount" : half_change}) txouts.append({ "txid" : txid, "vout" : 1 , "amount" : rem_change}) @@ -137,7 +139,7 @@ def check_estimates(node, fees_seen, max_invalid, print_estimates = True): # Check on the expected number of different confirmation counts # that we might not have valid estimates for if invalid_estimates > max_invalid: - raise AssertionError("More than (%d) invalid estimates"%(max_invalid)) + raise AssertionError("More than (%d) invalid estimates" % max_invalid) return all_estimates @@ -166,14 +168,14 @@ def transact_and_mine(self, numblocks, mining_node): min_fee = Decimal("0.0100000") # We will now mine numblocks blocks generating on average 100 transactions between each block # We shuffle our confirmed txout set before each set of transactions - # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible + # small_tx_puzzle_rand_fee will use the transactions that have inputs already in the 
chain when possible # resorting to tx's that depend on the mempool when those run out for _ in range(numblocks): random.shuffle(self.confutxo) for _ in range(random.randrange(100-50,100+50)): from_index = random.randint(1,2) - (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo, - self.memutxo, Decimal("0.005"), min_fee, min_fee) + (txhex, fee) = small_tx_puzzle_rand_fee(self.nodes[from_index], self.confutxo, + self.memutxo, Decimal("0.005"), min_fee, min_fee) tx_kbytes = (len(txhex) // 2) / 1000.0 self.fees_per_kb.append(float(fee)/tx_kbytes) sync_mempools(self.nodes[0:3], wait=.1) @@ -204,22 +206,22 @@ def run_test(self): split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True) # Mine - while (len(self.nodes[0].getrawmempool()) > 0): + while len(self.nodes[0].getrawmempool()) > 0: self.nodes[0].generate(1) # Repeatedly split those 2 outputs, doubling twice for each rep # Use txouts to monitor the available utxo, since these won't be tracked in wallet reps = 0 - while (reps < 5): + while reps < 5: #Double txouts to txouts2 - while (len(self.txouts)>0): + while len(self.txouts)>0: split_inputs(self.nodes[0], self.txouts, self.txouts2) - while (len(self.nodes[0].getrawmempool()) > 0): + while len(self.nodes[0].getrawmempool()) > 0: self.nodes[0].generate(1) #Double txouts2 to txouts - while (len(self.txouts2)>0): + while len(self.txouts2)>0: split_inputs(self.nodes[0], self.txouts2, self.txouts) - while (len(self.nodes[0].getrawmempool()) > 0): + while len(self.nodes[0].getrawmempool()) > 0: self.nodes[0].generate(1) reps += 1 self.log.info("Finished splitting") diff --git a/test/functional/feature_listmyassets.py b/test/functional/feature_listmyassets.py index 59b7b8e897..0424e9c0c5 100755 --- a/test/functional/feature_listmyassets.py +++ b/test/functional/feature_listmyassets.py @@ -3,12 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file 
COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test listmyassets RPC command.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_contains_pair) -from test_framework.mininode import CInv -from io import BytesIO +from test_framework.util import assert_equal, assert_contains_pair class ListMyAssetsTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/feature_loadblock.py b/test/functional/feature_loadblock.py new file mode 100755 index 0000000000..c56bb01780 --- /dev/null +++ b/test/functional/feature_loadblock.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2019 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" +Test loadblock option + +Test the option to start a node with the option loadblock which loads +a serialized blockchain from a file (usually called bootstrap.dat). +To generate that file this test uses the helper scripts available +in contrib/linearize. 
+""" + +import configparser +import os +import subprocess +import sys +import tempfile +import urllib +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, wait_until + +class LoadblockTest(RavenTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 2 + + def run_test(self): + self.nodes[1].setnetworkactive(state=False) + self.nodes[0].generate(100) + + # Parsing the url of our node to get settings for config file + data_dir = self.nodes[0].datadir + node_url = urllib.parse.urlparse(self.nodes[0].url) + cfg_file = os.path.join(data_dir, "linearize.cfg") + bootstrap_file = os.path.join(self.options.tmpdir, "bootstrap.dat") + genesis_block = self.nodes[0].getblockhash(0) + blocks_dir = os.path.join(data_dir, "regtest", "blocks") + hash_list = tempfile.NamedTemporaryFile(dir=data_dir, + mode='w', + delete=False, + encoding="utf-8") + + self.log.info("Create linearization config file") + with open(cfg_file, "a", encoding="utf-8") as cfg: + cfg.write("datadir={}\n".format(data_dir)) + cfg.write("rpcuser={}\n".format(node_url.username)) + cfg.write("rpcpassword={}\n".format(node_url.password)) + cfg.write("port={}\n".format(node_url.port)) + cfg.write("host={}\n".format(node_url.hostname)) + cfg.write("output_file={}\n".format(bootstrap_file)) + cfg.write("max_height=100\n") + cfg.write("netmagic=43524f57\n") + cfg.write("input={}\n".format(blocks_dir)) + cfg.write("genesis={}\n".format(genesis_block)) + cfg.write("hashlist={}\n".format(hash_list.name)) + + # Get the configuration file to find src and linearize + config = configparser.ConfigParser() + if not self.options.configfile: + self.options.configfile = os.path.abspath(os.path.join(os.path.dirname(__file__), "../config.ini")) + config.read_file(open(self.options.configfile)) + base_dir = config["environment"]["SRCDIR"] + linearize_dir = os.path.join(base_dir, "contrib", "linearize") + + self.log.info("Run linearization 
of block hashes") + linearize_hashes_file = os.path.join(linearize_dir, "linearize-hashes.py") + subprocess.run([sys.executable, linearize_hashes_file, cfg_file], + stdout=hash_list, + check=True) + + self.log.info("Run linearization of block data") + linearize_data_file = os.path.join(linearize_dir, "linearize-data.py") + subprocess.run([sys.executable, linearize_data_file, cfg_file], + check=True) + + self.log.info("Restart second, unsynced node with bootstrap file") + self.stop_node(1) + self.start_node(1, ["-loadblock=" + bootstrap_file]) + wait_until(lambda: self.nodes[1].getblockcount() == 100, err_msg="Wait for block count == 100") + + assert_equal(self.nodes[1].getblockchaininfo()['blocks'], 100) + assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) + + +if __name__ == '__main__': + LoadblockTest().main() diff --git a/test/functional/feature_maxreorgdepth.py b/test/functional/feature_maxreorgdepth.py index 7299f272a2..f1dc45aa99 100755 --- a/test/functional/feature_maxreorgdepth.py +++ b/test/functional/feature_maxreorgdepth.py @@ -3,17 +3,15 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Max Reorg Test + +""" +Max Reorg Test """ + import sys import time from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_all_nodes_bi, - set_node_times, - assert_equal, - connect_nodes_bi, - assert_contains_pair, - assert_does_not_contain_key) +from test_framework.util import connect_all_nodes_bi, set_node_times, assert_equal, connect_nodes_bi, assert_contains_pair, assert_does_not_contain_key from test_framework.mininode import wait_until @@ -30,12 +28,9 @@ def set_test_params(self): # self.extra_args = [[f"-maxreorg={self.max_reorg_depth}", f"-minreorgpeers={self.min_reorg_peers}", f"-minreorgage={self.min_reorg_age}"] for i in range(self.num_nodes)] def add_options(self, parser): - parser.add_option("--height", dest="height", default=65, - help="The height of good branch when adversary surprises.") - parser.add_option("--tip_age", dest="tip_age", default=60*5, - help="Age of tip of non-adversaries at time of reorg.") - parser.add_option("--should_reorg", dest="should_reorg", default=0, - help="Whether a reorg is expected (0 or 1).") + parser.add_option("--height", dest="height", default=65, help="The height of good branch when adversary surprises.") + parser.add_option("--tip_age", dest="tip_age", default=60*5, help="Age of tip of non-adversaries at time of reorg.") + parser.add_option("--should_reorg", dest="should_reorg", default=0, help="Whether a reorg is expected (0 or 1).") def setup_network(self): @@ -117,8 +112,9 @@ def reorg_test(self): else: self.log.info(f"Didn't expect a reorg -- blockcount should remain {expected_height} and both subject and adversary should own {asset_name} (waiting 5 seconds)...") + # noinspection PyBroadException try: - wait_until(lambda: [n.getblockcount() for n in self.nodes] == [expected_height] * peers, timeout=5) + wait_until(lambda: [n.getblockcount() for n in self.nodes] == [expected_height] * peers, timeout=5, err_msg="getblockcount") except: pass 
self.log.info("BlockCount: " +str([n.getblockcount() for n in self.nodes])) diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index 1b42c93260..5864d94df2 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test behavior of -maxuploadtarget. + +""" +Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. @@ -11,13 +13,12 @@ if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. """ + from collections import defaultdict import time - -from pprint import re -from test_framework.mininode import (NodeConn, NodeConnCB, NetworkThread, msg_getdata, CInv) +from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread, MsgGetdata, CInv from test_framework.test_framework import RavenTestFramework -from test_framework.util import (p2p_port, mine_large_block, assert_equal) +from test_framework.util import p2p_port, mine_large_block, assert_equal class TestNode(NodeConnCB): @@ -29,8 +30,8 @@ def on_inv(self, conn, message): pass def on_block(self, conn, message): - message.block.calc_sha256() - self.block_receive_map[message.block.sha256] += 1 + message.block.calc_x16r() + self.block_receive_map[message.block.x16r] += 1 class MaxUploadTest(RavenTestFramework): @@ -91,7 +92,7 @@ def run_test(self): # test_nodes[0] will test what happens if we just keep requesting the # the same big old block too many times (expect: disconnect) - getdata_request = msg_getdata() + getdata_request = MsgGetdata() getdata_request.inv.append(CInv(2, big_old_block)) block_rate_minutes = 1 diff --git a/test/functional/feature_messaging.py 
b/test/functional/feature_messaging.py index dc08d3c712..de3417055f 100755 --- a/test/functional/feature_messaging.py +++ b/test/functional/feature_messaging.py @@ -3,15 +3,13 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Testing messaging + +""" +Testing messaging """ from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, - assert_raises_rpc_error, - assert_contains, - assert_does_not_contain, - assert_contains_pair) +from test_framework.util import assert_equal, assert_raises_rpc_error, assert_contains, assert_does_not_contain, assert_contains_pair class MessagingTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py index c7f4ffff6e..6aaa4fe7bb 100755 --- a/test/functional/feature_minchainwork.py +++ b/test/functional/feature_minchainwork.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test logic for setting nMinimumChainWork on command line. + +""" +Test logic for setting nMinimumChainWork on command line. Nodes don't consider themselves out of "initial block download" until their active chain has more work than nMinimumChainWork. 
@@ -17,9 +19,8 @@ """ import time - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (sync_blocks, connect_nodes, assert_equal) +from test_framework.util import connect_nodes, assert_equal # 2 hashes required per regtest block (with no difficulty adjustment) REGTEST_WORK_PER_BLOCK = 2 diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index 4bbb725058..dd3e5a5393 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -3,11 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the -alertnotify, -blocknotify and -walletnotify options.""" -import os +""" +Test the -alertnotify, -blocknotify and -walletnotify options. +""" + +import os from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, wait_until, connect_nodes_bi) +from test_framework.util import assert_equal, wait_until, connect_nodes_bi class NotificationsTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index bae71fc1d4..8da1b78911 100755 --- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -2,8 +2,10 @@ # Copyright (c) 2016 The Bitcoin Core developers # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" +Test NULLDUMMY softfork. Connect to a single node. Generate 2 blocks (save the coinbases for later).
@@ -14,25 +16,26 @@ [Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. """ -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (bytes_to_hex_str, assert_raises_rpc_error, hex_str_to_bytes, assert_equal) -from test_framework.mininode import (CTransaction, NetworkThread) -from test_framework.blocktools import (create_coinbase, create_block, add_witness_commitment) -from test_framework.script import CScript from io import BytesIO import time +from test_framework.test_framework import RavenTestFramework +from test_framework.util import bytes_to_hex_str, assert_raises_rpc_error, hex_str_to_bytes, assert_equal +from test_framework.mininode import CTransaction, NetworkThread +from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment +from test_framework.script import CScript NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" -def trueDummy(tx): - scriptSig = CScript(tx.vin[0].scriptSig) +def true_dummy(tx): + script_sig = CScript(tx.vin[0].scriptSig) newscript = [] - for i in scriptSig: - if (len(newscript) == 0): + for i in script_sig: + if len(newscript) == 0: assert(len(i) == 0) newscript.append(b'\x51') else: newscript.append(i) + # noinspection PyPep8Naming tx.vin[0].scriptSig = CScript(newscript) tx.rehash() @@ -71,7 +74,7 @@ def run_test(self): self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47) - trueDummy(test2tx) + true_dummy(test2tx) assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True) self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") @@ -80,7 +83,7 @@ def run_test(self): self.log.info("Test 4: Non-NULLDUMMY base 
multisig transaction is invalid after activation") test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46) test6txs=[CTransaction(test4tx)] - trueDummy(test4tx) + true_dummy(test4tx) assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True) self.block_submit(self.nodes[0], [test4tx]) @@ -97,7 +100,8 @@ def run_test(self): self.block_submit(self.nodes[0], test6txs, True, True) - def create_transaction(self, node, txid, to_address, amount): + @staticmethod + def create_transaction(node, txid, to_address, amount): inputs = [{ "txid" : txid, "vout" : 0}] outputs = { to_address : amount } rawtx = node.createrawtransaction(inputs, outputs) @@ -119,7 +123,7 @@ def block_submit(self, node, txs, witness = False, accept = False): block.rehash() block.solve() node.submitblock(bytes_to_hex_str(block.serialize(True))) - if (accept): + if accept: assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.lastblockhash = block.hash diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 53afb83aaa..48d61cd875 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test ravend with different proxy configuration. + +""" +Test ravend with different proxy configuration. 
Test plan: - Start ravend's with different proxy configurations @@ -30,10 +32,9 @@ import socket import os - -from test_framework.socks5 import (Socks5Configuration, Socks5Command, Socks5Server, AddressType) +from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType from test_framework.test_framework import RavenTestFramework -from test_framework.util import (PORT_MIN, PORT_RANGE, assert_equal) +from test_framework.util import PORT_MIN, PORT_RANGE, assert_equal from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports @@ -76,13 +77,13 @@ def setup_nodes(self): # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost args = [ - ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'], - ['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'], - ['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'], + ['-listen', '-proxy=%s:%i' % self.conf1.addr, '-proxyrandomize=1'], + ['-listen', '-proxy=%s:%i' % self.conf1.addr, '-onion=%s:%i' % self.conf2.addr, '-proxyrandomize=0'], + ['-listen', '-proxy=%s:%i' % self.conf2.addr, '-proxyrandomize=1'], [] ] if self.have_ipv6: - args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion'] + args[3] = ['-listen', '-proxy=[%s]:%i' % self.conf3.addr, '-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() @@ -169,28 +170,28 @@ def networks_dict(d): # test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4','ipv6','onion']: - assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) + assert_equal(n0[net]['proxy'], '%s:%i' % self.conf1.addr) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) 
n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4','ipv6']: - assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) + assert_equal(n1[net]['proxy'], '%s:%i' % self.conf1.addr) assert_equal(n1[net]['proxy_randomize_credentials'], False) - assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) + assert_equal(n1['onion']['proxy'], '%s:%i' % self.conf2.addr) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4','ipv6','onion']: - assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) + assert_equal(n2[net]['proxy'], '%s:%i' % self.conf2.addr) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4','ipv6']: - assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) + assert_equal(n3[net]['proxy'], '[%s]:%i' % self.conf3.addr) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 1cd2907280..e4434a7e9b 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -3,22 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the pruning code. + +""" +Test the pruning code. WARNING: This test uses 4GB of disk space. 
This test takes 30 mins or more (up to 2 hours) """ -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes, - sync_blocks, - mine_large_block, - assert_equal, - assert_raises_rpc_error, - assert_greater_than) import time import os +from test_framework.test_framework import RavenTestFramework +from test_framework.util import connect_nodes, sync_blocks, mine_large_block, assert_equal, assert_raises_rpc_error, assert_greater_than MIN_BLOCKS_TO_KEEP = 288 @@ -85,19 +82,19 @@ def test_height_min(self): for _ in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) - waitstart = time.time() + wait_start = time.time() while os.path.isfile(self.prunedir+"blk00000.dat"): time.sleep(0.1) - if time.time() - waitstart > 30: + if time.time() - wait_start > 30: raise AssertionError("blk00000.dat not pruned when it should be") self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: %d" % usage) - if (usage > 550): + if usage > 550: raise AssertionError("Pruning target not being met") - def create_chain_with_staleblocks(self): + def create_chain_with_stale_blocks(self): # Create stale blocks in manageable sized chunks self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") @@ -115,7 +112,7 @@ def create_chain_with_staleblocks(self): # Add node1's wallet transactions back to the mempool, to # avoid the mined blocks from being too small. 
self.nodes[1].resendwallettransactions() - self.nodes[1].generate(1) #tx's already in mempool from previous disconnects + self.nodes[1].generate(1) # tx's already in mempool from previous disconnects # Reorg back with 25 block chain from node 0 for _ in range(25): @@ -139,20 +136,20 @@ def reorg_test(self): height = self.nodes[1].getblockcount() self.log.info("Current block height: %d" % height) - invalidheight = height-287 - badhash = self.nodes[1].getblockhash(invalidheight) - self.log.info("Invalidating block %s at height %d" % (badhash,invalidheight)) - self.nodes[1].invalidateblock(badhash) + invalid_height = height-287 + bad_hash = self.nodes[1].getblockhash(invalid_height) + self.log.info("Invalidating block %s at height %d" % (bad_hash,invalid_height)) + self.nodes[1].invalidateblock(bad_hash) # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago) - mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) - curhash = self.nodes[1].getblockhash(invalidheight - 1) - while curhash != mainchainhash: - self.nodes[1].invalidateblock(curhash) - curhash = self.nodes[1].getblockhash(invalidheight - 1) + mainchainhash = self.nodes[0].getblockhash(invalid_height - 1) + current_hash = self.nodes[1].getblockhash(invalid_height - 1) + while current_hash != mainchainhash: + self.nodes[1].invalidateblock(current_hash) + current_hash = self.nodes[1].getblockhash(invalid_height - 1) - assert(self.nodes[1].getblockcount() == invalidheight - 1) + assert(self.nodes[1].getblockcount() == invalid_height - 1) self.log.info("New best height: %d" % self.nodes[1].getblockcount()) # Reboot node1 to clear those giant tx's from mempool @@ -184,15 +181,15 @@ def reorg_test(self): usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: %d" % usage) - if (usage > 550): + if usage > 550: raise 
AssertionError("Pruning target not being met") - return invalidheight,badhash + return invalid_height,bad_hash def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) - self.log.info("Will need to redownload block %d" % self.forkheight) + self.log.info("Will need to re-download block %d" % self.forkheight) # Verify that we have enough history to reorg back to the fork point # Although this is more than 288 blocks, because this chain was written more recently @@ -201,14 +198,14 @@ def reorg_back(self): self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() - curchainhash = self.nodes[2].getblockhash(self.mainchainheight) - self.nodes[2].invalidateblock(curchainhash) - goalbestheight = self.mainchainheight - goalbesthash = self.mainchainhash2 + current_chain_hash = self.nodes[2].getblockhash(self.mainchainheight) + self.nodes[2].invalidateblock(current_chain_hash) + goal_best_height = self.mainchainheight + goal_best_hash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to the original chain created in # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to - # redownload its missing blocks. + # re-download its missing blocks. # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain # because it has all the block data. 
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order @@ -216,20 +213,20 @@ def reorg_back(self): # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight - self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: %d" % blocks_to_mine) - self.nodes[0].invalidateblock(curchainhash) + self.log.info("Rewind node 0 to prev main chain to mine longer chain to trigger re-download. Blocks needed: %d" % blocks_to_mine) + self.nodes[0].invalidateblock(current_chain_hash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) - goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] - goalbestheight = first_reorg_height + 1 + goal_best_hash = self.nodes[0].generate(blocks_to_mine)[-1] + goal_best_height = first_reorg_height + 1 - self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") - waitstart = time.time() - while self.nodes[2].getblockcount() < goalbestheight: + self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to re-download") + wait_start = time.time() + while self.nodes[2].getblockcount() < goal_best_height: time.sleep(0.1) - if time.time() - waitstart > 900: + if time.time() - wait_start > 900: raise AssertionError("Node 2 didn't reorg to proper height") - assert(self.nodes[2].getbestblockhash() == goalbesthash) + assert(self.nodes[2].getbestblockhash() == goal_best_hash) # Verify we can now have the data for a block previously pruned assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight) @@ -369,7 +366,7 @@ def run_test(self): # N0=N1=N2 **...*(1020) self.log.info("Check that we'll exceed disk space target if we have a very high 
stale block rate") - self.create_chain_with_staleblocks() + self.create_chain_with_stale_blocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) @@ -425,11 +422,11 @@ def run_test(self): # \ # *...**(1320) - self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg") + self.log.info("Test that we can re-request a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to - # original main chain (*), but will require redownload of some blocks + # original main chain (*), but will require re-download of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: diff --git a/test/functional/feature_raw_restricted_assets.py b/test/functional/feature_raw_restricted_assets.py index bf0725dda2..427409eac0 100755 --- a/test/functional/feature_raw_restricted_assets.py +++ b/test/functional/feature_raw_restricted_assets.py @@ -3,12 +3,12 @@ # Copyright (c) 2017-2018 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test restricted asset related RPC commands.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import * import math -from pprint import pprint +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal BURN_ADDRESSES = { 'issue_restricted': 'n1issueRestrictedXXXXXXXXXXXXZVT9V', @@ -32,9 +32,7 @@ def truncate(number, digits = 8): stepper = pow(10.0, digits) return math.trunc(stepper * number) / stepper -def get_tx_issue_hex(node, to_address, asset_name, \ - asset_quantity=1000, verifier_string="true", units=0, reissuable=1, has_ipfs=0, \ - ipfs_hash="", owner_change_address=""): +def get_tx_issue_hex(node, to_address, asset_name, asset_quantity=1000, verifier_string="true", units=0, reissuable=1, has_ipfs=0, ipfs_hash="", owner_change_address=""): change_address = node.getnewaddress() rvn_unspent = next(u for u in node.listunspent() if u['amount'] > BURN_AMOUNTS['issue_restricted']) @@ -68,8 +66,7 @@ def get_tx_issue_hex(node, to_address, asset_name, \ tx_issue_hex = tx_issue_signed['hex'] return tx_issue_hex -def get_tx_reissue_hex(node, to_address, asset_name, asset_quantity, \ - reissuable=1, verifier_string="", ipfs_hash="", owner_change_address=""): +def get_tx_reissue_hex(node, to_address, asset_name, asset_quantity, reissuable=1, verifier_string="", ipfs_hash="", owner_change_address=""): change_address = node.getnewaddress() rvn_unspent = next(u for u in node.listunspent() if u['amount'] > BURN_AMOUNTS['reissue_restricted']) @@ -102,8 +99,7 @@ def get_tx_reissue_hex(node, to_address, asset_name, asset_quantity, \ tx_issue_hex = tx_issue_signed['hex'] return tx_issue_hex -def get_tx_issue_qualifier_hex(node, to_address, asset_name, \ - asset_quantity=1, has_ipfs=0, ipfs_hash="", root_change_address="", change_qty=1): +def get_tx_issue_qualifier_hex(node, to_address, asset_name, asset_quantity=1, has_ipfs=0, ipfs_hash="", root_change_address="", change_qty=1): 
change_address = node.getnewaddress() is_sub_qualifier = len(asset_name.split('/')) > 1 @@ -174,8 +170,7 @@ def get_tx_transfer_hex(node, to_address, asset_name, asset_quantity): tx_transfer_hex = tx_transfer_signed['hex'] return tx_transfer_hex -def get_tx_tag_address_hex(node, op, qualifier_name, tag_addresses, qualifier_change_address, \ - change_qty=1): +def get_tx_tag_address_hex(node, op, qualifier_name, tag_addresses, qualifier_change_address, change_qty=1): change_address = node.getnewaddress() burn_amount = truncate(float(BURN_AMOUNTS['tag_address'] * len(tag_addresses))) @@ -289,9 +284,8 @@ def issue_restricted_test(self): n0.issue(base_asset_name) n0.generate(1) - hex = get_tx_issue_hex(n0, to_address, asset_name, qty, verifier, \ - units, reissuable, has_ipfs, ipfs_hash, owner_change_address) - txid = n0.sendrawtransaction(hex) + hex_data = get_tx_issue_hex(n0, to_address, asset_name, qty, verifier, units, reissuable, has_ipfs, ipfs_hash, owner_change_address) + txid = n0.sendrawtransaction(hex_data) n0.generate(1) #verify @@ -335,9 +329,8 @@ def reissue_restricted_test(self): n0.addtagtoaddress(qualifier, to_address) n0.generate(1) - hex = get_tx_reissue_hex(n0, to_address, asset_name, reissue_qty, reissuable, reissue_verifier, ipfs_hash, \ - owner_change_address) - txid = n0.sendrawtransaction(hex) + hex_data = get_tx_reissue_hex(n0, to_address, asset_name, reissue_qty, reissuable, reissue_verifier, ipfs_hash, owner_change_address) + txid = n0.sendrawtransaction(hex_data) n0.generate(1) #verify @@ -362,8 +355,8 @@ def issue_qualifier_test(self): ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8" #### ROOT QUALIFIER - hex = get_tx_issue_qualifier_hex(n0, to_address, asset_name, qty, has_ipfs, ipfs_hash) - txid = n0.sendrawtransaction(hex) + hex_data = get_tx_issue_qualifier_hex(n0, to_address, asset_name, qty, has_ipfs, ipfs_hash) + txid = n0.sendrawtransaction(hex_data) n0.generate(1) #verify @@ -384,8 +377,7 @@ def 
issue_qualifier_test(self): root_change_address = n0.getnewaddress() #### SUB-QUALIFIER - sub_hex = get_tx_issue_qualifier_hex(n0, sub_to_address, sub_asset_name, sub_qty, sub_has_ipfs, sub_ipfs_hash, \ - root_change_address, qty) + sub_hex = get_tx_issue_qualifier_hex(n0, sub_to_address, sub_asset_name, sub_qty, sub_has_ipfs, sub_ipfs_hash, root_change_address, qty) sub_txid = n0.sendrawtransaction(sub_hex) n0.generate(1) @@ -408,14 +400,14 @@ def transfer_qualifier_test(self): qty = 5 n0_address = n0.getnewaddress() - hex = get_tx_issue_qualifier_hex(n0, n0_address, asset_name, qty) - txid = n0.sendrawtransaction(hex) + hex_data = get_tx_issue_qualifier_hex(n0, n0_address, asset_name, qty) + n0.sendrawtransaction(hex_data) n0.generate(1) n1_address = n1.getnewaddress() xfer_qty = 2 - hex = get_tx_transfer_hex(n0, n1_address, asset_name, xfer_qty) - txid = n0.sendrawtransaction(hex) + hex_data = get_tx_transfer_hex(n0, n1_address, asset_name, xfer_qty) + n0.sendrawtransaction(hex_data) n0.generate(1) self.sync_all() diff --git a/test/functional/feature_rawassettransactions.py b/test/functional/feature_rawassettransactions.py index d72b747ee7..8d75da42d9 100755 --- a/test/functional/feature_rawassettransactions.py +++ b/test/functional/feature_rawassettransactions.py @@ -3,33 +3,24 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the rawtransaction RPCs for asset transactions. + +""" +Test the rawtransaction RPCs for asset transactions. 
""" + +import math from io import BytesIO from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, - assert_raises_rpc_error, - assert_is_hash_string, - assert_does_not_contain_key, - assert_contains_key, - assert_contains_pair) -from test_framework.mininode import (CTransaction, - hex_str_to_bytes, - bytes_to_hex_str, - CScriptReissue, - CScriptOwner, - CScriptTransfer, - CTxOut, - CScriptIssue) -import math - +from test_framework.util import assert_equal, assert_raises_rpc_error, assert_is_hash_string, assert_does_not_contain_key, assert_contains_key, assert_contains_pair +from test_framework.mininode import CTransaction, hex_str_to_bytes, bytes_to_hex_str, CScriptReissue, CScriptOwner, CScriptTransfer, CTxOut, CScriptIssue def truncate(number, digits=8): stepper = pow(10.0, digits) return math.trunc(stepper * number) / stepper -def get_first_unspent(self: object, node: object, needed: object = 500.1) -> object: +# noinspection PyTypeChecker,PyUnboundLocalVariable,PyUnresolvedReferences +def get_first_unspent(self: object, node: object, needed: float = 500.1) -> object: # Find the first unspent with enough required for transaction for n in range(0, len(node.listunspent())): unspent = node.listunspent()[n] @@ -64,6 +55,7 @@ def get_tx_issue_hex(self, node, asset_name, asset_quantity, asset_units=0): return tx_issue_hex +# noinspection PyTypeChecker class RawAssetTransactionsTest(RavenTestFramework): def set_test_params(self): self.setup_clean_chain = True @@ -1332,7 +1324,7 @@ def issue_sub_multiple_outputs_test(self): assert_equal(1, n0.listmyassets()[owner]) def transfer_asset_tampering_test(self): - self.log.info("Testing trasnfer of asset transaction tampering...") + self.log.info("Testing transfer of asset transaction tampering...") n0, n1 = self.nodes[0], self.nodes[1] ######################################## diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index 
f5b8dd28b4..e622caf370 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -3,19 +3,20 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the RBF code.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (satoshi_round, assert_raises_rpc_error, assert_equal, Decimal) +from test_framework.util import satoshi_round, assert_raises_rpc_error, assert_equal, Decimal from test_framework.script import CScript -from test_framework.mininode import (bytes_to_hex_str, COIN, CTransaction, CTxIn, COutPoint, CTxOut) +from test_framework.mininode import bytes_to_hex_str, COIN, CTransaction, CTxIn, COutPoint, CTxOut MAX_REPLACEMENT_LIMIT = 100 -def txToHex(tx): +def tx_to_hex(tx): return bytes_to_hex_str(tx.serialize()) -def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): +def make_utxo(node, amount, confirmed=True, script_pub_key=CScript([1])): """Create a txout with a given amount and scriptPubKey Mines coins as needed. 
@@ -40,10 +41,10 @@ def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(txid, i))] - tx2.vout = [CTxOut(amount, scriptPubKey)] + tx2.vout = [CTxOut(amount, script_pub_key)] tx2.rehash() - signed_tx = node.signrawtransaction(txToHex(tx2)) + signed_tx = node.signrawtransaction(tx_to_hex(tx2)) txid = node.sendrawtransaction(signed_tx['hex'], True) @@ -91,7 +92,7 @@ def run_test(self): self.test_doublespend_tree() self.log.info("Running test replacement feeperkb...") - self.test_replacement_feeperkb() + self.test_replacement_fee_per_kb() self.log.info("Running test spends of conflicting outputs...") self.test_spends_of_conflicting_outputs() @@ -123,18 +124,18 @@ def test_simple_doublespend(self): self.sync_all() tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx_to_hex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) self.sync_all() # Should fail because we haven't changed the fee tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx_to_hex(tx1b) # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) @@ -143,9 +144,9 @@ def test_simple_doublespend(self): # Extra 0.1 RVN fee tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx_to_hex(tx1b) # Replacement still disabled even with "enough fee" assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True) # Works when enabled @@ -166,18 
+167,18 @@ def test_simple_doublespend(self): def test_doublespend_chain(self): """Doublespend of a long chain""" - initial_nValue = 5000*COIN - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) + initial_n_value = 5000*COIN + tx0_outpoint = make_utxo(self.nodes[0], initial_n_value) prevout = tx0_outpoint - remaining_value = initial_nValue + remaining_value = initial_n_value chain_txids = [] while remaining_value > 1000*COIN: remaining_value -= 100*COIN tx = CTransaction() - tx.vin = [CTxIn(prevout, nSequence=0)] + tx.vin = [CTxIn(prevout, n_sequence=0)] tx.vout = [CTxOut(remaining_value, CScript([1]))] - tx_hex = txToHex(tx) + tx_hex = tx_to_hex(tx) txid = self.nodes[0].sendrawtransaction(tx_hex, True) chain_txids.append(txid) prevout = COutPoint(int(txid, 16), 0) @@ -185,18 +186,18 @@ def test_doublespend_chain(self): # Whether the double-spend is allowed is evaluated by including all # child fees - 40 RVN - so this attempt is rejected. dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)] + dbl_tx.vout = [CTxOut(initial_n_value - 30*COIN, CScript([1]))] + dbl_tx_hex = tx_to_hex(dbl_tx) # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) # Accepted with sufficient fee dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] + dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)] dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx_hex = tx_to_hex(dbl_tx) self.nodes[0].sendrawtransaction(dbl_tx_hex, True) mempool = self.nodes[0].getrawmempool() @@ -206,58 +207,58 @@ def test_doublespend_chain(self): def test_doublespend_tree(self): """Doublespend of a big tree of transactions""" - initial_nValue = 50*COIN - tx0_outpoint = make_utxo(self.nodes[0], 
initial_nValue) + initial_n_value = 50*COIN + tx0_outpoint = make_utxo(self.nodes[0], initial_n_value) - def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): + def branch(prevout, initial_value, max_txs, tree_width=5, fee_val=0.0001 * COIN, _total_txs=None): if _total_txs is None: _total_txs = [0] if _total_txs[0] >= max_txs: return - txout_value = (initial_value - fee) // tree_width - if txout_value < fee: + txout_value = (initial_value - fee_val) // tree_width + if txout_value < fee_val: return vout = [CTxOut(txout_value, CScript([i+1])) for i in range(tree_width)] - tx = CTransaction() - tx.vin = [CTxIn(prevout, nSequence=0)] - tx.vout = vout - tx_hex = txToHex(tx) + tx_data = CTransaction() + tx_data.vin = [CTxIn(prevout, n_sequence=0)] + tx_data.vout = vout + tx_hex = tx_to_hex(tx_data) - assert(len(tx.serialize()) < 100000) + assert(len(tx_data.serialize()) < 100000) txid = self.nodes[0].sendrawtransaction(tx_hex, True) - yield tx + yield tx_data _total_txs[0] += 1 txid = int(txid, 16) - for i, _ in enumerate(tx.vout): + for i, _ in enumerate(tx_data.vout): for x in branch(COutPoint(txid, i), txout_value, - max_txs, - tree_width=tree_width, fee=fee, - _total_txs=_total_txs): + max_txs, + tree_width=tree_width, fee_val=fee_val, + _total_txs=_total_txs): yield x fee = int(0.0001*COIN) n = MAX_REPLACEMENT_LIMIT - tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) + tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee)) assert_equal(len(tree_txs), n) # Attempt double-spend, will fail because too little fee paid dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)] + dbl_tx.vout = [CTxOut(initial_n_value - fee*n, CScript([1]))] + dbl_tx_hex = tx_to_hex(dbl_tx) # This will raise an exception due to insufficient fee 
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) # 1 RVN fee is enough dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)] + dbl_tx.vout = [CTxOut(initial_n_value - fee*n - 1*COIN, CScript([1]))] + dbl_tx_hex = tx_to_hex(dbl_tx) self.nodes[0].sendrawtransaction(dbl_tx_hex, True) mempool = self.nodes[0].getrawmempool() @@ -270,14 +271,14 @@ def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _tota # double-spent at once" anti-DoS limit. for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): fee = int(0.0001*COIN) - tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) + tx0_outpoint = make_utxo(self.nodes[0], initial_n_value) + tree_txs = list(branch(tx0_outpoint, initial_n_value, n, fee_val=fee)) assert_equal(len(tree_txs), n) dbl_tx = CTransaction() - dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] - dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx.vin = [CTxIn(tx0_outpoint, n_sequence=0)] + dbl_tx.vout = [CTxOut(initial_n_value - 2*fee*n, CScript([1]))] + dbl_tx_hex = tx_to_hex(dbl_tx) # This will raise an exception assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True) @@ -285,22 +286,22 @@ def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _tota tx.rehash() self.nodes[0].getrawtransaction(tx.hash) - def test_replacement_feeperkb(self): + def test_replacement_fee_per_kb(self): """Replacement requires fee-per-KB to be higher""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)] 
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx_to_hex(tx1a) self.nodes[0].sendrawtransaction(tx1a_hex, True) # Higher fee, but the fee per KB is much lower, so the replacement is # rejected. tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx_to_hex(tx1b) # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) @@ -311,36 +312,36 @@ def test_spends_of_conflicting_outputs(self): utxo2 = make_utxo(self.nodes[0], 3*COIN) tx1a = CTransaction() - tx1a.vin = [CTxIn(utxo1, nSequence=0)] + tx1a.vin = [CTxIn(utxo1, n_sequence=0)] tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx_to_hex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) tx1a_txid = int(tx1a_txid, 16) # Direct spend an output of the transaction we're replacing. tx2 = CTransaction() - tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] - tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) + tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0)] + tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)) tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) + tx2_hex = tx_to_hex(tx2) # This will raise an exception assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) # Spend tx1a's output to test the indirect case. 
tx1b = CTransaction() - tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] + tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)] tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx_to_hex(tx1b) tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) tx1b_txid = int(tx1b_txid, 16) tx2 = CTransaction() - tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), + tx2.vin = [CTxIn(utxo1, n_sequence=0), CTxIn(utxo2, n_sequence=0), CTxIn(COutPoint(tx1b_txid, 0))] tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) + tx2_hex = tx_to_hex(tx2) # This will raise an exception assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True) @@ -353,13 +354,13 @@ def test_new_unconfirmed_inputs(self): tx1 = CTransaction() tx1.vin = [CTxIn(confirmed_utxo)] tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1_hex = txToHex(tx1) + tx1_hex = tx_to_hex(tx1) self.nodes[0].sendrawtransaction(tx1_hex, True) tx2 = CTransaction() tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] tx2.vout = tx1.vout - tx2_hex = txToHex(tx2) + tx2_hex = tx_to_hex(tx2) # This will raise an exception assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True) @@ -370,19 +371,19 @@ def test_too_many_replacements(self): # transactions # Start by creating a single transaction with many outputs - initial_nValue = 10*COIN - utxo = make_utxo(self.nodes[0], initial_nValue) + initial_n_value = 10 * COIN + utxo = make_utxo(self.nodes[0], initial_n_value) fee = int(0.0001*COIN) - split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) + split_value = int((initial_n_value - fee) / (MAX_REPLACEMENT_LIMIT + 1)) outputs = [] for i in range(MAX_REPLACEMENT_LIMIT+1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() - splitting_tx.vin = [CTxIn(utxo, nSequence=0)] + splitting_tx.vin = [CTxIn(utxo, n_sequence=0)] 
splitting_tx.vout = outputs - splitting_tx_hex = txToHex(splitting_tx) + splitting_tx_hex = tx_to_hex(splitting_tx) txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) txid = int(txid, 16) @@ -390,9 +391,9 @@ def test_too_many_replacements(self): # Now spend each of those outputs individually for i in range(MAX_REPLACEMENT_LIMIT+1): tx_i = CTransaction() - tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] + tx_i.vin = [CTxIn(COutPoint(txid, i), n_sequence=0)] tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] - tx_i_hex = txToHex(tx_i) + tx_i_hex = tx_to_hex(tx_i) self.nodes[0].sendrawtransaction(tx_i_hex, True) # Now create doublespend of the whole lot; should fail. @@ -401,11 +402,11 @@ def test_too_many_replacements(self): double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) inputs = [] for i in range(MAX_REPLACEMENT_LIMIT+1): - inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) + inputs.append(CTxIn(COutPoint(txid, i), n_sequence=0)) double_tx = CTransaction() double_tx.vin = inputs double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) + double_tx_hex = tx_to_hex(double_tx) # This will raise an exception assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True) @@ -414,7 +415,7 @@ def test_too_many_replacements(self): double_tx = CTransaction() double_tx.vin = inputs[0:-1] double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) + double_tx_hex = tx_to_hex(double_tx) self.nodes[0].sendrawtransaction(double_tx_hex, True) def test_opt_in(self): @@ -423,16 +424,16 @@ def test_opt_in(self): # Create a non-opting in transaction tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] + tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0xffffffff)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx_to_hex(tx1a) tx1a_txid = 
self.nodes[0].sendrawtransaction(tx1a_hex, True) # Shouldn't be able to double-spend tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx_to_hex(tx1b) # This will raise an exception assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True) @@ -441,16 +442,16 @@ def test_opt_in(self): # Create a different non-opting in transaction tx2a = CTransaction() - tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] + tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0xfffffffe)] tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx2a_hex = txToHex(tx2a) + tx2a_hex = tx_to_hex(tx2a) tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) # Still shouldn't be able to double-spend tx2b = CTransaction() - tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)] tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] - tx2b_hex = txToHex(tx2b) + tx2b_hex = tx_to_hex(tx2b) # This will raise an exception assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True) @@ -463,22 +464,22 @@ def test_opt_in(self): tx2a_txid = int(tx2a_txid, 16) tx3a = CTransaction() - tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), - CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] + tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0xffffffff), + CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0xfffffffd)] tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] - tx3a_hex = txToHex(tx3a) + tx3a_hex = tx_to_hex(tx3a) self.nodes[0].sendrawtransaction(tx3a_hex, True) tx3b = CTransaction() - tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] + tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), n_sequence=0)] tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] - tx3b_hex = 
txToHex(tx3b) + tx3b_hex = tx_to_hex(tx3b) tx3c = CTransaction() - tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] + tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), n_sequence=0)] tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] - tx3c_hex = txToHex(tx3c) + tx3c_hex = tx_to_hex(tx3c) self.nodes[0].sendrawtransaction(tx3b_hex, True) # If tx3b was accepted, tx3c won't look like a replacement, @@ -493,16 +494,16 @@ def test_prioritised_transactions(self): tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx_to_hex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Higher fee, but the actual fee per KB is much lower. tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b.vin = [CTxIn(tx0_outpoint, n_sequence=0)] tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx_to_hex(tx1b) # Verify tx1b cannot replace tx1a. assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True) @@ -519,17 +520,17 @@ def test_prioritised_transactions(self): tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx2a = CTransaction() - tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2a.vin = [CTxIn(tx1_outpoint, n_sequence=0)] tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] - tx2a_hex = txToHex(tx2a) + tx2a_hex = tx_to_hex(tx2a) self.nodes[0].sendrawtransaction(tx2a_hex, True) # Lower fee, but we'll prioritise it tx2b = CTransaction() - tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] + tx2b.vin = [CTxIn(tx1_outpoint, n_sequence=0)] tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] tx2b.rehash() - tx2b_hex = txToHex(tx2b) + tx2b_hex = tx_to_hex(tx2b) # Verify tx2b cannot replace tx2a. 
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True) @@ -554,11 +555,11 @@ def test_rpc(self): assert_equal(json1["vin"][0]["sequence"], 4294967295) rawtx2 = self.nodes[0].createrawtransaction([], outs) - frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) - frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) + f_raw_tx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) + f_raw_tx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) - json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) - json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) + json0 = self.nodes[0].decoderawtransaction(f_raw_tx2a['hex']) + json1 = self.nodes[0].decoderawtransaction(f_raw_tx2b['hex']) assert_equal(json0["vin"][0]["sequence"], 4294967293) assert_equal(json1["vin"][0]["sequence"], 4294967294) diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py index 2d02d7f788..c50370e760 100755 --- a/test/functional/feature_reindex.py +++ b/test/functional/feature_reindex.py @@ -3,16 +3,18 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test running ravend with -reindex and -reindex-chainstate options. + +""" +Test running ravend with -reindex and -reindex-chainstate options. - Start a single node and generate 3 blocks. -- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3. -- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. +- Stop the node and restart it with -reindex. Verify that the node has re-indexed up to block 3. +- Stop the node and restart it with -reindex-chainstate. Verify that the node has re-indexed up to block 3. 
""" +import time from test_framework.test_framework import RavenTestFramework from test_framework.util import assert_equal -import time class ReindexTest(RavenTestFramework): diff --git a/test/functional/feature_restricted_assets.py b/test/functional/feature_restricted_assets.py index 9d3c26ae4d..3e4cc5fd60 100755 --- a/test/functional/feature_restricted_assets.py +++ b/test/functional/feature_restricted_assets.py @@ -3,11 +3,13 @@ # Copyright (c) 2017-2018 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test restricted asset related RPC commands.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import * +from test_framework.util import assert_equal, assert_raises_rpc_error, assert_does_not_contain_key, assert_does_not_contain, assert_contains_key, assert_happening, assert_contains +# noinspection PyAttributeOutsideInit class RestrictedAssetsTest(RavenTestFramework): def set_test_params(self): self.setup_clean_chain = True @@ -42,26 +44,20 @@ def issuerestrictedasset(self): assert_raises_rpc_error(None, "Arguments:", n0.issuerestrictedasset, asset_name, qty, verifier) # valid params - assert_raises_rpc_error(None, "Invalid asset name", - n0.issuerestrictedasset, "$!N\/AL!D", qty, verifier, to_address) - assert_raises_rpc_error(None, "Verifier string can not be empty", - n0.issuerestrictedasset, asset_name, qty, "", to_address) - assert_raises_rpc_error(None, "bad-txns-null-verifier-failed-syntax-check", - n0.issuerestrictedasset, asset_name, qty, "false && true", to_address) - assert_raises_rpc_error(None, "bad-txns-null-verifier-contains-non-issued-qualifier", - n0.issuerestrictedasset, asset_name, qty, "#NONEXIZTENT", to_address) - assert_raises_rpc_error(None, "Invalid Raven address", - n0.issuerestrictedasset, asset_name, qty, verifier, "garbageaddress") + assert_raises_rpc_error(None, "Invalid asset 
name", n0.issuerestrictedasset, "$!N\/AL!D", qty, verifier, to_address) + assert_raises_rpc_error(None, "Verifier string can not be empty", n0.issuerestrictedasset, asset_name, qty, "", to_address) + assert_raises_rpc_error(None, "bad-txns-null-verifier-failed-syntax-check", n0.issuerestrictedasset, asset_name, qty, "false && true", to_address) + assert_raises_rpc_error(None, "bad-txns-null-verifier-contains-non-issued-qualifier", n0.issuerestrictedasset, asset_name, qty, "#NONEXIZTENT", to_address) + assert_raises_rpc_error(None, "Invalid Raven address", n0.issuerestrictedasset, asset_name, qty, verifier, "garbageaddress") # base asset required - assert_raises_rpc_error(-32600, f"Wallet doesn't have asset: {base_asset_name}!", - n0.issuerestrictedasset, asset_name, qty, verifier, to_address) + assert_raises_rpc_error(-32600, f"Wallet doesn't have asset: {base_asset_name}!", n0.issuerestrictedasset, asset_name, qty, verifier, to_address) n0.issue(base_asset_name) # issue txid = n0.issuerestrictedasset(asset_name, qty, verifier, to_address) - #verify + # verify assert_equal(64, len(txid[0])) assert_equal(qty, n0.listmyassets(asset_name, True)[asset_name]['balance']) n0.generate(1) @@ -89,18 +85,14 @@ def issuerestrictedasset_full(self): n0.issue(base_asset_name) # valid params - assert_raises_rpc_error(None, "Invalid Raven address", - n0.issuerestrictedasset, asset_name, qty, verifier, to_address, "garbagechangeaddress") - assert_raises_rpc_error(None, "Units must be between 0 and 8", - n0.issuerestrictedasset, asset_name, qty, verifier, to_address, change_address, 9) - assert_raises_rpc_error(None, "Units must be between 0 and 8", - n0.issuerestrictedasset, asset_name, qty, verifier, to_address, change_address, -1) - - #issue - txid = n0.issuerestrictedasset(asset_name, qty, verifier, to_address, change_address, units, reissuable, - has_ipfs, ipfs_hash) - - #verify + assert_raises_rpc_error(None, "Invalid Raven address", n0.issuerestrictedasset, asset_name, qty, 
verifier, to_address, "garbagechangeaddress") + assert_raises_rpc_error(None, "Units must be between 0 and 8", n0.issuerestrictedasset, asset_name, qty, verifier, to_address, change_address, 9) + assert_raises_rpc_error(None, "Units must be between 0 and 8", n0.issuerestrictedasset, asset_name, qty, verifier, to_address, change_address, -1) + + # issue + txid = n0.issuerestrictedasset(asset_name, qty, verifier, to_address, change_address, units, reissuable, has_ipfs, ipfs_hash) + + # verify assert_equal(64, len(txid[0])) assert_equal(qty, n0.listmyassets(asset_name, True)[asset_name]['balance']) n0.generate(1) @@ -150,25 +142,15 @@ def reissuerestrictedasset_full(self): n0.generate(1) # valid params - assert_raises_rpc_error(None, "Invalid asset name", - n0.reissuerestrictedasset, "$!N\/AL!D", qty, to_address) - assert_raises_rpc_error(None, "Wallet doesn't have asset", - n0.reissuerestrictedasset, foreign_asset_name, qty, to_address) - assert_raises_rpc_error(None, "Invalid Raven address", - n0.reissuerestrictedasset, asset_name, qty, "garbageaddress") - assert_raises_rpc_error(None, "Invalid Raven address", - n0.reissuerestrictedasset, asset_name, qty, to_address, - change_verifier, verifier, "garbagechangeaddress") - assert_raises_rpc_error(None, "Units must be between -1 and 8", - n0.reissuerestrictedasset, asset_name, qty, to_address, - change_verifier, verifier, change_address, 9) - assert_raises_rpc_error(None, "Units must be between -1 and 8", - n0.reissuerestrictedasset, asset_name, qty, to_address, - change_verifier, verifier, change_address, -2) + assert_raises_rpc_error(None, "Invalid asset name", n0.reissuerestrictedasset, "$!N\/AL!D", qty, to_address) + assert_raises_rpc_error(None, "Wallet doesn't have asset", n0.reissuerestrictedasset, foreign_asset_name, qty, to_address) + assert_raises_rpc_error(None, "Invalid Raven address", n0.reissuerestrictedasset, asset_name, qty, "garbageaddress") + assert_raises_rpc_error(None, "Invalid Raven address", 
n0.reissuerestrictedasset, asset_name, qty, to_address, change_verifier, verifier, "garbagechangeaddress") + assert_raises_rpc_error(None, "Units must be between -1 and 8", n0.reissuerestrictedasset, asset_name, qty, to_address, change_verifier, verifier, change_address, 9) + assert_raises_rpc_error(None, "Units must be between -1 and 8", n0.reissuerestrictedasset, asset_name, qty, to_address, change_verifier, verifier, change_address, -2) # reissue - txid = n0.reissuerestrictedasset(asset_name, qty, to_address, - change_verifier, verifier, change_address, units, reissuable, ipfs_hash) + txid = n0.reissuerestrictedasset(asset_name, qty, to_address, change_verifier, verifier, change_address, units, reissuable, ipfs_hash) # verify assert_equal(64, len(txid[0])) @@ -204,7 +186,7 @@ def issuequalifierasset(self): assert_equal(False, asset_data['reissuable']) assert_equal(False, asset_data['has_ipfs']) - def issuequalifierasset_full(self): + def issue_qualifier_asset_full(self): self.log.info("Testing issuequalifierasset() with all params...") n0 = self.nodes[0] @@ -216,13 +198,9 @@ def issuequalifierasset_full(self): ipfs_hash = "QmacSRmrkVmvJfbCpmU6pK72furJ8E8fbKHindrLxmYMQo" assert_raises_rpc_error(None, "Amount must be between 1 and 10", n0.issuequalifierasset, asset_name, 0) - assert_raises_rpc_error(None, "Invalid Raven address", n0.issuequalifierasset, asset_name, qty, - "garbageaddress") - assert_raises_rpc_error(None, "Invalid Raven address", n0.issuequalifierasset, asset_name, qty, to_address, - "gargabechangeaddress") - assert_raises_rpc_error(None, "ipfs_hash must be 46 characters", n0.issuequalifierasset, asset_name, qty, - to_address, change_address, True) - + assert_raises_rpc_error(None, "Invalid Raven address", n0.issuequalifierasset, asset_name, qty, "garbageaddress") + assert_raises_rpc_error(None, "Invalid Raven address", n0.issuequalifierasset, asset_name, qty, to_address, "gargabechangeaddress") + assert_raises_rpc_error(None, "ipfs_hash must be 46 
characters", n0.issuequalifierasset, asset_name, qty, to_address, change_address, True) # issue txid = n0.issuequalifierasset(asset_name, qty, to_address, change_address, has_ipfs, ipfs_hash) @@ -252,16 +230,13 @@ def transferqualifier(self): n0.generate(1) self.sync_all() - assert_equal(1, n0.listmyassets(asset_name, True)[asset_name]['balance']) + assert_equal(1, n0.listmyassets(asset_name, True)[asset_name]['balance']) assert_does_not_contain_key(asset_name, n1.listmyassets()) - assert_raises_rpc_error(None, "Only use this rpc call to send Qualifier assets", n0.transferqualifier, - nonqualifier_asset_name, 1, n1_address) + assert_raises_rpc_error(None, "Only use this rpc call to send Qualifier assets", n0.transferqualifier, nonqualifier_asset_name, 1, n1_address) assert_raises_rpc_error(None, "Invalid Raven address", n0.transferqualifier, asset_name, 1, "garbageaddress") - assert_raises_rpc_error(None, "Invalid Raven address", n0.transferqualifier, asset_name, 1, n1_address, - "garbagechangeaddress") - assert_raises_rpc_error(None, "Invalid IPFS hash", n0.transferqualifier, asset_name, 1, n1_address, - n0_change_address, "garbagemessage") + assert_raises_rpc_error(None, "Invalid Raven address", n0.transferqualifier, asset_name, 1, n1_address, "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid IPFS hash", n0.transferqualifier, asset_name, 1, n1_address, n0_change_address, "garbagemessage") # transfer txid = n0.transferqualifier(asset_name, 1, n1_address, n0_change_address, message) @@ -290,12 +265,12 @@ def tagging(self): verifier = tag issue_address = n0.getnewaddress() n0.issue(base_asset_name) - assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.issuerestrictedasset, - asset_name, qty, verifier, issue_address) + assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.issuerestrictedasset, asset_name, qty, verifier, issue_address) # Isolate this test from other tagging on n0... 
def viewmytaggedaddresses(): return list(filter(lambda x: tag == x['Tag Name'], n0.viewmytaggedaddresses())) + assert_equal(0, len(viewmytaggedaddresses())) n0.addtagtoaddress(tag, issue_address, change_address) @@ -317,8 +292,7 @@ def viewmytaggedaddresses(): assert_contains_key('Assigned', t1) assert_happening(t1['Assigned']) - assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.transfer, - asset_name, 100, address) + assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.transfer, asset_name, 100, address) # special case: make sure transfer fails if change address(es) are verified even though to address isn't rvn_change_address = n0.getnewaddress() @@ -326,23 +300,20 @@ def viewmytaggedaddresses(): n0.addtagtoaddress(tag, rvn_change_address) n0.addtagtoaddress(tag, asset_change_address) n0.generate(1) - assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.transfer, - asset_name, 100, address, "", 0, rvn_change_address, asset_change_address) + assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.transfer, asset_name, 100, address, "", 0, rvn_change_address, asset_change_address) n0.removetagfromaddress(tag, rvn_change_address) n0.removetagfromaddress(tag, asset_change_address) n0.generate(1) ## assert_raises_rpc_error(None, "Invalid Raven address", n0.addtagtoaddress, tag, "garbageaddress") - assert_raises_rpc_error(None, "Invalid Raven change address", n0.addtagtoaddress, tag, address, - "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid Raven change address", n0.addtagtoaddress, tag, address, "garbagechangeaddress") n0.addtagtoaddress(tag, address, change_address) n0.addtagtoaddress(tag, address, change_address) # redundant tagging ok if consistent n0.generate(1) - assert_raises_rpc_error(-32600, "add-qualifier-when-already-assigned", n0.addtagtoaddress, - tag, address, change_address) + assert_raises_rpc_error(-32600, 
"add-qualifier-when-already-assigned", n0.addtagtoaddress, tag, address, change_address) # post-tagging verification assert_contains(address, n0.listaddressesfortag(tag)) @@ -351,12 +322,12 @@ def viewmytaggedaddresses(): # viewmytaggedaddresses tagged = viewmytaggedaddresses() - assert_equal(4, len(tagged)) # includes removed... + assert_equal(4, len(tagged)) assert_contains(issue_address, list(map(lambda x: x['Address'], tagged))) assert_contains(address, list(map(lambda x: x['Address'], tagged))) for t in tagged: assert_equal(tag, t['Tag Name']) - if ('Assigned' in t): + if 'Assigned' in t: assert_happening(t['Assigned']) else: assert_happening(t['Removed']) @@ -364,15 +335,12 @@ def viewmytaggedaddresses(): # special case: make sure transfer fails if the asset change address isn't verified (even if the rvn change address is) rvn_change_address = n0.getnewaddress() asset_change_address = n0.getnewaddress() - assert_raises_rpc_error(-20, "bad-txns-null-verifier-address-failed-verification", n0.transfer, - asset_name, 100, address, "", 0, rvn_change_address, asset_change_address) + assert_raises_rpc_error(-20, "bad-txns-null-verifier-address-failed-verification", n0.transfer, asset_name, 100, address, "", 0, rvn_change_address, asset_change_address) n0.addtagtoaddress(tag, rvn_change_address) n0.generate(1) - assert_raises_rpc_error(-20, "bad-txns-null-verifier-address-failed-verification", n0.transfer, - asset_name, 100, address, "", 0, rvn_change_address, asset_change_address) + assert_raises_rpc_error(-20, "bad-txns-null-verifier-address-failed-verification", n0.transfer, asset_name, 100, address, "", 0, rvn_change_address, asset_change_address) n0.removetagfromaddress(tag, rvn_change_address) n0.generate(1) - ## # do the transfer already! 
txid = n0.transfer(asset_name, 100, address) @@ -387,18 +355,16 @@ def viewmytaggedaddresses(): txid = n0.transfer(asset_name, 1, issue_address, "", 0, "", asset_change_address) n0.generate(1) assert_equal(64, len(txid[0])) - assert(n0.listassetbalancesbyaddress(asset_change_address)[asset_name] > 0) + assert (n0.listassetbalancesbyaddress(asset_change_address)[asset_name] > 0) assert_raises_rpc_error(None, "Invalid Raven address", n0.removetagfromaddress, tag, "garbageaddress") - assert_raises_rpc_error(None, "Invalid Raven change address", n0.removetagfromaddress, tag, address, - "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid Raven change address", n0.removetagfromaddress, tag, address, "garbagechangeaddress") n0.removetagfromaddress(tag, address, change_address) - n0.removetagfromaddress(tag, address, change_address) # redundant untagging ok if consistent + n0.removetagfromaddress(tag, address, change_address) # redundant untagging ok if consistent n0.generate(1) - assert_raises_rpc_error(-32600, "removing-qualifier-when-not-assigned", n0.removetagfromaddress, - tag, address, change_address) + assert_raises_rpc_error(-32600, "removing-qualifier-when-not-assigned", n0.removetagfromaddress, tag, address, change_address) # TODO: test without specifying change address when there are no valid change addresses (all untagged) @@ -409,7 +375,7 @@ def viewmytaggedaddresses(): # viewmytaggedaddresses tagged = viewmytaggedaddresses() - assert_equal(6, len(tagged)) # includes removed + assert_equal(6, len(tagged)) # includes removed assert_contains(issue_address, list(map(lambda x: x['Address'], tagged))) assert_contains(address, list(map(lambda x: x['Address'], tagged))) for t in tagged: @@ -419,8 +385,7 @@ def viewmytaggedaddresses(): if address == t['Address']: assert_happening(t['Removed']) - assert_raises_rpc_error(-8, "bad-txns-null-verifier-address-failed-verification", n0.transfer, - asset_name, 100, address) + assert_raises_rpc_error(-8, 
"bad-txns-null-verifier-address-failed-verification", n0.transfer, asset_name, 100, address) def freezing(self): self.log.info("Testing freezing...") @@ -464,18 +429,16 @@ def viewmyrestrictedaddresses(): self.sync_all() assert_equal(9000, n0.listassetbalancesbyaddress(change_address)[asset_name]) assert_equal(1000, n1.listmyassets()[asset_name]) - address = change_address # assets have moved + address = change_address # assets have moved assert_raises_rpc_error(None, "Invalid Raven address", n0.freezeaddress, asset_name, "garbageaddress") - assert_raises_rpc_error(None, "Invalid Raven change address", n0.freezeaddress, asset_name, address, - "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid Raven change address", n0.freezeaddress, asset_name, address, "garbagechangeaddress") n0.freezeaddress(asset_name, address, rvn_change_address) - n0.freezeaddress(asset_name, address, rvn_change_address) # redundant freezing ok if consistent + n0.freezeaddress(asset_name, address, rvn_change_address) # redundant freezing ok if consistent n0.generate(1) - assert_raises_rpc_error(-32600, "freeze-address-when-already-frozen", n0.freezeaddress, - asset_name, address, rvn_change_address) + assert_raises_rpc_error(-32600, "freeze-address-when-already-frozen", n0.freezeaddress, asset_name, address, rvn_change_address) # post-freezing verification assert_contains(asset_name, n0.listaddressrestrictions(address)) @@ -489,19 +452,16 @@ def viewmyrestrictedaddresses(): assert_equal(asset_name, r['Asset Name']) assert_happening(r['Restricted']) - assert_raises_rpc_error(-8, "No asset outpoints are selected from the given address", n0.transferfromaddress, - asset_name, address, 1000, n1.getnewaddress()) + assert_raises_rpc_error(-8, "No asset outpoints are selected from the given address", n0.transferfromaddress, asset_name, address, 1000, n1.getnewaddress()) assert_raises_rpc_error(None, "Invalid Raven address", n0.unfreezeaddress, asset_name, "garbageaddress") - 
assert_raises_rpc_error(None, "Invalid Raven change address", n0.unfreezeaddress, asset_name, address, - "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid Raven change address", n0.unfreezeaddress, asset_name, address, "garbagechangeaddress") n0.unfreezeaddress(asset_name, address, rvn_change_address) - n0.unfreezeaddress(asset_name, address, rvn_change_address) # redundant unfreezing ok if consistent + n0.unfreezeaddress(asset_name, address, rvn_change_address) # redundant unfreezing ok if consistent n0.generate(1) - assert_raises_rpc_error(-32600, "unfreeze-address-when-not-frozen", n0.unfreezeaddress, - asset_name, address, rvn_change_address) + assert_raises_rpc_error(-32600, "unfreeze-address-when-not-frozen", n0.unfreezeaddress, asset_name, address, rvn_change_address) # post-unfreezing verification assert_does_not_contain(asset_name, n0.listaddressrestrictions(address)) @@ -522,7 +482,6 @@ def viewmyrestrictedaddresses(): self.sync_all() assert_equal(8000, n0.listassetbalancesbyaddress(change_address)[asset_name]) assert_equal(2000, n1.listmyassets()[asset_name]) - address = change_address # assets have moved def global_freezing(self): self.log.info("Testing global freezing...") @@ -551,33 +510,28 @@ def global_freezing(self): self.sync_all() assert_equal(9000, n0.listassetbalancesbyaddress(change_address)[asset_name]) assert_equal(1000, n1.listmyassets()[asset_name]) - address = change_address # assets have moved + address = change_address # assets have moved - assert_raises_rpc_error(None, "Invalid Raven change address", n0.freezerestrictedasset, asset_name, - "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid Raven change address", n0.freezerestrictedasset, asset_name, "garbagechangeaddress") n0.freezerestrictedasset(asset_name, rvn_change_address) - n0.freezerestrictedasset(asset_name, rvn_change_address) # redundant freezing ok if consistent + n0.freezerestrictedasset(asset_name, rvn_change_address) # redundant freezing ok 
if consistent n0.generate(1) - assert_raises_rpc_error(None, "global-freeze-when-already-frozen", n0.freezerestrictedasset, - asset_name, rvn_change_address) + assert_raises_rpc_error(None, "global-freeze-when-already-frozen", n0.freezerestrictedasset, asset_name, rvn_change_address) # post-freeze validation assert_contains(asset_name, n0.listglobalrestrictions()) assert n0.checkglobalrestriction(asset_name) - assert_raises_rpc_error(-8, "restricted asset has been globally frozen", n0.transferfromaddress, - asset_name, address, 1000, n1.getnewaddress()) + assert_raises_rpc_error(-8, "restricted asset has been globally frozen", n0.transferfromaddress, asset_name, address, 1000, n1.getnewaddress()) - assert_raises_rpc_error(None, "Invalid Raven change address", n0.unfreezerestrictedasset, asset_name, - "garbagechangeaddress") + assert_raises_rpc_error(None, "Invalid Raven change address", n0.unfreezerestrictedasset, asset_name, "garbagechangeaddress") n0.unfreezerestrictedasset(asset_name, rvn_change_address) - n0.unfreezerestrictedasset(asset_name, rvn_change_address) # redundant unfreezing ok if consistent + n0.unfreezerestrictedasset(asset_name, rvn_change_address) # redundant unfreezing ok if consistent n0.generate(1) - assert_raises_rpc_error(None, "global-unfreeze-when-not-frozen", n0.unfreezerestrictedasset, - asset_name, rvn_change_address) + assert_raises_rpc_error(None, "global-unfreeze-when-not-frozen", n0.unfreezerestrictedasset, asset_name, rvn_change_address) # post-unfreeze validation assert_does_not_contain(asset_name, n0.listglobalrestrictions()) @@ -589,8 +543,6 @@ def global_freezing(self): self.sync_all() assert_equal(8000, n0.listassetbalancesbyaddress(change_address)[asset_name]) assert_equal(2000, n1.listmyassets()[asset_name]) - address = change_address # assets have moved - def isvalidverifierstring(self): self.log.info("Testing isvalidverifierstring()...") @@ -614,8 +566,7 @@ def isvalidverifierstring(self): " " ] for s in invalid_empty: - 
assert_raises_rpc_error(-8, "Verifier string can not be empty", - n0.isvalidverifierstring, s) + assert_raises_rpc_error(-8, "Verifier string can not be empty", n0.isvalidverifierstring, s) invalid_syntax = [ "asdf", @@ -624,12 +575,9 @@ def isvalidverifierstring(self): for s in invalid_syntax: assert_raises_rpc_error(-8, "failed-syntax", n0.isvalidverifierstring, s) - invalid_non_issued = [ - "#NOPE" - ] + invalid_non_issued = ["#NOPE"] for s in invalid_non_issued: - assert_raises_rpc_error(-8, "contains-non-issued-qualifier", - n0.isvalidverifierstring, s) + assert_raises_rpc_error(-8, "contains-non-issued-qualifier", n0.isvalidverifierstring, s) def run_test(self): self.activate_restricted_assets() @@ -638,12 +586,13 @@ def run_test(self): self.issuerestrictedasset_full() self.reissuerestrictedasset_full() self.issuequalifierasset() - self.issuequalifierasset_full() + self.issue_qualifier_asset_full() self.transferqualifier() self.tagging() self.freezing() self.global_freezing() self.isvalidverifierstring() + if __name__ == '__main__': - RestrictedAssetsTest().main() \ No newline at end of file + RestrictedAssetsTest().main() diff --git a/test/functional/feature_rewards.py b/test/functional/feature_rewards.py index 76c91ad9c1..49bb5ed598 100755 --- a/test/functional/feature_rewards.py +++ b/test/functional/feature_rewards.py @@ -3,20 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Testing rewards use cases - -""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import * +"""Testing rewards use cases""" -import string +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error, assert_contains, Decimal +# noinspection PyAttributeOutsideInit class RewardsTest(RavenTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 4 - self.extra_args = [["-assetindex", "-debug=rewards"], ["-assetindex", "-minrewardheight=15"], ["-assetindex"], ["-assetindex"]] + self.extra_args = [["-assetindex", "-debug=rewards"], ["-assetindex", "-minrewardheight=15"], ["-assetindex"], + ["-assetindex"]] def activate_assets(self): self.log.info("Generating RVN for node[0] and activating assets...") @@ -28,130 +27,129 @@ def activate_assets(self): self.sync_all() assert_equal("active", n0.getblockchaininfo()["bip9_softforks"]["assets"]["status"]) - ## Basic functionality test - RVN reward - ## - create the main owner address - ## - mine blocks to have enouugh RVN for the reward payments, plus purchasing the asset - ## - issue the STOCK1 asset to the owner - ## - create 5 shareholder addresses - ## - distribute different amounts of the STOCK1 asset to each of the shareholder addresses - ## - mine some blocks - ## - retrieve the current chain height - ## - distribute an RVN reward amongst the shareholders - ## - verify that each one receives the expected amount of reward RVN + # Basic functionality test - RVN reward + # - create the main owner address + # - mine blocks to have enough RVN for the reward payments, plus purchasing the asset + # - issue the STOCK1 asset to the owner + # - create 5 shareholder addresses + # - distribute different amounts of the STOCK1 asset to each of the shareholder addresses + # - mine some blocks + # - retrieve the current chain height + # - distribute an RVN reward amongst the shareholders + # 
- verify that each one receives the expected amount of reward RVN def basic_test_rvn(self): self.log.info("Running basic RVN reward test!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK1 asset") - n0.issue(asset_name="STOCK1", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK1", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Checking listassetbalancesbyaddress()...") - assert_equal(n0.listassetbalancesbyaddress(ownerAddr0)["STOCK1"], 10000) + assert_equal(n0.listassetbalancesbyaddress(owner_addr0)["STOCK1"], 10000) self.log.info("Transferring all assets to a single address for tracking") - n0.transfer(asset_name="STOCK1", qty=10000, to_address=distAddr0) + n0.transfer(asset_name="STOCK1", qty=10000, to_address=dist_addr0) n0.generate(10) self.sync_all() - assert_equal(n0.listassetbalancesbyaddress(distAddr0)["STOCK1"], 10000) + assert_equal(n0.listassetbalancesbyaddress(dist_addr0)["STOCK1"], 10000) self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() - shareholderAddr3 = n1.getnewaddress() - shareholderAddr4 = n0.getnewaddress() + shareholder_addr0 = 
n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() + shareholder_addr3 = n1.getnewaddress() + shareholder_addr4 = n0.getnewaddress() self.log.info("Distributing shares") - n0.transfer(asset_name="STOCK1", qty=200, to_address=shareholderAddr0, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK1", qty=300, to_address=shareholderAddr1, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK1", qty=400, to_address=shareholderAddr2, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK1", qty=500, to_address=shareholderAddr3, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK1", qty=600, to_address=shareholderAddr4, message="", expire_time=0, change_address="", asset_change_address=distAddr0) + n0.transfer(asset_name="STOCK1", qty=200, to_address=shareholder_addr0, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK1", qty=300, to_address=shareholder_addr1, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK1", qty=400, to_address=shareholder_addr2, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK1", qty=500, to_address=shareholder_addr3, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK1", qty=600, to_address=shareholder_addr4, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) n0.generate(10) self.sync_all() self.log.info("Verifying share distribution") - ##ownerDetails = n0.listmyassets("STOCK1", True) - ##self.log.info(f"Owner: {ownerDetails}") - ##distDetails = n0.listassetbalancesbyaddress(distAddr0) - ##self.log.info(f"Change: 
{distDetails}") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["STOCK1"], 200) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK1"], 300) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK1"], 400) - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr3)["STOCK1"], 500) - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr4)["STOCK1"], 600) - assert_equal(n0.listassetbalancesbyaddress(distAddr0)["STOCK1"], 8000) + # ownerDetails = n0.listmyassets("STOCK1", True) + # self.log.info(f"Owner: {ownerDetails}") + # distDetails = n0.listassetbalancesbyaddress(dist_addr0) + # self.log.info(f"Change: {distDetails}") + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["STOCK1"], 200) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK1"], 300) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK1"], 400) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr3)["STOCK1"], 500) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr4)["STOCK1"], 600) + assert_equal(n0.listassetbalancesbyaddress(dist_addr0)["STOCK1"], 8000) self.log.info("Mining blocks") n0.generate(200) self.sync_all() self.log.info("Providing additional funding") - self.nodes[0].sendtoaddress(ownerAddr0, 2000) + self.nodes[0].sendtoaddress(owner_addr0, 2000) n0.generate(100) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 100 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 100 self.log.info("Requesting snapshot of STOCK1 ownership in 100 blocks") - n0.requestsnapshot(asset_name="STOCK1", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK1", block_height=tgt_block_height) n0.generate(61) self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK1", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK1") - 
assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK1", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK1") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(100) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK1", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK1") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK1", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK1") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False owner3 = False owner4 = False owner5 = False - for ownerAddr in snapShot["owners"]: - ##self.log.info(f"Found owner {ownerAddr}") - if ownerAddr["address"] == shareholderAddr0: + for ownerAddr in snap_shot["owners"]: + # self.log.info(f"Found owner {ownerAddr}") + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 200) owner0 = True - elif ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 300) owner1 = True - elif ownerAddr["address"] == shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 400) owner2 = True - elif ownerAddr["address"] == shareholderAddr3: + elif ownerAddr["address"] == shareholder_addr3: assert_equal(ownerAddr["amount_owned"], 500) owner3 = True - elif ownerAddr["address"] == shareholderAddr4: + elif ownerAddr["address"] == shareholder_addr4: assert_equal(ownerAddr["amount_owned"], 600) owner4 = True - elif ownerAddr["address"] == distAddr0: + elif ownerAddr["address"] == dist_addr0: assert_equal(ownerAddr["amount_owned"], 8000) owner5 = True assert_equal(owner0, True) @@ 
-161,147 +159,146 @@ def basic_test_rvn(self): assert_equal(owner4, True) assert_equal(owner5, True) - ## listassetbalancesbyaddress only lists the most recently delivered amount - ## for the address, which I believe is a bug, since there can only be one - ## key in the result object with the asset name. - ##self.log.info("Moving shares after snapshot") - ##n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholderAddr0, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholderAddr1, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholderAddr2, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholderAddr3, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholderAddr4, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.generate(100) - ##self.sync_all() - - ##self.log.info("Verifying share distribution after snapshot") - ##assert_equal(n2.listassetbalancesbyaddress(shareholderAddr0)["STOCK1"], 300) - ##assert_equal(n2.listassetbalancesbyaddress(shareholderAddr1)["STOCK1"], 400) - ##assert_equal(n1.listassetbalancesbyaddress(shareholderAddr2)["STOCK1"], 500) - ##assert_equal(n0.listassetbalancesbyaddress(shareholderAddr3)["STOCK1"], 600) - ##assert_equal(n0.listassetbalancesbyaddress(shareholderAddr4)["STOCK1"], 700) + # listassetbalancesbyaddress only lists the most recently delivered amount + # for the address, which I believe is a bug, since there can only be one + # key in the result object with the asset name. 
+ # self.log.info("Moving shares after snapshot") + # n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholder_addr0, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholder_addr1, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholder_addr2, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholder_addr3, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK1", qty=100, to_address=shareholder_addr4, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.generate(100) + # self.sync_all() + + # self.log.info("Verifying share distribution after snapshot") + # assert_equal(n2.listassetbalancesbyaddress(shareholder_addr0)["STOCK1"], 300) + # assert_equal(n2.listassetbalancesbyaddress(shareholder_addr1)["STOCK1"], 400) + # assert_equal(n1.listassetbalancesbyaddress(shareholder_addr2)["STOCK1"], 500) + # assert_equal(n0.listassetbalancesbyaddress(shareholder_addr3)["STOCK1"], 600) + # assert_equal(n0.listassetbalancesbyaddress(shareholder_addr4)["STOCK1"], 700) self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK1", snapshot_height=tgtBlockHeight, distribution_asset_name="RVN", gross_distribution_amount=2000, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK1", snapshot_height=tgt_block_height, distribution_asset_name="RVN", + gross_distribution_amount=2000, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) 
self.log.info("Verifying RVN holdings after payout") - assert_equal(n0.getreceivedbyaddress(shareholderAddr0, 0), 200) - assert_equal(n1.getreceivedbyaddress(shareholderAddr1, 0), 300) - assert_equal(n2.getreceivedbyaddress(shareholderAddr2, 0), 400) - assert_equal(n1.getreceivedbyaddress(shareholderAddr3, 0), 500) - assert_equal(n0.getreceivedbyaddress(shareholderAddr4, 0), 600) - - ## Basic functionality test - ASSET reward - ## - create the main owner address - ## - mine blocks to have enouugh RVN for the reward fees, plus purchasing the asset - ## - issue the STOCK2 asset to the owner - ## - create 5 shareholder addresses - ## - issue the PAYOUT1 asset to the owner - ## - distribute different amounts of the PAYOUT1 asset to each of the shareholder addresses - ## - mine some blocks - ## - retrieve the current chain height - ## - distribute reward of PAYOUT1 asset units amongst the shareholders - ## - verify that each one receives the expected amount of PAYOUT1 + assert_equal(n0.getreceivedbyaddress(shareholder_addr0, 0), 200) + assert_equal(n1.getreceivedbyaddress(shareholder_addr1, 0), 300) + assert_equal(n2.getreceivedbyaddress(shareholder_addr2, 0), 400) + assert_equal(n1.getreceivedbyaddress(shareholder_addr3, 0), 500) + assert_equal(n0.getreceivedbyaddress(shareholder_addr4, 0), 600) + + # Basic functionality test - ASSET reward + # - create the main owner address + # - mine blocks to have enough RVN for the reward fees, plus purchasing the asset + # - issue the STOCK2 asset to the owner + # - create 5 shareholder addresses + # - issue the PAYOUT1 asset to the owner + # - distribute different amounts of the PAYOUT1 asset to each of the shareholder addresses + # - mine some blocks + # - retrieve the current chain height + # - distribute reward of PAYOUT1 asset units amongst the shareholders + # - verify that each one receives the expected amount of PAYOUT1 def basic_test_asset(self): self.log.info("Running basic ASSET reward test!") n0, n1, n2 = 
self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK2 asset") - n0.issue(asset_name="STOCK2", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK2", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() - shareholderAddr3 = n1.getnewaddress() - shareholderAddr4 = n0.getnewaddress() + shareholder_addr0 = n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() + shareholder_addr3 = n1.getnewaddress() + shareholder_addr4 = n0.getnewaddress() self.log.info("Distributing shares") - n0.transfer(asset_name="STOCK2", qty=200, to_address=shareholderAddr0, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK2", qty=300, to_address=shareholderAddr1, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK2", qty=400, to_address=shareholderAddr2, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK2", qty=500, to_address=shareholderAddr3, message="", 
expire_time=0, change_address="", asset_change_address=distAddr0) - n0.transfer(asset_name="STOCK2", qty=600, to_address=shareholderAddr4, message="", expire_time=0, change_address="", asset_change_address=distAddr0) + n0.transfer(asset_name="STOCK2", qty=200, to_address=shareholder_addr0, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK2", qty=300, to_address=shareholder_addr1, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK2", qty=400, to_address=shareholder_addr2, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK2", qty=500, to_address=shareholder_addr3, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + n0.transfer(asset_name="STOCK2", qty=600, to_address=shareholder_addr4, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) n0.generate(10) self.sync_all() self.log.info("Verifying share distribution") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["STOCK2"], 200) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK2"], 300) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK2"], 400) - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr3)["STOCK2"], 500) - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr4)["STOCK2"], 600) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["STOCK2"], 200) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK2"], 300) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK2"], 400) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr3)["STOCK2"], 500) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr4)["STOCK2"], 600) self.log.info("Mining blocks") n0.generate(200) self.sync_all() self.log.info("Issuing PAYOUT1 asset") - n0.issue(asset_name="PAYOUT1", 
qty=10000, to_address=ownerAddr0, change_address="", \ - units=8, reissuable=True, has_ipfs=False) + n0.issue(asset_name="PAYOUT1", qty=10000, to_address=owner_addr0, change_address="", units=8, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 100 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 100 self.log.info("Requesting snapshot of STOCK2 ownership in 100 blocks") - n0.requestsnapshot(asset_name="STOCK2", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK2", block_height=tgt_block_height) # Mine 60 blocks to make sure the -minrewardsheight is met n0.generate(61) self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK2", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK2") - assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK2", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK2") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(100) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK2", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK2") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK2", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK2") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False owner3 = False owner4 = False - for ownerAddr in snapShot["owners"]: - if ownerAddr["address"] == shareholderAddr0: + for ownerAddr in snap_shot["owners"]: + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 200) owner0 = True - elif 
ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 300) owner1 = True - elif ownerAddr["address"] == shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 400) owner2 = True - elif ownerAddr["address"] == shareholderAddr3: + elif ownerAddr["address"] == shareholder_addr3: assert_equal(ownerAddr["amount_owned"], 500) owner3 = True - elif ownerAddr["address"] == shareholderAddr4: + elif ownerAddr["address"] == shareholder_addr4: assert_equal(ownerAddr["amount_owned"], 600) owner4 = True assert_equal(owner0, True) @@ -310,60 +307,59 @@ def basic_test_asset(self): assert_equal(owner3, True) assert_equal(owner4, True) - ## listassetbalancesbyaddress only lists the most recently delivered amount - ## for the address, which I believe is a bug, since there can only be one - ## key in the result object with the asset name. - ##self.log.info("Moving shares after snapshot") - ##n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholderAddr0, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholderAddr1, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholderAddr2, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholderAddr3, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholderAddr4, message="", expire_time=0, change_address="", asset_change_address=distAddr0) - ##n0.generate(100) - ##self.sync_all() - - ##self.log.info("Verifying share distribution after snapshot") - ##assert_equal(n2.listassetbalancesbyaddress(shareholderAddr0)["STOCK2"], 300) - 
##assert_equal(n2.listassetbalancesbyaddress(shareholderAddr1)["STOCK2"], 400) - ##assert_equal(n1.listassetbalancesbyaddress(shareholderAddr2)["STOCK2"], 500) - ##assert_equal(n0.listassetbalancesbyaddress(shareholderAddr3)["STOCK2"], 600) - ##assert_equal(n0.listassetbalancesbyaddress(shareholderAddr4)["STOCK2"], 700) + # listassetbalancesbyaddress only lists the most recently delivered amount + # for the address, which I believe is a bug, since there can only be one + # key in the result object with the asset name. + # self.log.info("Moving shares after snapshot") + # n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholder_addr0, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholder_addr1, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholder_addr2, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholder_addr3, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.transfer(asset_name="STOCK2", qty=100, to_address=shareholder_addr4, message="", expire_time=0, change_address="", asset_change_address=dist_addr0) + # n0.generate(100) + # self.sync_all() + # self.log.info("Verifying share distribution after snapshot") + # assert_equal(n2.listassetbalancesbyaddress(shareholder_addr0)["STOCK2"], 300) + # assert_equal(n2.listassetbalancesbyaddress(shareholder_addr1)["STOCK2"], 400) + # assert_equal(n1.listassetbalancesbyaddress(shareholder_addr2)["STOCK2"], 500) + # assert_equal(n0.listassetbalancesbyaddress(shareholder_addr3)["STOCK2"], 600) + # assert_equal(n0.listassetbalancesbyaddress(shareholder_addr4)["STOCK2"], 700) self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK2", snapshot_height=tgtBlockHeight, 
distribution_asset_name="PAYOUT1", gross_distribution_amount=2000, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK2", snapshot_height=tgt_block_height, distribution_asset_name="PAYOUT1", + gross_distribution_amount=2000, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) self.log.info("Verifying PAYOUT1 holdings after payout") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["PAYOUT1"], 200) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["PAYOUT1"], 300) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["PAYOUT1"], 400) - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr3)["PAYOUT1"], 500) - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr4)["PAYOUT1"], 600) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["PAYOUT1"], 200) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["PAYOUT1"], 300) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["PAYOUT1"], 400) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr3)["PAYOUT1"], 500) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr4)["PAYOUT1"], 600) - ## Attempts a payout without an asset snapshot + # Attempts a payout without an asset snapshot def payout_without_snapshot(self): self.log.info("Running payout without snapshot test!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() + owner_addr0 = n0.getnewaddress() self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK3 asset") - n0.issue(asset_name="STOCK3", 
qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK3", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 100 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 100 self.log.info("Skipping forward so that we're beyond the expected snapshot height") n0.generate(161) @@ -371,23 +367,23 @@ def payout_without_snapshot(self): self.log.info("Initiating failing reward payout") assert_raises_rpc_error(-32600, "Snapshot request not found", - n0.distributereward, "STOCK3", tgtBlockHeight, "RVN", 2000, ownerAddr0) + n0.distributereward, "STOCK3", tgt_block_height, "RVN", 2000, owner_addr0) - ## Attempts a payout for an invalid ownership asset + # Attempts a payout for an invalid ownership asset def payout_with_invalid_ownership_asset(self): self.log.info("Running payout with invalid ownership asset test!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() + owner_addr0 = n0.getnewaddress() self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 100 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 100 self.log.info("Skipping forward so that we're beyond the expected snapshot height") n0.generate(161) @@ -395,29 +391,28 @@ def payout_with_invalid_ownership_asset(self): self.log.info("Initiating failing reward payout") assert_raises_rpc_error(-32600, "The asset hasn't been created: STOCK4", - n0.distributereward, "STOCK4", tgtBlockHeight, "RVN", 2000, ownerAddr0) + n0.distributereward, "STOCK4", tgt_block_height, "RVN", 2000, owner_addr0) - ## 
Attempts a payout for an invalid payout asset + # Attempts a payout for an invalid payout asset def payout_with_invalid_payout_asset(self): self.log.info("Running payout with invalid payout asset test!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() + owner_addr0 = n0.getnewaddress() self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK5 asset") - n0.issue(asset_name="STOCK5", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK5", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 100 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 100 self.log.info("Skipping forward so that we're beyond the expected snapshot height") n0.generate(161) @@ -425,86 +420,86 @@ def payout_with_invalid_payout_asset(self): self.log.info("Initiating failing reward payout") assert_raises_rpc_error(-32600, "Wallet doesn't have the ownership token(!) 
for the distribution asset", - n0.distributereward, "STOCK5", tgtBlockHeight, "PAYOUT2", 2000, ownerAddr0) + n0.distributereward, "STOCK5", tgt_block_height, "PAYOUT2", 2000, owner_addr0) - ## Attempts a payout for an invalid payout asset + # Attempts a payout for an invalid payout asset def payout_before_minimum_height_is_reached(self): self.log.info("Running payout before minimum rewards height is reached!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() + owner_addr0 = n0.getnewaddress() self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK6 asset") - n0.issue(asset_name="STOCK6", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK6", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 1 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 1 self.log.info("Requesting snapshot of STOCK6 ownership in 1 blocks") - n0.requestsnapshot(asset_name="STOCK6", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK6", block_height=tgt_block_height) self.log.info("Skipping forward so that we're 15 blocks ahead of the snapshot height") n0.generate(16) self.sync_all() - self.log.info("Initiating failing reward payout because we are only 15 block ahead of the snapshot instead of 60") - assert_raises_rpc_error(-32600, "For security of the rewards payout, it is recommended to wait until chain is 60 blocks ahead of the snapshot height. 
You can modify this by using the -minrewardsheight.", - n0.distributereward, "STOCK6", tgtBlockHeight, "RVN", 2000, ownerAddr0) + self.log.info( + "Initiating failing reward payout because we are only 15 block ahead of the snapshot instead of 60") + assert_raises_rpc_error(-32600, + "For security of the rewards payout, it is recommended to wait until chain is 60 blocks ahead of the snapshot height. You can modify this by using the -minrewardsheight.", + n0.distributereward, "STOCK6", tgt_block_height, "RVN", 2000, owner_addr0) - ## Attempts a payout using a custome rewards height of 15, and they have low rvn balance + # Attempts a payout using a custom rewards height of 15, and they have low rvn balance def payout_custom_height_set_with_low_funds(self): self.log.info("Running payout before minimum rewards height is reached with custom min height value set!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n1.getnewaddress() + owner_addr0 = n1.getnewaddress() self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(5) self.sync_all() n1.generate(10) self.sync_all() self.log.info("Issuing STOCK7 asset") - n1.issue(asset_name="STOCK7", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n1.issue(asset_name="STOCK7", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n1.generate(10) self.sync_all() - shareholderAddr0 = n2.getnewaddress() + shareholder_addr0 = n2.getnewaddress() - n1.transfer(asset_name="STOCK7", qty=200, to_address=shareholderAddr0, message="", expire_time=0, change_address="", asset_change_address=ownerAddr0) + n1.transfer(asset_name="STOCK7", qty=200, to_address=shareholder_addr0, message="", expire_time=0, change_address="", asset_change_address=owner_addr0) n1.generate(10) self.sync_all() 
self.log.info("Retrieving chain height") - tgtBlockHeight = n1.getblockchaininfo()["blocks"] + 1 + tgt_block_height = n1.getblockchaininfo()["blocks"] + 1 self.log.info("Requesting snapshot of STOCK7 ownership in 1 blocks") - n1.requestsnapshot(asset_name="STOCK7", block_height=tgtBlockHeight) + n1.requestsnapshot(asset_name="STOCK7", block_height=tgt_block_height) self.log.info("Skipping forward so that we're 30 blocks ahead of the snapshot height") n1.generate(31) self.sync_all() self.log.info("Initiating reward payout should succeed because -minrewardheight=15 on node1") - n1.distributereward("STOCK7", tgtBlockHeight, "RVN", 2000, ownerAddr0) + n1.distributereward("STOCK7", tgt_block_height, "RVN", 2000, owner_addr0) n1.generate(2) self.sync_all() - assert_equal(n1.getdistributestatus("STOCK7", tgtBlockHeight, "RVN", 2000, ownerAddr0)['Status'], 3) + assert_equal(n1.getdistributestatus("STOCK7", tgt_block_height, "RVN", 2000, owner_addr0)['Status'], 3) n0.sendtoaddress(n1.getnewaddress(), 3000) n0.generate(5) @@ -513,73 +508,71 @@ def payout_custom_height_set_with_low_funds(self): n1.generate(10) self.sync_all() - assert_equal(n2.getreceivedbyaddress(shareholderAddr0, 1), 2000) + assert_equal(n2.getreceivedbyaddress(shareholder_addr0, 1), 2000) - ## Attempts a payout using a custome rewards height of 15, and they have low rvn balance - def payout_with_insufficent_asset_amount(self): + # Attempts a payout using a custom rewards height of 15, and they have low rvn balance + def payout_with_insufficient_asset_amount(self): self.log.info("Running payout before minimum rewards height is reached with custom min height value set!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n1.getnewaddress() + owner_addr0 = n1.getnewaddress() self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 2000) + self.nodes[0].sendtoaddress(owner_addr0, 2000) n0.generate(5) self.sync_all() 
n1.generate(10) self.sync_all() - n1.issue(asset_name="STOCK_7.1", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n1.issue(asset_name="STOCK_7.1", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) self.log.info("Issuing LOW_ASSET_AMOUNT asset") - n1.issue(asset_name="LOW_ASSET_AMOUNT", qty=10000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n1.issue(asset_name="LOW_ASSET_AMOUNT", qty=10000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n1.generate(10) self.sync_all() - ## Send the majority of the assets to node0 - assetHolderAddress = n0.getnewaddress() - n1.transfer(asset_name="LOW_ASSET_AMOUNT", qty=9000, to_address=assetHolderAddress, message="", expire_time=0, change_address="", asset_change_address=ownerAddr0) - shareholderAddr0 = n2.getnewaddress() - n1.transfer(asset_name="STOCK_7.1", qty=200, to_address=shareholderAddr0, message="", expire_time=0, change_address="", asset_change_address=ownerAddr0) + # Send the majority of the assets to node0 + asset_holder_address = n0.getnewaddress() + n1.transfer(asset_name="LOW_ASSET_AMOUNT", qty=9000, to_address=asset_holder_address, message="", expire_time=0, change_address="", asset_change_address=owner_addr0) + shareholder_addr0 = n2.getnewaddress() + n1.transfer(asset_name="STOCK_7.1", qty=200, to_address=shareholder_addr0, message="", expire_time=0, change_address="", asset_change_address=owner_addr0) n1.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n1.getblockchaininfo()["blocks"] + 1 + tgt_block_height = n1.getblockchaininfo()["blocks"] + 1 self.log.info("Requesting snapshot of STOCK_7.1 ownership in 1 blocks") - n1.requestsnapshot(asset_name="STOCK_7.1", block_height=tgtBlockHeight) + n1.requestsnapshot(asset_name="STOCK_7.1", block_height=tgt_block_height) self.log.info("Skipping forward 
so that we're 30 blocks ahead of the snapshot height") n1.generate(61) self.sync_all() self.log.info("Initiating reward payout") - n1.distributereward("STOCK_7.1", tgtBlockHeight, "LOW_ASSET_AMOUNT", 2000, ownerAddr0) + n1.distributereward("STOCK_7.1", tgt_block_height, "LOW_ASSET_AMOUNT", 2000, owner_addr0) n1.generate(2) self.sync_all() - assert_equal(n1.getdistributestatus("STOCK_7.1", tgtBlockHeight, "LOW_ASSET_AMOUNT", 2000, ownerAddr0)['Status'], 5) + assert_equal( + n1.getdistributestatus("STOCK_7.1", tgt_block_height, "LOW_ASSET_AMOUNT", 2000, owner_addr0)['Status'], 5) - ## node0 transfer back the assets to node1, now the distribution transaction should get created successfully. when the next block is mined - n0.transfer(asset_name="LOW_ASSET_AMOUNT", qty=9000, to_address=ownerAddr0, message="", expire_time=0, change_address="") + # node0 transfer back the assets to node1, now the distribution transaction should get created successfully. when the next block is mined + n0.transfer(asset_name="LOW_ASSET_AMOUNT", qty=9000, to_address=owner_addr0, message="", expire_time=0, change_address="") n0.generate(5) self.sync_all() n1.generate(10) self.sync_all() - assert_equal(n2.listassetbalancesbyaddress(shareholderAddr0)["LOW_ASSET_AMOUNT"], 2000) + assert_equal(n2.listassetbalancesbyaddress(shareholder_addr0)["LOW_ASSET_AMOUNT"], 2000) - def listsnapshotrequests(self): + def list_snapshot_requests(self): self.log.info("Testing listsnapshotrequests()...") n0, n1 = self.nodes[0], self.nodes[1] - self.log.info("Providing funding") self.nodes[0].sendtoaddress(n1.getnewaddress(), 1000) n0.generate(5) @@ -595,8 +588,7 @@ def listsnapshotrequests(self): block_height2 = n1.getblockcount() + 200 # make sure a snapshot can't be created for a non-existent - assert_raises_rpc_error(-8, "asset does not exist", - n1.requestsnapshot, asset_name1, block_height1) + assert_raises_rpc_error(-8, "asset does not exist", n1.requestsnapshot, asset_name1, block_height1) 
n1.issue(asset_name1) n1.issue(asset_name2) n1.generate(1) @@ -626,84 +618,85 @@ def basic_test_asset_uneven_distribution(self): n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK8 asset") - n0.issue(asset_name="STOCK8", qty=10000, to_address=distAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK8", qty=10000, to_address=dist_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() + shareholder_addr0 = n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() self.log.info("Distributing shares") - n0.transferfromaddress(asset_name="STOCK8", from_address=distAddr0, qty=300, to_address=shareholderAddr0, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK8", from_address=distAddr0, qty=300, to_address=shareholderAddr1, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK8", from_address=distAddr0, qty=300, to_address=shareholderAddr2, message="", expire_time=0, rvn_change_address="", 
asset_change_address=distAddr0) + n0.transferfromaddress(asset_name="STOCK8", from_address=dist_addr0, qty=300, to_address=shareholder_addr0, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK8", from_address=dist_addr0, qty=300, to_address=shareholder_addr1, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK8", from_address=dist_addr0, qty=300, to_address=shareholder_addr2, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) n0.generate(150) self.sync_all() self.log.info("Verifying share distribution") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)['STOCK8'], 300) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK8"], 300) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK8"], 300) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)['STOCK8'], 300) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK8"], 300) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK8"], 300) self.log.info("Mining blocks") n0.generate(10) self.sync_all() self.log.info("Issuing PAYOUT8 asset") - n0.issue(asset_name="PAYOUT8", qty=10, to_address=ownerAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="PAYOUT8", qty=10, to_address=owner_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 5 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 5 self.log.info("Requesting snapshot of STOCK8 ownership in 5 blocks") - n0.requestsnapshot(asset_name="STOCK8", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK8", block_height=tgt_block_height) # Mine 10 blocks to make sure snapshot is created n0.generate(10) 
self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK8", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK8") - assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK8", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK8") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(61) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK8", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK8") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK8", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK8") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False - for ownerAddr in snapShot["owners"]: - if ownerAddr["address"] == shareholderAddr0: + for ownerAddr in snap_shot["owners"]: + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 300) owner0 = True - elif ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 300) owner1 = True - elif ownerAddr["address"] == shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 300) owner2 = True assert_equal(owner0, True) @@ -711,100 +704,102 @@ def basic_test_asset_uneven_distribution(self): assert_equal(owner2, True) self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK8", snapshot_height=tgtBlockHeight, distribution_asset_name="PAYOUT8", gross_distribution_amount=10, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK8", snapshot_height=tgt_block_height, 
distribution_asset_name="PAYOUT8", + gross_distribution_amount=10, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) self.log.info("Verifying PAYOUT8 holdings after payout") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["PAYOUT8"], 3) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["PAYOUT8"], 3) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["PAYOUT8"], 3) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["PAYOUT8"], 3) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["PAYOUT8"], 3) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["PAYOUT8"], 3) def basic_test_asset_even_distribution(self): self.log.info("Running ASSET reward test (units = 0)!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK9 asset") - n0.issue(asset_name="STOCK9", qty=10000, to_address=distAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK9", qty=10000, to_address=dist_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() 
self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() + shareholder_addr0 = n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() self.log.info("Distributing shares") - n0.transferfromaddress(asset_name="STOCK9", from_address=distAddr0, qty=300, to_address=shareholderAddr0, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK9", from_address=distAddr0, qty=300, to_address=shareholderAddr1, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK9", from_address=distAddr0, qty=400, to_address=shareholderAddr2, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) + n0.transferfromaddress(asset_name="STOCK9", from_address=dist_addr0, qty=300, to_address=shareholder_addr0, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK9", from_address=dist_addr0, qty=300, to_address=shareholder_addr1, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK9", from_address=dist_addr0, qty=400, to_address=shareholder_addr2, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) n0.generate(150) self.sync_all() self.log.info("Verifying share distribution") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)['STOCK9'], 300) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK9"], 300) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK9"], 400) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)['STOCK9'], 300) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK9"], 300) + 
assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK9"], 400) self.log.info("Mining blocks") n0.generate(10) self.sync_all() self.log.info("Issuing PAYOUT9 asset") - n0.issue(asset_name="PAYOUT9", qty=10, to_address=ownerAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="PAYOUT9", qty=10, to_address=owner_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 5 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 5 self.log.info("Requesting snapshot of STOCK9 ownership in 5 blocks") - n0.requestsnapshot(asset_name="STOCK9", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK9", block_height=tgt_block_height) # Mine 10 blocks to make sure snapshot is created n0.generate(10) self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK9", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK9") - assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK9", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK9") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(61) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK9", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK9") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK9", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK9") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False - for ownerAddr in snapShot["owners"]: - if ownerAddr["address"] == shareholderAddr0: + 
for ownerAddr in snap_shot["owners"]: + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 300) owner0 = True - elif ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 300) owner1 = True - elif ownerAddr["address"] == shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 400) owner2 = True assert_equal(owner0, True) @@ -812,100 +807,103 @@ def basic_test_asset_even_distribution(self): assert_equal(owner2, True) self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK9", snapshot_height=tgtBlockHeight, distribution_asset_name="PAYOUT9", gross_distribution_amount=10, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK9", snapshot_height=tgt_block_height, distribution_asset_name="PAYOUT9", + gross_distribution_amount=10, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) 
self.log.info("Verifying PAYOUT9 holdings after payout") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["PAYOUT9"], 3) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["PAYOUT9"], 3) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["PAYOUT9"], 4) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["PAYOUT9"], 3) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["PAYOUT9"], 3) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["PAYOUT9"], 4) def basic_test_asset_round_down_uneven_distribution(self): self.log.info("Running ASSET reward test with uneven distribution (units = 0)!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK10 asset") - n0.issue(asset_name="STOCK10", qty=10000, to_address=distAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK10", qty=10000, to_address=dist_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() + shareholder_addr0 = n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() self.log.info("Distributing shares") - 
n0.transferfromaddress(asset_name="STOCK10", from_address=distAddr0, qty=300, to_address=shareholderAddr0, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK10", from_address=distAddr0, qty=300, to_address=shareholderAddr1, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK10", from_address=distAddr0, qty=500, to_address=shareholderAddr2, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) + n0.transferfromaddress(asset_name="STOCK10", from_address=dist_addr0, qty=300, to_address=shareholder_addr0, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK10", from_address=dist_addr0, qty=300, to_address=shareholder_addr1, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK10", from_address=dist_addr0, qty=500, to_address=shareholder_addr2, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) n0.generate(150) self.sync_all() self.log.info("Verifying share distribution") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)['STOCK10'], 300) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK10"], 300) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK10"], 500) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)['STOCK10'], 300) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK10"], 300) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK10"], 500) self.log.info("Mining blocks") n0.generate(10) self.sync_all() self.log.info("Issuing PAYOUT10 asset") - n0.issue(asset_name="PAYOUT10", qty=10, to_address=ownerAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="PAYOUT10", qty=10, 
to_address=owner_addr0, change_address="", units=0, reissuable=True, + has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 5 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 5 self.log.info("Requesting snapshot of STOCK10 ownership in 5 blocks") - n0.requestsnapshot(asset_name="STOCK10", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK10", block_height=tgt_block_height) # Mine 10 blocks to make sure snapshot is created n0.generate(10) self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK10", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK10") - assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK10", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK10") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(61) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK10", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK10") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK10", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK10") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False - for ownerAddr in snapShot["owners"]: - if ownerAddr["address"] == shareholderAddr0: + for ownerAddr in snap_shot["owners"]: + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 300) owner0 = True - elif ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 300) owner1 = True - elif ownerAddr["address"] == 
shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 500) owner2 = True assert_equal(owner0, True) @@ -913,107 +911,110 @@ def basic_test_asset_round_down_uneven_distribution(self): assert_equal(owner2, True) self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK10", snapshot_height=tgtBlockHeight, distribution_asset_name="PAYOUT10", gross_distribution_amount=10, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK10", snapshot_height=tgt_block_height, distribution_asset_name="PAYOUT10", + gross_distribution_amount=10, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) self.log.info("Verifying PAYOUT10 holdings after payout") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["PAYOUT10"], 2) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["PAYOUT10"], 2) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["PAYOUT10"], 4) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["PAYOUT10"], 2) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["PAYOUT10"], 2) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["PAYOUT10"], 4) def basic_test_asset_round_down_uneven_distribution_2(self): self.log.info("Running ASSET reward test with uneven distribution (units = 0)!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: 
{distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() self.log.info("Issuing STOCK11 asset") - n0.issue(asset_name="STOCK11", qty=10000, to_address=distAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK11", qty=10000, to_address=dist_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() - shareholderAddr3 = n2.getnewaddress() + shareholder_addr0 = n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() + shareholder_addr3 = n2.getnewaddress() self.log.info("Distributing shares") - n0.transferfromaddress(asset_name="STOCK11", from_address=distAddr0, qty=9, to_address=shareholderAddr0, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK11", from_address=distAddr0, qty=3, to_address=shareholderAddr1, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK11", from_address=distAddr0, qty=2, to_address=shareholderAddr2, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK11", from_address=distAddr0, qty=1, to_address=shareholderAddr3, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) + n0.transferfromaddress(asset_name="STOCK11", from_address=dist_addr0, qty=9, to_address=shareholder_addr0, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + 
n0.transferfromaddress(asset_name="STOCK11", from_address=dist_addr0, qty=3, to_address=shareholder_addr1, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK11", from_address=dist_addr0, qty=2, to_address=shareholder_addr2, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK11", from_address=dist_addr0, qty=1, to_address=shareholder_addr3, + message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) n0.generate(150) self.sync_all() self.log.info("Verifying share distribution") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)['STOCK11'], 9) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK11"], 3) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK11"], 2) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr3)["STOCK11"], 1) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)['STOCK11'], 9) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK11"], 3) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK11"], 2) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr3)["STOCK11"], 1) self.log.info("Mining blocks") n0.generate(10) self.sync_all() self.log.info("Issuing PAYOUT11 asset") - n0.issue(asset_name="PAYOUT11", qty=10, to_address=ownerAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="PAYOUT11", qty=10, to_address=owner_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 5 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 5 self.log.info("Requesting snapshot of STOCK11 ownership in 5 blocks") - n0.requestsnapshot(asset_name="STOCK11", block_height=tgtBlockHeight) + 
n0.requestsnapshot(asset_name="STOCK11", block_height=tgt_block_height) # Mine 10 blocks to make sure snapshot is created n0.generate(10) self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK11", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK11") - assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK11", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK11") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(61) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK11", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK11") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK11", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK11") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False - for ownerAddr in snapShot["owners"]: - if ownerAddr["address"] == shareholderAddr0: + for ownerAddr in snap_shot["owners"]: + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 9) owner0 = True - elif ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 3) owner1 = True - elif ownerAddr["address"] == shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 2) owner2 = True - elif ownerAddr["address"] == shareholderAddr3: + elif ownerAddr["address"] == shareholder_addr3: assert_equal(ownerAddr["amount_owned"], 1) owner2 = True assert_equal(owner0, True) @@ -1021,107 +1022,106 @@ def basic_test_asset_round_down_uneven_distribution_2(self): assert_equal(owner2, True) 
self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK11", snapshot_height=tgtBlockHeight, distribution_asset_name="PAYOUT11", gross_distribution_amount=10, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK11", snapshot_height=tgt_block_height, distribution_asset_name="PAYOUT11", + gross_distribution_amount=10, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) self.log.info("Verifying PAYOUT11 holdings after payout") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["PAYOUT11"], 6) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["PAYOUT11"], 2) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["PAYOUT11"], 1) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["PAYOUT11"], 6) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["PAYOUT11"], 2) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["PAYOUT11"], 1) def basic_test_asset_round_down_uneven_distribution_3(self): self.log.info("Running ASSET reward test with uneven distribution (units = 1)!") n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(10) self.sync_all() 
self.log.info("Issuing STOCK12 asset") - n0.issue(asset_name="STOCK12", qty=10000, to_address=distAddr0, change_address="", \ - units=0, reissuable=True, has_ipfs=False) + n0.issue(asset_name="STOCK12", qty=10000, to_address=dist_addr0, change_address="", units=0, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Creating shareholder addresses") - shareholderAddr0 = n0.getnewaddress() - shareholderAddr1 = n1.getnewaddress() - shareholderAddr2 = n2.getnewaddress() - shareholderAddr3 = n2.getnewaddress() + shareholder_addr0 = n0.getnewaddress() + shareholder_addr1 = n1.getnewaddress() + shareholder_addr2 = n2.getnewaddress() + shareholder_addr3 = n2.getnewaddress() self.log.info("Distributing shares") - n0.transferfromaddress(asset_name="STOCK12", from_address=distAddr0, qty=9, to_address=shareholderAddr0, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK12", from_address=distAddr0, qty=3, to_address=shareholderAddr1, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK12", from_address=distAddr0, qty=2, to_address=shareholderAddr2, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) - n0.transferfromaddress(asset_name="STOCK12", from_address=distAddr0, qty=1, to_address=shareholderAddr3, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) + n0.transferfromaddress(asset_name="STOCK12", from_address=dist_addr0, qty=9, to_address=shareholder_addr0, message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK12", from_address=dist_addr0, qty=3, to_address=shareholder_addr1, message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK12", from_address=dist_addr0, qty=2, to_address=shareholder_addr2, message="", 
expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) + n0.transferfromaddress(asset_name="STOCK12", from_address=dist_addr0, qty=1, to_address=shareholder_addr3, message="", expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) n0.generate(150) self.sync_all() self.log.info("Verifying share distribution") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)['STOCK12'], 9) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["STOCK12"], 3) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["STOCK12"], 2) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr3)["STOCK12"], 1) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)['STOCK12'], 9) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["STOCK12"], 3) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["STOCK12"], 2) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr3)["STOCK12"], 1) self.log.info("Mining blocks") n0.generate(10) self.sync_all() self.log.info("Issuing PAYOUT12 asset") - n0.issue(asset_name="PAYOUT12", qty=10, to_address=ownerAddr0, change_address="", \ - units=1, reissuable=True, has_ipfs=False) + n0.issue(asset_name="PAYOUT12", qty=10, to_address=owner_addr0, change_address="", units=1, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 5 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 5 self.log.info("Requesting snapshot of STOCK12 ownership in 5 blocks") - n0.requestsnapshot(asset_name="STOCK12", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="STOCK12", block_height=tgt_block_height) # Mine 10 blocks to make sure snapshot is created n0.generate(10) self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="STOCK12", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "STOCK12") - 
assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="STOCK12", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "STOCK12") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(61) self.sync_all() self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="STOCK12", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "STOCK12") - assert_equal(snapShot["height"], tgtBlockHeight) + snap_shot = n0.getsnapshot(asset_name="STOCK12", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "STOCK12") + assert_equal(snap_shot["height"], tgt_block_height) owner0 = False owner1 = False owner2 = False - for ownerAddr in snapShot["owners"]: - if ownerAddr["address"] == shareholderAddr0: + for ownerAddr in snap_shot["owners"]: + if ownerAddr["address"] == shareholder_addr0: assert_equal(ownerAddr["amount_owned"], 9) owner0 = True - elif ownerAddr["address"] == shareholderAddr1: + elif ownerAddr["address"] == shareholder_addr1: assert_equal(ownerAddr["amount_owned"], 3) owner1 = True - elif ownerAddr["address"] == shareholderAddr2: + elif ownerAddr["address"] == shareholder_addr2: assert_equal(ownerAddr["amount_owned"], 2) owner2 = True - elif ownerAddr["address"] == shareholderAddr3: + elif ownerAddr["address"] == shareholder_addr3: assert_equal(ownerAddr["amount_owned"], 1) owner2 = True assert_equal(owner0, True) @@ -1129,63 +1129,63 @@ def basic_test_asset_round_down_uneven_distribution_3(self): assert_equal(owner2, True) self.log.info("Initiating reward payout") - n0.distributereward(asset_name="STOCK12", snapshot_height=tgtBlockHeight, distribution_asset_name="PAYOUT12", gross_distribution_amount=10, exception_addresses=distAddr0) + n0.distributereward(asset_name="STOCK12", snapshot_height=tgt_block_height, distribution_asset_name="PAYOUT12", + 
gross_distribution_amount=10, exception_addresses=dist_addr0) n0.generate(10) self.sync_all() - ## Inexplicably, order matters here. We need to verify the amount - ## using the node that created the address (?!) + # Inexplicably, order matters here. We need to verify the amount + # using the node that created the address (?!) self.log.info("Verifying PAYOUT12 holdings after payout") - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr0)["PAYOUT12"], 6) - assert_equal(n1.listassetbalancesbyaddress(shareholderAddr1)["PAYOUT12"], 2) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr2)["PAYOUT12"], Decimal(str(1.3))) - assert_equal(n0.listassetbalancesbyaddress(shareholderAddr3)["PAYOUT12"], Decimal(str(0.6))) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr0)["PAYOUT12"], 6) + assert_equal(n1.listassetbalancesbyaddress(shareholder_addr1)["PAYOUT12"], 2) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr2)["PAYOUT12"], Decimal(str(1.3))) + assert_equal(n0.listassetbalancesbyaddress(shareholder_addr3)["PAYOUT12"], Decimal(str(0.6))) def test_rvn_bulk(self): self.log.info("Running basic RVN reward test!") n0, n1, n2, n3 = self.nodes[0], self.nodes[1], self.nodes[2], self.nodes[3] self.log.info("Creating owner address") - ownerAddr0 = n0.getnewaddress() - # self.log.info(f"Owner address: {ownerAddr0}") + owner_addr0 = n0.getnewaddress() + # self.log.info(f"Owner address: {owner_addr0}") self.log.info("Creating distributor address") - distAddr0 = n0.getnewaddress() - # self.log.info(f"Distributor address: {distAddr0}") + dist_addr0 = n0.getnewaddress() + # self.log.info(f"Distributor address: {dist_addr0}") self.log.info("Providing funding") - self.nodes[0].sendtoaddress(ownerAddr0, 1000) + self.nodes[0].sendtoaddress(owner_addr0, 1000) n0.generate(100) self.sync_all() self.log.info("Issuing BULK1 asset") - n0.issue(asset_name="BULK1", qty=100000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, 
has_ipfs=False) - n0.issue(asset_name="TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1", qty=100000, to_address=ownerAddr0, change_address="", \ - units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="BULK1", qty=100000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) + n0.issue(asset_name="TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1", qty=100000, to_address=owner_addr0, change_address="", units=4, reissuable=True, has_ipfs=False) n0.generate(10) self.sync_all() self.log.info("Checking listassetbalancesbyaddress()...") - assert_equal(n0.listassetbalancesbyaddress(ownerAddr0)["BULK1"], 100000) + assert_equal(n0.listassetbalancesbyaddress(owner_addr0)["BULK1"], 100000) self.log.info("Transferring all assets to a single address for tracking") - n0.transfer(asset_name="BULK1", qty=100000, to_address=distAddr0) + n0.transfer(asset_name="BULK1", qty=100000, to_address=dist_addr0) n0.generate(10) self.sync_all() - assert_equal(n0.listassetbalancesbyaddress(distAddr0)["BULK1"], 100000) + assert_equal(n0.listassetbalancesbyaddress(dist_addr0)["BULK1"], 100000) self.log.info("Creating shareholder addresses") - address_list = [None]*9999 + address_list = [None] * 9999 for i in range(0, 9999, 3): address_list[i] = n1.getnewaddress() - address_list[i+1] = n2.getnewaddress() - address_list[i+2] = n3.getnewaddress() + address_list[i + 1] = n2.getnewaddress() + address_list[i + 2] = n3.getnewaddress() self.log.info("Distributing shares") count = 0 for address in address_list: - n0.transferfromaddress(asset_name="BULK1", from_address=distAddr0, qty=10, to_address=address, message="", expire_time=0, rvn_change_address="", asset_change_address=distAddr0) + n0.transferfromaddress(asset_name="BULK1", from_address=dist_addr0, qty=10, to_address=address, message="", + expire_time=0, rvn_change_address="", asset_change_address=dist_addr0) count += 1 if count > 190: n0.generate(1) @@ -1198,42 +1198,44 @@ def test_rvn_bulk(self): self.log.info("Verifying share 
distribution") for i in range(0, 9999, 3): assert_equal(n1.listassetbalancesbyaddress(address_list[i])["BULK1"], 10) - assert_equal(n2.listassetbalancesbyaddress(address_list[i+1])["BULK1"], 10) - assert_equal(n3.listassetbalancesbyaddress(address_list[i+2])["BULK1"], 10) + assert_equal(n2.listassetbalancesbyaddress(address_list[i + 1])["BULK1"], 10) + assert_equal(n3.listassetbalancesbyaddress(address_list[i + 2])["BULK1"], 10) self.log.info("Mining blocks") n0.generate(10) self.sync_all() self.log.info("Providing additional funding") - self.nodes[0].sendtoaddress(ownerAddr0, 2000) + self.nodes[0].sendtoaddress(owner_addr0, 2000) n0.generate(100) self.sync_all() self.log.info("Retrieving chain height") - tgtBlockHeight = n0.getblockchaininfo()["blocks"] + 5 + tgt_block_height = n0.getblockchaininfo()["blocks"] + 5 self.log.info("Requesting snapshot of BULK1 ownership in 100 blocks") - n0.requestsnapshot(asset_name="BULK1", block_height=tgtBlockHeight) + n0.requestsnapshot(asset_name="BULK1", block_height=tgt_block_height) self.log.info("Skipping forward to allow snapshot to process") n0.generate(66) self.sync_all() self.log.info("Retrieving snapshot request") - snapShotReq = n0.getsnapshotrequest(asset_name="BULK1", block_height=tgtBlockHeight) - assert_equal(snapShotReq["asset_name"], "BULK1") - assert_equal(snapShotReq["block_height"], tgtBlockHeight) + snap_shot_req = n0.getsnapshotrequest(asset_name="BULK1", block_height=tgt_block_height) + assert_equal(snap_shot_req["asset_name"], "BULK1") + assert_equal(snap_shot_req["block_height"], tgt_block_height) self.log.info("Retrieving snapshot for ownership validation") - snapShot = n0.getsnapshot(asset_name="BULK1", block_height=tgtBlockHeight) - assert_equal(snapShot["name"], "BULK1") - assert_equal(snapShot["height"], tgtBlockHeight) - for ownerAddr in snapShot["owners"]: + snap_shot = n0.getsnapshot(asset_name="BULK1", block_height=tgt_block_height) + assert_equal(snap_shot["name"], "BULK1") + 
assert_equal(snap_shot["height"], tgt_block_height) + for ownerAddr in snap_shot["owners"]: assert_equal(ownerAddr["amount_owned"], 10) self.log.info("Initiating reward payout") - result = n0.distributereward(asset_name="BULK1", snapshot_height=tgtBlockHeight, distribution_asset_name="TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1", gross_distribution_amount=100000, exception_addresses=distAddr0, change_address="", dry_run=False) + n0.distributereward(asset_name="BULK1", snapshot_height=tgt_block_height, + distribution_asset_name="TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1", gross_distribution_amount=100000, + exception_addresses=dist_addr0, change_address="", dry_run=False) # print(result) n0.generate(10) self.sync_all() @@ -1242,8 +1244,8 @@ def test_rvn_bulk(self): self.log.info("Checking reward payout") for i in range(0, 9999, 3): assert_equal(n1.listassetbalancesbyaddress(address_list[i])['TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1'], Decimal(str(10.0010))) - assert_equal(n2.listassetbalancesbyaddress(address_list[i+1])['TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1'], Decimal(str(10.0010))) - assert_equal(n3.listassetbalancesbyaddress(address_list[i+2])['TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1'], Decimal(str(10.0010))) + assert_equal(n2.listassetbalancesbyaddress(address_list[i + 1])['TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1'], Decimal(str(10.0010))) + assert_equal(n3.listassetbalancesbyaddress(address_list[i + 2])['TTTTTTTTTTTTTTTTTTTTTTTTTTTTT1'], Decimal(str(10.0010))) def run_test(self): self.activate_assets() @@ -1253,15 +1255,15 @@ def run_test(self): self.payout_with_invalid_ownership_asset() self.payout_with_invalid_payout_asset() self.payout_before_minimum_height_is_reached() - self.listsnapshotrequests() + self.list_snapshot_requests() self.payout_custom_height_set_with_low_funds() - self.payout_with_insufficent_asset_amount() + self.payout_with_insufficient_asset_amount() self.basic_test_asset_uneven_distribution() self.basic_test_asset_even_distribution() self.basic_test_asset_round_down_uneven_distribution() 
self.basic_test_asset_round_down_uneven_distribution_2() self.basic_test_asset_round_down_uneven_distribution_3() - #self.test_asset_bulk() + # self.test_asset_bulk() if __name__ == "__main__": diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index feb7afc659..8978e57f5e 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -3,38 +3,15 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the SegWit changeover logic.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (hex_str_to_bytes, - bytes_to_hex_str, - connect_nodes, - Decimal, - assert_equal, - sync_blocks, - assert_raises_rpc_error, - try_rpc) -from test_framework.mininode import (sha256, - CTransaction, - CTxIn, - COutPoint, - CTxOut, - COIN, - ToHex, - from_hex) -from test_framework.address import (script_to_p2sh, key_to_p2pkh) -from test_framework.script import ( CScript, - OP_HASH160, - OP_CHECKSIG, - hash160, - OP_EQUAL, OP_DUP, - OP_EQUALVERIFY, - OP_0, - OP_1, - OP_2, - OP_CHECKMULTISIG, - OP_TRUE) from io import BytesIO +from test_framework.test_framework import RavenTestFramework +from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, connect_nodes, Decimal, assert_equal, sync_blocks, assert_raises_rpc_error, try_rpc +from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, to_hex, from_hex +from test_framework.address import script_to_p2sh, key_to_p2pkh +from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_0, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE NODE_0 = 0 NODE_2 = 2 @@ -45,7 +22,7 @@ # given pubkey, or a P2WSH output of a 1-of-1 multisig for the given # pubkey. Returns the hex encoding of the scriptPubKey. 
def witness_script(use_p2wsh, pubkey): - if (use_p2wsh == False): + if not use_p2wsh: # P2WPKH instead pubkeyhash = hash160(hex_str_to_bytes(pubkey)) pkscript = CScript([OP_0, pubkeyhash]) @@ -60,13 +37,13 @@ def witness_script(use_p2wsh, pubkey): # optionally wrapping the segwit output using P2SH. def create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount): pkscript = hex_str_to_bytes(witness_script(use_p2wsh, pubkey)) - if (encode_p2sh): + if encode_p2sh: p2sh_hash = hash160(pkscript) pkscript = CScript([OP_HASH160, p2sh_hash, OP_EQUAL]) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), b"")) tx.vout.append(CTxOut(int(amount*COIN), pkscript)) - return ToHex(tx) + return to_hex(tx) # Create a transaction spending a given utxo to a segwit output corresponding # to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH @@ -75,22 +52,20 @@ def create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount): # insert_redeem_script will be added to the scriptSig, if given. 
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""): tx_to_witness = create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount) - if (sign): + if sign: signed = node.signrawtransaction(tx_to_witness) assert("errors" not in signed or len(["errors"]) == 0) return node.sendrawtransaction(signed["hex"]) else: - if (insert_redeem_script): + if insert_redeem_script: tx = from_hex(CTransaction(), tx_to_witness) tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)]) - tx_to_witness = ToHex(tx) + tx_to_witness = to_hex(tx) return node.sendrawtransaction(tx_to_witness) def getutxo(txid): - utxo = {} - utxo["vout"] = 0 - utxo["txid"] = txid + utxo = {"vout": 0, "txid": txid} return utxo def find_unspent(node, min_value): @@ -274,7 +249,7 @@ def run_test(self): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b'')) tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE]))) - tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex'] + tx2_hex = self.nodes[0].signrawtransaction(to_hex(tx))['hex'] txid2 = self.nodes[0].sendrawtransaction(tx2_hex) tx = from_hex(CTransaction(), tx2_hex) assert(not tx.wit.is_null()) @@ -283,8 +258,8 @@ def run_test(self): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b"")) tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee - tx.calc_sha256() - txid3 = self.nodes[0].sendrawtransaction(ToHex(tx)) + tx.calc_x16r() + txid3 = self.nodes[0].sendrawtransaction(to_hex(tx)) assert(tx.wit.is_null()) assert(txid3 in self.nodes[0].getrawmempool()) @@ -304,7 +279,7 @@ def run_test(self): assert(txid3 in template_txids) # Check that wtxid is properly reported in mempool entry - assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True)) + assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_x16r(True)) # Mine a block to clear the gbt cache again. 
self.nodes[0].generate(1) @@ -327,8 +302,8 @@ def run_test(self): uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"] self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR") compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"] - assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False)) - assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True)) + assert (self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False) + assert (self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True) self.nodes[0].importpubkey(pubkeys[0]) compressed_solvable_address = [key_to_p2pkh(pubkeys[0])] @@ -362,7 +337,7 @@ def run_test(self): for i in compressed_spendable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # bare and p2sh multisig with compressed keys should always be spendable spendable_anytime.extend([bare, p2sh]) @@ -377,7 +352,7 @@ def run_test(self): for i in uncompressed_spendable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # bare and p2sh multisig with uncompressed keys should always be spendable spendable_anytime.extend([bare, p2sh]) @@ -394,7 +369,7 @@ def run_test(self): for i in compressed_solvable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: # Multisig without private is not seen after addmultisigaddress, but seen after importaddress [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh]) @@ -407,7 +382,7 @@ def run_test(self): for i in uncompressed_solvable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if 
v['isscript']: [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress solvable_after_importaddress.extend([bare, p2sh]) @@ -447,7 +422,7 @@ def run_test(self): importlist = [] for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: bare = hex_str_to_bytes(v['hex']) importlist.append(bytes_to_hex_str(bare)) importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)]))) @@ -523,7 +498,7 @@ def run_test(self): for i in compressed_spendable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) @@ -536,7 +511,7 @@ def run_test(self): for i in uncompressed_spendable_address + uncompressed_solvable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen unseen_anytime.extend([p2wsh, p2sh_p2wsh]) @@ -547,7 +522,7 @@ def run_test(self): for i in compressed_solvable_address: v = self.nodes[0].validateaddress(i) - if (v['isscript']): + if v['isscript']: # P2WSH multisig without private key are seen after addwitnessaddress [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh]) @@ -604,27 +579,29 @@ def mine_and_test_listunspent(self, script_list, ismine): watchcount = 0 spendcount = 0 for i in self.nodes[0].listunspent(): - if (i['txid'] == txid): + if i['txid'] == txid: watchcount += 1 - if (i['spendable'] == True): 
+ if i['spendable']: spendcount += 1 - if (ismine == 2): + if ismine == 2: assert_equal(spendcount, len(script_list)) - elif (ismine == 1): + elif ismine == 1: assert_equal(watchcount, len(script_list)) assert_equal(spendcount, 0) else: assert_equal(watchcount, 0) return txid - def p2sh_address_to_script(self,v): + @staticmethod + def p2sh_address_to_script(v): bare = CScript(hex_str_to_bytes(v['hex'])) p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) p2wsh = CScript([OP_0, sha256(bare)]) p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL]) - return([bare, p2sh, p2wsh, p2sh_p2wsh]) + return [bare, p2sh, p2wsh, p2sh_p2wsh] - def p2pkh_address_to_script(self,v): + @staticmethod + def p2pkh_address_to_script(v): pubkey = hex_str_to_bytes(v['pubkey']) p2wpkh = CScript([OP_0, hash160(pubkey)]) p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL]) @@ -638,7 +615,7 @@ def p2pkh_address_to_script(self,v): p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL]) return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] - def create_and_mine_tx_from_txids(self, txids, success = True): + def create_and_mine_tx_from_txids(self, txids): tx = CTransaction() for i in txids: txtmp = CTransaction() diff --git a/test/functional/feature_shutdown.py b/test/functional/feature_shutdown.py new file mode 100755 index 0000000000..d085515be7 --- /dev/null +++ b/test/functional/feature_shutdown.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +"""Test ravend shutdown.""" + +from threading import Thread +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, get_rpc_proxy, wait_until + +def test_long_call(node): + block = node.waitfornewblock() + assert_equal(block['height'], 0) + +class ShutdownTest(RavenTestFramework): + + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coverage_dir=self.nodes[0].coverage_dir) + # Force connection establishment by executing a dummy command. + node.getblockcount() + Thread(target=test_long_call, args=(node,)).start() + # Wait until the server is executing the above `waitfornewblock`. + wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2, err_msg="wait until getrpcinfo active commands") + # Wait 1 second after requesting shutdown but not before the `stop` call + # finishes. This is to ensure event loop waits for current connections + # to close. + self.stop_node(0) #, wait=1000) + +if __name__ == '__main__': + ShutdownTest().main() diff --git a/test/functional/feature_uacomment.py b/test/functional/feature_uacomment.py index 6f6b04d2b3..1f91eb3de1 100755 --- a/test/functional/feature_uacomment.py +++ b/test/functional/feature_uacomment.py @@ -3,6 +3,7 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the -uacomment option.""" from test_framework.test_framework import RavenTestFramework diff --git a/test/functional/feature_unique_assets.py b/test/functional/feature_unique_assets.py index b9465783f7..de8a42569f 100755 --- a/test/functional/feature_unique_assets.py +++ b/test/functional/feature_unique_assets.py @@ -3,17 +3,12 @@ # Copyright (c) 2017-2018 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Testing unique asset use cases -""" -import random +"""Testing unique asset use cases""" +import random from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_contains, - assert_does_not_contain_key, - assert_equal, - assert_raises_rpc_error) - +from test_framework.util import assert_contains, assert_does_not_contain_key, assert_equal, assert_raises_rpc_error def gen_root_asset_name(): size = random.randint(3, 14) @@ -97,7 +92,7 @@ def issue_invalid(self): assert_raises_rpc_error(-8, f"Invalid parameter: asset_name '{asset_name}' has already been used", n0.issue, asset_name) - def issueunique_test(self): + def issue_unique_test(self): self.log.info("Testing issueunique RPC...") n0, n1 = self.nodes[0], self.nodes[1] n0.sendtoaddress(n1.getnewaddress(), 501) @@ -109,6 +104,7 @@ def issueunique_test(self): n0.issueunique(root, asset_tags, ipfs_hashes) block_hash = n0.generate(1)[0] + asset_name = "" for tag in asset_tags: asset_name = f"{root}#{tag}" assert_equal(1, n0.listmyassets()[asset_name]) @@ -138,7 +134,7 @@ def issueunique_test(self): def run_test(self): self.activate_assets() - self.issueunique_test() + self.issue_unique_test() self.issue_one() self.issue_invalid() diff --git a/test/functional/p2p_versionbits.py b/test/functional/feature_versionbits_warning.py similarity index 89% rename from test/functional/p2p_versionbits.py rename to test/functional/feature_versionbits_warning.py 
index 4e57bff1c3..9ed2e3384d 100755 --- a/test/functional/p2p_versionbits.py +++ b/test/functional/feature_versionbits_warning.py @@ -3,16 +3,18 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test version bits warning system. + +""" +Test version bits warning system. Generate chains with block versions that appear to be signalling unknown soft-forks, and test that warning alerts are generated. """ -from test_framework.mininode import (NodeConn, NodeConnCB, msg_block, NetworkThread) +from re import compile +from test_framework.mininode import NodeConn, NodeConnCB, MsgBlock, NetworkThread from test_framework.test_framework import RavenTestFramework -from test_framework.util import (os, p2p_port) -import re +from test_framework.util import os, p2p_port from test_framework.blocktools import create_block, create_coinbase VB_PERIOD = 144 # versionbits period length for regtest @@ -22,7 +24,7 @@ WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect" WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) -VB_PATTERN = re.compile("^Warning.*versionbit") +VB_PATTERN = compile("^Warning.*versionbit") class TestNode(NodeConnCB): def on_inv(self, conn, message): @@ -42,7 +44,7 @@ def setup_network(self): self.setup_nodes() # Send numblocks blocks via peer with nVersionToUse set. 
- def send_blocks_with_version(self, peer, numblocks, nVersionToUse): + def send_blocks_with_version(self, peer, numblocks, n_version_to_use): tip = self.nodes[0].getbestblockhash() height = self.nodes[0].getblockcount() block_time = self.nodes[0].getblockheader(tip)["time"]+1 @@ -50,12 +52,12 @@ def send_blocks_with_version(self, peer, numblocks, nVersionToUse): for _ in range(numblocks): block = create_block(tip, create_coinbase(height+1), block_time) - block.nVersion = nVersionToUse + block.nVersion = n_version_to_use block.solve() - peer.send_message(msg_block(block)) + peer.send_message(MsgBlock(block)) block_time += 1 height += 1 - tip = block.sha256 + tip = block.x16r peer.sync_with_ping() def test_versionbits_in_alert_file(self): @@ -67,8 +69,7 @@ def run_test(self): # Setup the p2p connection and start up the network thread. test_node = TestNode() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)] test_node.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread @@ -81,8 +82,8 @@ def run_test(self): # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD # blocks signaling some unknown bit. 
- nVersion = VB_TOP_BITS | (1<= VB_THRESHOLD blocks signaling # some unknown bit - self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion) + self.send_blocks_with_version(test_node, VB_THRESHOLD, n_version) self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD) # Might not get a versionbits-related alert yet, as we should # have gotten a different alert due to more than 51/100 blocks diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py index df811a6da2..ecf21fbcf0 100755 --- a/test/functional/interface_http.py +++ b/test/functional/interface_http.py @@ -3,14 +3,15 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the RPC HTTP basics.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (str_to_b64str, assert_equal) +"""Test the RPC HTTP basics.""" import http.client import urllib.parse +from test_framework.test_framework import RavenTestFramework +from test_framework.util import str_to_b64str, assert_equal +# noinspection PyUnresolvedReferences class HTTPBasicsTest (RavenTestFramework): def set_test_params(self): self.num_nodes = 3 @@ -32,13 +33,13 @@ def run_test(self): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! #send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) #must also response with a correct json-rpc message - assert(conn.sock!=None) #according to http/1.1 connection must still be open! 
+ assert(conn.sock is not None) #according to http/1.1 connection must still be open! conn.close() #same should be if we add keep-alive because this should be the std. behaviour @@ -49,13 +50,13 @@ def run_test(self): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! #send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) #must also response with a correct json-rpc message - assert(conn.sock!=None) #according to http/1.1 connection must still be open! + assert(conn.sock is not None) #according to http/1.1 connection must still be open! conn.close() #now do the same with "Connection: close" @@ -66,7 +67,7 @@ def run_test(self): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock==None) #now the connection must be closed after the response + assert(conn.sock is None) #now the connection must be closed after the response #node1 (2nd node) is running with disabled keep-alive option urlNode1 = urllib.parse.urlparse(self.nodes[1].url) @@ -89,7 +90,7 @@ def run_test(self): conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) - assert(conn.sock!=None) #connection must be closed because ravend should use keep-alive by default + assert(conn.sock is not None) #connection must still be open because ravend should use keep-alive by default # Check excessive request size conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index
f97323c012..ddf5b678ad 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -3,18 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the REST API.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, - assert_equal, - Decimal, - json, - hex_str_to_bytes, - assert_greater_than) -from struct import (unpack, pack) +from struct import unpack, pack from io import BytesIO from codecs import encode +from test_framework.test_framework import RavenTestFramework +from test_framework.util import connect_nodes_bi, assert_equal, Decimal, json, hex_str_to_bytes, assert_greater_than import http.client import urllib.parse @@ -46,6 +42,8 @@ def http_post_call(host, port, path, requestdata = '', response_object = 0): return conn.getresponse().read() + +# noinspection PyTypeChecker class RESTTest (RavenTestFramework): FORMAT_SEPARATOR = "." 
@@ -112,7 +110,7 @@ def run_test(self): #check chainTip response assert_equal(json_obj['chaintipHash'], bb_hash) - #make sure there is no utox in the response because this oupoint has been spent + # make sure there is no utxo in the response because this outpoint has been spent #check bitmap @@ -156,7 +154,7 @@ def run_test(self): txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json") json_obj = json.loads(json_string) - vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) + #vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then) # get n of 0.1 outpoint n = 0 for vout in json_obj['vout']: @@ -285,10 +283,9 @@ def run_test(self): # check block tx details # let's make 3 tx and mine them on node 1 - txs = [] - txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) - txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) - txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)) + txs = [self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11), + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11), + self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)] self.sync_all() # check that there are exactly 3 transactions in the TX memory pool before generating the block diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index 7be5776e0a..baac2387f8 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -3,17 +3,18 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
+ + """Test the ZMQ notification interface.""" + import configparser import os import struct +from test_framework.test_framework import RavenTestFramework, SkipTest +from test_framework.util import assert_equal, bytes_to_hex_str, hash256, x16_hash_block -from test_framework.test_framework import (RavenTestFramework, SkipTest) -from test_framework.util import (assert_equal, - bytes_to_hex_str, - hash256, - x16_hash_block) +# noinspection PyUnresolvedReferences class ZMQSubscriber: def __init__(self, socket, topic): self.sequence = 0 @@ -33,6 +34,7 @@ def receive(self): return body +# noinspection PyUnresolvedReferences class ZMQTest (RavenTestFramework): def set_test_params(self): self.num_nodes = 2 @@ -93,15 +95,15 @@ def _zmq_test(self): txid = self.hashtx.receive() # Should receive the coinbase raw transaction. - hex = self.rawtx.receive() - assert_equal(bytes_to_hex_str(hash256(hex)), + hex_data = self.rawtx.receive() + assert_equal(bytes_to_hex_str(hash256(hex_data)), self.nodes[1].getrawtransaction(bytes_to_hex_str(txid), True)["hash"]) # Should receive the generated block hash. - hash = bytes_to_hex_str(self.hashblock.receive()) - assert_equal(genhashes[x], hash) + hash_data = bytes_to_hex_str(self.hashblock.receive()) + assert_equal(genhashes[x], hash_data) # The block should only have the coinbase txid. - assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash)["tx"]) + assert_equal([bytes_to_hex_str(txid)], self.nodes[1].getblock(hash_data)["tx"]) # Should receive the generated raw block. block = self.rawblock.receive() @@ -116,8 +118,8 @@ def _zmq_test(self): assert_equal(payment_txid, bytes_to_hex_str(txid)) # Should receive the broadcasted raw transaction. 
- hex = self.rawtx.receive() - assert_equal(payment_txid, bytes_to_hex_str(hash256(hex))) + hex_data = self.rawtx.receive() + assert_equal(payment_txid, bytes_to_hex_str(hash256(hex_data))) if __name__ == '__main__': ZMQTest().main() diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index 28d91358b5..075fda459b 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -3,62 +3,66 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test mempool limiting together/eviction with the wallet.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (gen_return_txouts, - create_confirmed_utxos, - satoshi_round, - create_lots_of_big_transactions) +from test_framework.util import gen_return_txouts, create_confirmed_utxos, satoshi_round, \ + create_lots_of_big_transactions + +# noinspection PyAttributeOutsideInit class MempoolLimitTest(RavenTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 - self.extra_args = [["-maxmempool=9", "-spendzeroconfchange=0"]] - self.thirtyTransactions = 9 #tx are created in groups of 30. Value here will be multiplied by thirty for the number of tx. + self.extra_args = [["-maxmempool=10", "-spendzeroconfchange=0"]] + self.thirtyTransactions = 9 # tx are created in groups of 30. Value here will be multiplied by thirty for the number of tx. 
def run_test(self): txouts = gen_return_txouts() relayfee = self.nodes[0].getnetworkinfo()['relayfee'] txids = [] - utxos = create_confirmed_utxos(relayfee, self.nodes[0], self.thirtyTransactions*30) + utxos = create_confirmed_utxos(relayfee, self.nodes[0], self.thirtyTransactions * 30) - #create a mempool tx that will be evicted + # create a mempool tx that will be evicted us0 = utxos.pop() - inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}] - outputs = {self.nodes[0].getnewaddress() : 0.1} + inputs = [{"txid": us0["txid"], "vout": us0["vout"]}] + outputs = {self.nodes[0].getnewaddress(): 0.1} tx = self.nodes[0].createrawtransaction(inputs, outputs) # Any fee calc method should work as longs as base_fee is set proportionally... - #1 - txF = self.nodes[0].fundrawtransaction(tx) - base_fee = satoshi_round(0.01025*100) # DEFAULT_FALLBACK_FEE (settxfee(0) is default and falls through to this) + # 1 + tx_f = self.nodes[0].fundrawtransaction(tx) + base_fee = satoshi_round( + 0.01025 * 100) # DEFAULT_FALLBACK_FEE (settxfee(0) is default and falls through to this) - #2 + # 2 # self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee (this is too low and will be bumped to MINFEE) - # txF = self.nodes[0].fundrawtransaction(tx) + # tx_f = self.nodes[0].fundrawtransaction(tx) # base_fee = satoshi_round(0.0005*100) # DEFAULT_TRANSACTION_MINFEE # self.nodes[0].settxfee(0) # return to automatic fee selection - #3 - # txF = self.nodes[0].fundrawtransaction(tx, {"feeRate": relayfee}) + # 3 + # tx_f = self.nodes[0].fundrawtransaction(tx, {"feeRate": relayfee}) # relayfee = self.nodes[0].getnetworkinfo()['relayfee'] # base_fee = relayfee*100 - txFS = self.nodes[0].signrawtransaction(txF['hex']) - txid = self.nodes[0].sendrawtransaction(txFS['hex']) + tx_fs = self.nodes[0].signrawtransaction(tx_f['hex']) + txid = self.nodes[0].sendrawtransaction(tx_fs['hex']) - for i in range (self.thirtyTransactions): + for i in range(self.thirtyTransactions): 
txids.append([]) - txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee) + txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30 * i:30 * i + 30], 30, + (i + 1) * base_fee) # by now, the tx should be evicted, check confirmation state - assert(txid not in self.nodes[0].getrawmempool()) + assert (txid not in self.nodes[0].getrawmempool()) txdata = self.nodes[0].gettransaction(txid) - assert(txdata['confirmations'] == 0) #confirmation should still be 0 + assert (txdata['confirmations'] == 0) # confirmation should still be 0 + if __name__ == '__main__': MempoolLimitTest().main() diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 954f73255f..0c05695987 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -3,15 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test descendant package tracking code.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (satoshi_round, - Decimal, - assert_equal, - assert_raises_rpc_error, - sync_blocks, - sync_mempools) +from test_framework.util import satoshi_round, Decimal, assert_equal, assert_raises_rpc_error, sync_blocks, sync_mempools from test_framework.mininode import COIN MAX_ANCESTORS = 25 @@ -24,7 +20,8 @@ def set_test_params(self): # Build a transaction that spends parent_txid:vout # Return amount sent - def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): + @staticmethod + def chain_transaction(node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee)/num_outputs) inputs = [ {'txid' : parent_txid, 'vout' : vout} ] outputs = {} @@ -35,10 +32,10 @@ def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): txid = node.sendrawtransaction(signedtx['hex']) fulltx = node.getrawtransaction(txid, 1) assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output - return (txid, send_value) + return txid, send_value def run_test(self): - ''' Mine some blocks and have them mature. ''' + """ Mine some blocks and have them mature. 
""" self.nodes[0].generate(101) utxo = self.nodes[0].listunspent(10) txid = utxo[0]['txid'] @@ -141,7 +138,7 @@ def run_test(self): descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] - if (x == chain[-1]): + if x == chain[-1]: assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002)) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000) @@ -236,7 +233,7 @@ def run_test(self): outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransaction(rawtx) - txid = self.nodes[0].sendrawtransaction(signedtx['hex']) + self.nodes[0].sendrawtransaction(signedtx['hex']) sync_mempools(self.nodes) # Now try to disconnect the tip on each node... diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 8681f680f1..c53f44db1e 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test mempool persistence. + +""" +Test mempool persistence. By default, ravend will dump mempool on shutdown and then reload it on startup. This can be overridden with @@ -34,13 +36,12 @@ mempool. - Verify that savemempool throws when the RPC is called if node1 can't write to disk. 
- """ + import os import time - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, Decimal, wait_until, assert_raises_rpc_error) +from test_framework.util import assert_equal, Decimal, wait_until, assert_raises_rpc_error class MempoolPersistTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/mempool_resurrect.py b/test/functional/mempool_resurrect.py index 6504d73478..79bc2068ec 100755 --- a/test/functional/mempool_resurrect.py +++ b/test/functional/mempool_resurrect.py @@ -3,10 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test resurrection of mined transactions when the blockchain is re-organized.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (create_tx, assert_equal) +from test_framework.util import create_tx, assert_equal # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(RavenTestFramework): diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py index d0f437039b..c5272a62c8 100755 --- a/test/functional/mining_getblocktemplate_longpoll.py +++ b/test/functional/mining_getblocktemplate_longpoll.py @@ -3,19 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test longpolling with getblocktemplate.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (get_rpc_proxy, random_transaction, Decimal) +"""Test longpolling with getblocktemplate.""" import threading +from test_framework.test_framework import RavenTestFramework +from test_framework.util import get_rpc_proxy, random_transaction, Decimal class LongpollThread(threading.Thread): def __init__(self, node): threading.Thread.__init__(self) # query current longpollid - templat = node.getblocktemplate() - self.longpollid = templat['longpollid'] + template = node.getblocktemplate() + self.longpollid = template['longpollid'] # create a new connection to the node, we can't use the same # connection from two threads self.node = get_rpc_proxy(node.url, 1, timeout=600, coverage_dir=node.coverage_dir) @@ -30,11 +30,11 @@ def set_test_params(self): def run_test(self): self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.") self.nodes[0].generate(10) - templat = self.nodes[0].getblocktemplate() - longpollid = templat['longpollid'] + template = self.nodes[0].getblocktemplate() + longpollid = template['longpollid'] # longpollid should not change between successive invocations if nothing else happens - templat2 = self.nodes[0].getblocktemplate() - assert(templat2['longpollid'] == longpollid) + template_2 = self.nodes[0].getblocktemplate() + assert(template_2['longpollid'] == longpollid) # Test 1: test that the longpolling wait if we do nothing thr = LongpollThread(self.nodes[0]) diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index 67a4dc2b8b..f2165540b3 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -3,11 +3,12 @@ # Copyright (c) 2017-2018 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. + """Test the prioritisetransaction mining RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (gen_return_txouts, create_confirmed_utxos, create_lots_of_big_transactions, assert_raises_rpc_error, assert_equal, time) -from test_framework.mininode import (COIN, MAX_BLOCK_BASE_SIZE) +from test_framework.util import gen_return_txouts, create_confirmed_utxos, create_lots_of_big_transactions, assert_raises_rpc_error, assert_equal, time +from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE class PrioritiseTransactionTest(RavenTestFramework): def set_test_params(self): @@ -60,7 +61,7 @@ def run_test(self): high_fee_tx = x # Something high-fee should have been mined! - assert(high_fee_tx != None) + assert(high_fee_tx is not None) # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). @@ -76,7 +77,7 @@ def run_test(self): # Now verify the modified-high feerate transaction isn't mined before # the other high fee transactions. Keep mining until our mempool has # decreased by all the high fee size that we calculated above. - while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): + while self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]: self.nodes[0].generate(1) # High fee transaction should not have been mined, but other high fee rate @@ -85,7 +86,7 @@ def run_test(self): self.log.info("Assert that de-prioritised transaction is still in mempool") assert(high_fee_tx in mempool) for x in txids[2]: - if (x != high_fee_tx): + if x != high_fee_tx: assert(x not in mempool) # Create a free transaction. Should be rejected. 
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index c0d88d936f..e4dfe5290e 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -3,61 +3,22 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test compact blocks (BIP 152). + +""" +Test compact blocks (BIP 152). Version 1 compact blocks are pre-segwit (txids) Version 2 compact blocks are post-segwit (wtxids) """ -from test_framework.mininode import (NodeConnCB, - mininode_lock, - msg_getheaders, - msg_headers, - CBlockHeader, - msg_block, - CTransaction, - CTxIn, - CTxOut, - COutPoint, - msg_cmpctblock, - msg_sendcmpct, - msg_sendheaders, - P2PHeaderAndShortIDs, - PrefilledTransaction, - from_hex, - CBlock, - HeaderAndShortIDs, - CInv, - msg_getdata, - calculate_shortid, - msg_inv, - calculate_shortid, - msg_witness_blocktxn, - msg_blocktxn, - BlockTransactions, - msg_tx, - MSG_WITNESS_FLAG, - msg_witness_block, - msg_getblocktxn, - BlockTransactionsRequest, - ToHex, - CTxInWitness, - ser_uint256, - NodeConn, - NODE_NETWORK, - NetworkThread, - NODE_WITNESS) +from test_framework.mininode import (NodeConnCB, mininode_lock, MsgGetHeaders, MsgHeaders, CBlockHeader, MsgBlock, CTransaction, CTxIn, CTxOut, COutPoint, MsgCmpctBlock, MsgSendCmpct, MsgSendHeaders, + P2PHeaderAndShortIDs, PrefilledTransaction, from_hex, CBlock, HeaderAndShortIDs, CInv, MsgGetdata, MsgInv, calculate_shortid, MsgWitnessBlocktxn, MsgBlockTxn, + BlockTransactions, MsgTx, MSG_WITNESS_FLAG, MsgWitnessBlock, MsgGetBlockTxn, BlockTransactionsRequest, to_hex, CTxInWitness, ser_uint256, NodeConn, NODE_NETWORK, + NetworkThread, NODE_WITNESS) from test_framework.test_framework import RavenTestFramework -from test_framework.util import (wait_until, - assert_equal, - satoshi_round, - Decimal, - random, - get_bip9_status, - 
p2p_port, - sync_blocks) -from test_framework.blocktools import (create_block, create_coinbase, add_witness_commitment) -from test_framework.script import (CScript, OP_TRUE) +from test_framework.util import wait_until, assert_equal, satoshi_round, Decimal, random, get_bip9_status, p2p_port, sync_blocks +from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment +from test_framework.script import CScript, OP_TRUE # TestNode: A peer we use to send messages to ravend, and store responses. class TestNode(NodeConnCB): @@ -75,13 +36,13 @@ def on_sendcmpct(self, conn, message): def on_cmpctblock(self, conn, message): self.block_announced = True - self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() + self.last_message["cmpctblock"].header_and_shortids.header.calc_x16r() self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, conn, message): self.block_announced = True for x in self.last_message["headers"].headers: - x.calc_sha256() + x.calc_x16r() self.announced_blockhashes.add(x.sha256) def on_inv(self, conn, message): @@ -102,28 +63,28 @@ def clear_block_announcement(self): self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): - msg = msg_getheaders() + msg = MsgGetHeaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() + headers_message = MsgHeaders() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) - wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock, err_msg="request_headers_and_sync") self.clear_block_announcement() # Block until a 
block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): - return (block_hash in self.announced_blockhashes) - wait_until(received_hash, timeout=timeout, lock=mininode_lock) + return block_hash in self.announced_blockhashes + wait_until(received_hash, timeout=timeout, lock=mininode_lock, err_msg="wait_for_block_disconnect") def send_await_disconnect(self, message, timeout=30): """Sends a message to the node and wait for disconnect. @@ -131,7 +92,7 @@ def send_await_disconnect(self, message, timeout=30): This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) - wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock) + wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock, err_msg="send_wait_disconnect") class CompactBlocksTest(RavenTestFramework): def set_test_params(self): @@ -141,7 +102,8 @@ def set_test_params(self): self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]] self.utxos = [] - def build_block_on_tip(self, node, segwit=False): + @staticmethod + def build_block_on_tip(node, segwit=False): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] @@ -156,7 +118,7 @@ def build_block_on_tip(self, node, segwit=False): def make_utxos(self): # Doesn't matter which node we use, just use node0. 
block = self.build_block_on_tip(self.nodes[0]) - self.test_node.send_and_ping(msg_block(block)) + self.test_node.send_and_ping(MsgBlock(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) @@ -172,9 +134,9 @@ def make_utxos(self): block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() - self.test_node.send_and_ping(msg_block(block2)) + self.test_node.send_and_ping(MsgBlock(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) - self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) + self.utxos.extend([[tx.x16r, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): @@ -186,11 +148,12 @@ def make_utxos(self): # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. - def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): + @staticmethod + def test_sendcmpct(node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): - return (len(test_node.last_sendcmpct) > 0) - wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) + return len(test_node.last_sendcmpct) > 0 + wait_until(received_sendcmpct, timeout=30, lock=mininode_lock, err_msg="test_sendcmpct") with mininode_lock: # Check that the first version received is the preferred one assert_equal(test_node.last_sendcmpct[0].version, preferred_version) @@ -200,11 +163,11 @@ def received_sendcmpct(): tip = int(node.getbestblockhash(), 16) - def check_announcement_of_new_block(node, peer, predicate): + def check_announcement_of_new_block(node_data, peer, predicate): peer.clear_block_announcement() - block_hash = int(node.generate(1)[0], 16) + block_hash = int(node_data.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, 
timeout=30) - assert(peer.block_announced) + assert peer.block_announced with mininode_lock: assert predicate(peer), ( @@ -224,7 +187,7 @@ def check_announcement_of_new_block(node, peer, predicate): test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version - sendcmpct = msg_sendcmpct() + sendcmpct = MsgSendCmpct() sendcmpct.version = preferred_version+1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) @@ -252,7 +215,7 @@ def check_announcement_of_new_block(node, peer, predicate): check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders - test_node.send_and_ping(msg_sendheaders()) + test_node.send_and_ping(MsgSendHeaders()) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after sending a version-1, announce=false message. @@ -288,7 +251,7 @@ def test_invalid_cmpctblock_message(self): # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] - self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) + self.test_node.send_await_disconnect(MsgCmpctBlock(cmpct_block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given @@ -315,7 +278,7 @@ def test_compactblock_construction(self, node, test_node, version, use_witness_a segwit_tx_generated = True if use_witness_address: - assert(segwit_tx_generated) # check that our test is not broken + assert segwit_tx_generated # check that our test is not broken # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) @@ -331,14 +294,13 @@ def test_compactblock_construction(self, node, test_node, version, use_witness_a # Store the raw block in our internal format. 
block = from_hex(CBlock(), node.getblock("%02x" % block_hash, False)) for tx in block.vtx: - tx.calc_sha256() + tx.calc_x16r() block.rehash() # Wait until the block was announced (via compact blocks) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock, err_msg="test_node.received_block_announcement") # Now fetch and check the compact block - header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes @@ -349,21 +311,21 @@ def test_compactblock_construction(self, node, test_node, version, use_witness_a with mininode_lock: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" - test_node.send_message(msg_getdata([inv])) + test_node.send_message(MsgGetdata([inv])) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock, err_msg="test_node.received_block_announcement") # Now fetch and check the compact block - header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) - def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block): + @staticmethod + def check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block): # Check that we got the right block! 
- header_and_shortids.header.calc_sha256() + header_and_shortids.header.calc_x16r() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase @@ -372,14 +334,14 @@ def check_compactblock_construction_from_block(self, version, header_and_shortid # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: - entry.tx.calc_sha256() + entry.tx.calc_x16r() # This checks the non-witness parts of the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # And this checks the witness - wtxid = entry.tx.calc_sha256(True) + wtxid = entry.tx.calc_x16r(True) if version == 2: - assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True)) + assert_equal(wtxid, block.vtx[entry.index].calc_x16r(True)) else: # Shouldn't have received a witness assert(entry.tx.wit.is_null()) @@ -400,7 +362,7 @@ def check_compactblock_construction_from_block(self, version, header_and_shortid else: tx_hash = block.vtx[index].sha256 if version == 2: - tx_hash = block.vtx[index].calc_sha256(True) + tx_hash = block.vtx[index].calc_x16r(True) shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) @@ -421,12 +383,12 @@ def test_compactblock_requests(self, node, test_node, version, segwit): test_node.last_message.pop("getdata", None) if announce == "inv": - test_node.send_message(msg_inv([CInv(2, block.sha256)])) - wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) + test_node.send_message(MsgInv([CInv(2, block.sha256)])) + wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock, err_msg="test_compactblock_requests") test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) - wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) + wait_until(lambda: 
"getdata" in test_node.last_message, timeout=30, lock=mininode_lock, err_msg="test_nod.last_message getdata") assert_equal(len(test_node.last_message["getdata"].inv), 1) assert_equal(test_node.last_message["getdata"].inv[0].type, 4) assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) @@ -438,10 +400,10 @@ def test_compactblock_requests(self, node, test_node, version, segwit): [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 if version == 2: - coinbase_hash = block.vtx[0].calc_sha256(True) + coinbase_hash = block.vtx[0].calc_x16r(True) comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash) ] - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + test_node.send_and_ping(MsgCmpctBlock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: @@ -451,9 +413,9 @@ def test_compactblock_requests(self, node, test_node, version, segwit): # Send the coinbase, and verify that the tip advances. 
if version == 2: - msg = msg_witness_blocktxn() + msg = MsgWitnessBlocktxn() else: - msg = msg_blocktxn() + msg = MsgBlockTxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) @@ -468,7 +430,7 @@ def build_block_with_transactions(self, node, utxo, num_transactions): tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) tx.rehash() - utxo = [tx.sha256, 0, tx.vout[0].nValue] + utxo = [tx.x16r, 0, tx.vout[0].nValue] block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() @@ -482,16 +444,16 @@ def test_getblocktxn_requests(self, node, test_node, version): with_witness = (version==2) def test_getblocktxn_response(compact_block, peer, expected_result): - msg = msg_cmpctblock(compact_block.to_p2p()) + msg = MsgCmpctBlock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert("getblocktxn" in peer.last_message) absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute() assert_equal(absolute_indexes, expected_result) - def test_tip_after_message(node, peer, msg, tip): + def test_tip_after_message(node_data, peer, msg, tip): peer.send_and_ping(msg) - assert_equal(int(node.getbestblockhash(), 16), tip) + assert_equal(int(node_data.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. 
@@ -504,9 +466,9 @@ def test_tip_after_message(node, peer, msg, tip): test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) - msg_bt = msg_blocktxn() + msg_bt = MsgBlockTxn() if with_witness: - msg_bt = msg_witness_blocktxn() # serialize with witnesses + msg_bt = MsgWitnessBlocktxn() # serialize with witnesses msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) @@ -524,7 +486,7 @@ def test_tip_after_message(node, peer, msg, tip): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) - test_node.send_and_ping(msg_tx(block.vtx[1])) + test_node.send_and_ping(MsgTx(block.vtx[1])) assert(block.vtx[1].hash in node.getrawmempool()) # Prefill 4 out of the 6 transactions, and verify that only the one @@ -541,7 +503,7 @@ def test_tip_after_message(node, peer, msg, tip): block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) for tx in block.vtx[1:]: - test_node.send_message(msg_tx(tx)) + test_node.send_message(MsgTx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() @@ -554,7 +516,7 @@ def test_tip_after_message(node, peer, msg, tip): # Send compact block comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness) - test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) + test_tip_after_message(node, test_node, MsgCmpctBlock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert("getblocktxn" not in test_node.last_message) @@ -562,7 +524,7 @@ def test_tip_after_message(node, peer, msg, tip): # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. 
def test_incorrect_blocktxn_response(self, node, test_node, version): - if (len(self.utxos) == 0): + if len(self.utxos) == 0: self.make_utxos() utxo = self.utxos.pop(0) @@ -570,7 +532,7 @@ def test_incorrect_blocktxn_response(self, node, test_node, version): self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in block.vtx[1:6]: - test_node.send_message(msg_tx(tx)) + test_node.send_message(MsgTx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() @@ -580,8 +542,7 @@ def test_incorrect_blocktxn_response(self, node, test_node, version): # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2)) - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) - absolute_indexes = [] + test_node.send_and_ping(MsgCmpctBlock(comp_block.to_p2p())) with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() @@ -595,9 +556,9 @@ def test_incorrect_blocktxn_response(self, node, test_node, version): # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. 
- msg = msg_blocktxn() + msg = MsgBlockTxn() if version==2: - msg = msg_witness_blocktxn() + msg = MsgWitnessBlocktxn() msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) test_node.send_and_ping(msg) @@ -605,62 +566,64 @@ def test_incorrect_blocktxn_response(self, node, test_node, version): assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request - wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) + wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock, err_msg="test_node.last_message getdata") assert_equal(len(test_node.last_message["getdata"].inv), 1) assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG) assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block if version==2: - test_node.send_and_ping(msg_witness_block(block)) + test_node.send_and_ping(MsgWitnessBlock(block)) else: - test_node.send_and_ping(msg_block(block)) + test_node.send_and_ping(MsgBlock(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) - def test_getblocktxn_handler(self, node, test_node, version): + @staticmethod + def test_getblocktxn_handler(node, test_node, version): # ravend will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height - while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): + while current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH: block_hash = node.getblockhash(current_height) block = from_hex(CBlock(), node.getblock(block_hash, False)) - msg = msg_getblocktxn() + msg = MsgGetBlockTxn() msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) - wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) + wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock, err_msg="test_getblocktxn_handler") - [tx.calc_sha256() for tx in block.vtx] + [tx.calc_x16r() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0) - tx.calc_sha256() + tx.calc_x16r() assert_equal(tx.sha256, block.vtx[index].sha256) if version == 1: # Witnesses should have been stripped assert(tx.wit.is_null()) else: # Check that the witness matches - assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) + assert_equal(tx.calc_x16r(True), block.vtx[index].calc_x16r(True)) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. 
block_hash = node.getblockhash(current_height) + # noinspection PyUnboundLocalVariable msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) with mininode_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: - test_node.last_message["block"].block.calc_sha256() + test_node.last_message["block"].block.calc_x16r() assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16)) assert "blocktxn" not in test_node.last_message @@ -671,22 +634,22 @@ def test_compactblocks_not_at_tip(self, node, test_node): for _ in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock, err_msg="test_compactblocks_not_at_tip test_node.received_block_announcement") test_node.clear_block_announcement() - test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) + test_node.send_message(MsgGetdata([CInv(4, int(new_blocks[0], 16))])) + wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock, err_msg="test_compactblocks_not_at_tip testnode.last_message") test_node.clear_block_announcement() node.generate(1) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock, err_msg="test_compactblocks_not_at_tip test_node.received_block_announcement") test_node.clear_block_announcement() with mininode_lock: test_node.last_message.pop("block", None) - test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) - wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) + 
test_node.send_message(MsgGetdata([CInv(4, int(new_blocks[0], 16))])) + wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock, err_msg="test_node.received_block_announcement test_node.last_message") with mininode_lock: - test_node.last_message["block"].block.calc_sha256() + test_node.last_message["block"].block.calc_x16r() assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. @@ -698,7 +661,7 @@ def test_compactblocks_not_at_tip(self, node, test_node): comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) - test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + test_node.send_and_ping(MsgCmpctBlock(comp_block.to_p2p())) tips = node.getchaintips() found = False @@ -707,11 +670,11 @@ def test_compactblocks_not_at_tip(self, node, test_node): assert_equal(x["status"], "headers-only") found = True break - assert(found) + assert found # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). - msg = msg_getblocktxn() + msg = MsgGetBlockTxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_message.pop("blocktxn", None) @@ -719,7 +682,8 @@ def test_compactblocks_not_at_tip(self, node, test_node): with mininode_lock: assert "blocktxn" not in test_node.last_message - def activate_segwit(self, node): + @staticmethod + def activate_segwit(node): node.generate(144*3) assert_equal(get_bip9_status(node, "segwit")["status"], 'active') @@ -730,16 +694,16 @@ def test_end_to_end_block_relay(self, node, listeners): [l.clear_block_announcement() for l in listeners] - # ToHex() won't serialize with witness, but this block has no witnesses + # to_hex() won't serialize with witness, but this block has no witnesses # anyway. TODO: repeat this test with witness tx's to a segwit node. 
- node.submitblock(ToHex(block)) + node.submitblock(to_hex(block)) for l in listeners: - wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) + wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock, err_msg="test_end_to_end_block_relay received_block_announcement") with mininode_lock: for l in listeners: assert "cmpctblock" in l.last_message - l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() + l.last_message["cmpctblock"].header_and_shortids.header.calc_x16r() assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, @@ -762,7 +726,7 @@ def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit): # verify that we don't get disconnected. comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit) - msg = msg_cmpctblock(comp_block.to_p2p()) + msg = MsgCmpctBlock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance @@ -771,11 +735,12 @@ def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit): # Helper for enabling cb announcements # Send the sendcmpct request and sync headers - def request_cb_announcements(self, peer, node, version): + @staticmethod + def request_cb_announcements(peer, node, version): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) - msg = msg_sendcmpct() + msg = MsgSendCmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) @@ -783,28 +748,28 @@ def request_cb_announcements(self, peer, node, version): def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert(len(self.utxos)) - def announce_cmpct_block(node, peer): + def announce_cmpct_block(node_data, peer): utxo = self.utxos.pop(0) - block = self.build_block_with_transactions(node, 
utxo, 5) + block_data = self.build_block_with_transactions(node_data, utxo, 5) - cmpct_block = HeaderAndShortIDs() - cmpct_block.initialize_from_block(block) - msg = msg_cmpctblock(cmpct_block.to_p2p()) - peer.send_and_ping(msg) + cmpct_block_data = HeaderAndShortIDs() + cmpct_block_data.initialize_from_block(block_data) + msg_data = MsgCmpctBlock(cmpct_block_data.to_p2p()) + peer.send_and_ping(msg_data) with mininode_lock: assert "getblocktxn" in peer.last_message - return block, cmpct_block + return block_data, cmpct_block_data block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: - delivery_peer.send_message(msg_tx(tx)) + delivery_peer.send_message(MsgTx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) + delivery_peer.send_and_ping(MsgCmpctBlock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) @@ -813,17 +778,17 @@ def announce_cmpct_block(node, peer): block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: - delivery_peer.send_message(msg_tx(tx)) + delivery_peer.send_message(MsgTx(tx)) delivery_peer.sync_with_ping() cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ] cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)] cmpct_block.use_witness = True - delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) + delivery_peer.send_and_ping(MsgCmpctBlock(cmpct_block.to_p2p())) assert(int(node.getbestblockhash(), 16) != block.sha256) - msg = msg_blocktxn() + msg = MsgBlockTxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) @@ -835,12 +800,11 @@ def run_test(self): self.segwit_node = TestNode() self.old_node = 
TestNode() # version 1 peer <--> segwit node - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], - self.segwit_node, services=NODE_NETWORK|NODE_WITNESS)) - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], - self.old_node, services=NODE_NETWORK)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node), + NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], + self.segwit_node, services=NODE_NETWORK | NODE_WITNESS), + NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], + self.old_node, services=NODE_NETWORK)] self.test_node.add_connection(connections[0]) self.segwit_node.add_connection(connections[1]) self.old_node.add_connection(connections[2]) @@ -936,7 +900,7 @@ def run_test(self): # node1 will not download blocks from node0. self.log.info("Syncing nodes...") assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()) - while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()): + while self.nodes[0].getblockcount() > self.nodes[1].getblockcount(): block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1) self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False)) assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash()) diff --git a/test/functional/p2p_disconnect_ban.py b/test/functional/p2p_disconnect_ban.py index 50fda319ac..6a3e3161a0 100755 --- a/test/functional/p2p_disconnect_ban.py +++ b/test/functional/p2p_disconnect_ban.py @@ -3,14 +3,12 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test node disconnect and ban behavior""" -import time +import time from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, - assert_raises_rpc_error, - connect_nodes_bi, - wait_until) +from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes_bi, wait_until class DisconnectBanTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index fff8fa6e15..2863dda09c 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -3,24 +3,26 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test processing of feefilter messages. - (Wallet now has DEFAULT_TRANSACTION_MINFEE = 0.00050000""" +""" +Test processing of feefilter messages. -from test_framework.mininode import (mininode_lock, NodeConnCB, NodeConn, NetworkThread, msg_feefilter) -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (sync_blocks, p2p_port, Decimal, sync_mempools) -import time +(Wallet now has DEFAULT_TRANSACTION_MINFEE = 0.01000000 +""" +import time +from test_framework.mininode import mininode_lock, NodeConnCB, NodeConn, NetworkThread, MsgFeeFilter +from test_framework.test_framework import RavenTestFramework +from test_framework.util import sync_blocks, p2p_port, Decimal, sync_mempools -def hashToHex(hash): - return format(hash, '064x') +def hash_to_hex(hash_data): + return format(hash_data, '064x') # Wait up to 60 secs to see if the testnode has received all the expected invs -def allInvsMatch(invsExpected, testnode): +def all_invs_match(invs_expected, testnode): for _ in range(60): with mininode_lock: - if (sorted(invsExpected) == sorted(testnode.txinvs)): + if sorted(invs_expected) == sorted(testnode.txinvs): return True 
time.sleep(1) return False @@ -32,8 +34,8 @@ def __init__(self): def on_inv(self, conn, message): for i in message.inv: - if (i.type == 1): - self.txinvs.append(hashToHex(i.hash)) + if i.type == 1: + self.txinvs.append(hash_to_hex(i.hash)) def clear_invs(self): with mininode_lock: @@ -57,23 +59,23 @@ def run_test(self): NetworkThread().start() test_node.wait_for_verack() - # Test that invs are received for all txs at feerate of 2,000,000 sats + # Test that invs are received for all txs at feerate of 2,000,000 corbies node1.settxfee(Decimal("0.02000000")) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, test_node)) + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] + assert(all_invs_match(txids, test_node)) test_node.clear_invs() - # Set a filter of 1,500,000 sats (must be above 1,000,000 sats (min fee is enforced) - test_node.send_and_ping(msg_feefilter(1500000)) + # Set a filter of 1,500,000 corbies (must be above 1,000,000 corbies (min fee is enforced) + test_node.send_and_ping(MsgFeeFilter(1500000)) # Test that txs are still being received (paying 70 sat/byte) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, test_node)) + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] + assert(all_invs_match(txids, test_node)) test_node.clear_invs() - # Change tx fee rate to 1,350,000 sats and test they are no longer received + # Change tx fee rate to 1,350,000 corbies and test they are no longer received node1.settxfee(Decimal("0.013500000")) - [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] sync_mempools(self.nodes) # must be sure node 0 has received all txs # Raise the tx fee back up above the mintxfee, submit 1 tx on node 0, @@ -88,13 +90,13 @@ def run_test(self): # as well. 
node0.settxfee(Decimal("0.01600000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] # - assert(allInvsMatch(txids, test_node)) + assert(all_invs_match(txids, test_node)) test_node.clear_invs() # Remove fee filter and check that txs are received again - test_node.send_and_ping(msg_feefilter(0)) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] - assert(allInvsMatch(txids, test_node)) + test_node.send_and_ping(MsgFeeFilter(0)) + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] + assert(all_invs_match(txids, test_node)) test_node.clear_invs() if __name__ == '__main__': diff --git a/test/functional/p2p_fingerprint.py b/test/functional/p2p_fingerprint.py index 7fbd310c03..73aaa10f25 100755 --- a/test/functional/p2p_fingerprint.py +++ b/test/functional/p2p_fingerprint.py @@ -3,26 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test various fingerprinting protections. + +""" +Test various fingerprinting protections. If an stale block more than a month old or its header are requested by a peer, the node should pretend that it does not have it to avoid fingerprinting. 
""" import time - -from test_framework.blocktools import (create_block, create_coinbase) -from test_framework.mininode import (CInv, - NetworkThread, - NodeConn, - NodeConnCB, - msg_headers, - msg_block, - msg_getdata, - msg_getheaders, - wait_until) +from test_framework.blocktools import create_block, create_coinbase +from test_framework.mininode import CInv, NetworkThread, NodeConn, NodeConnCB, MsgHeaders, MsgBlock, MsgGetdata, MsgGetHeaders, wait_until from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, p2p_port,) +from test_framework.util import assert_equal, p2p_port class P2PFingerprintTest(RavenTestFramework): def set_test_params(self): @@ -30,7 +23,8 @@ def set_test_params(self): self.num_nodes = 1 # Build a chain of blocks on top of given one - def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time): + @staticmethod + def build_chain(nblocks, prev_hash, prev_height, prev_median_time): blocks = [] for _ in range(nblocks): coinbase = create_coinbase(prev_height + 1) @@ -45,24 +39,28 @@ def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time): return blocks # Send a getdata request for a given block hash - def send_block_request(self, block_hash, node): - msg = msg_getdata() + @staticmethod + def send_block_request(block_hash, node): + msg = MsgGetdata() msg.inv.append(CInv(2, block_hash)) # 2 == "Block" node.send_message(msg) # Send a getheaders request for a given single block hash - def send_header_request(self, block_hash, node): - msg = msg_getheaders() + @staticmethod + def send_header_request(block_hash, node): + msg = MsgGetHeaders() msg.hashstop = block_hash node.send_message(msg) # Check whether last block received from node has a given hash - def last_block_equals(self, expected_hash, node): + @staticmethod + def last_block_equals(expected_hash, node): block_msg = node.last_message.get("block") return block_msg and block_msg.block.rehash() == expected_hash # 
Check whether last block header received from node has a given hash - def last_header_equals(self, expected_hash, node): + @staticmethod + def last_header_equals(expected_hash, node): headers_msg = node.last_message.get("headers") return (headers_msg and headers_msg.headers and @@ -75,8 +73,7 @@ def last_header_equals(self, expected_hash, node): def run_test(self): node0 = NodeConnCB() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0)] node0.add_connection(connections[0]) NetworkThread().start() @@ -95,10 +92,10 @@ def run_test(self): new_blocks = self.build_chain(5, block_hash, height, block_time) # Force reorg to a longer chain - node0.send_message(msg_headers(new_blocks)) + node0.send_message(MsgHeaders(new_blocks)) node0.wait_for_getdata() for block in new_blocks: - node0.send_and_ping(msg_block(block)) + node0.send_and_ping(MsgBlock(block)) # Check that reorg succeeded assert_equal(self.nodes[0].getblockcount(), 13) @@ -108,12 +105,12 @@ def run_test(self): # Check that getdata request for stale block succeeds self.send_block_request(stale_hash, node0) test_function = lambda: self.last_block_equals(stale_hash, node0) - wait_until(test_function, timeout=3) + wait_until(test_function, timeout=3, err_msg="test_function 1") # Check that getheader request for stale block header succeeds self.send_header_request(stale_hash, node0) test_function = lambda: self.last_header_equals(stale_hash, node0) - wait_until(test_function, timeout=3) + wait_until(test_function, timeout=3, err_msg="test_function 2") # Longest chain is extended so stale is much older than chain tip self.nodes[0].setmocktime(0) @@ -144,11 +141,11 @@ def run_test(self): self.send_block_request(block_hash, node0) test_function = lambda: self.last_block_equals(block_hash, node0) - wait_until(test_function, timeout=3) + wait_until(test_function, timeout=3, err_msg="test_function 3") 
self.send_header_request(block_hash, node0) test_function = lambda: self.last_header_equals(block_hash, node0) - wait_until(test_function, timeout=3) + wait_until(test_function, timeout=3, err_msg="test_function 4") if __name__ == '__main__': P2PFingerprintTest().main() diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py index 6ed88d790e..c48150a94f 100755 --- a/test/functional/p2p_invalid_block.py +++ b/test/functional/p2p_invalid_block.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test node responses to invalid blocks. + +""" +Test node responses to invalid blocks. In this test we connect to one node over p2p, and test block requests: 1) Valid blocks should be requested and become chain tip. @@ -12,18 +14,19 @@ re-requested. """ -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import assert_equal -from test_framework.comptool import (TestManager, TestInstance, RejectResult) -from test_framework.blocktools import (NetworkThread, create_block, create_coinbase, create_transaction, COIN) import copy import time +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import assert_equal +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import create_block, create_coinbase, create_transaction, COIN +from test_framework.mininode import NetworkThread # Use the ComparisonTestFramework with 1 node: only use --testbinary. class InvalidBlockRequestTest(ComparisonTestFramework): - ''' Can either run this test as 1 node with expected answers, or two and compare them. - Change the "outcome" variable from each TestInstance object to only do the comparison. 
''' + """ Can either run this test as 1 node with expected answers, or two and compare them. + Change the "outcome" variable from each TestInstance object to only do the comparison. """ def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True @@ -105,7 +108,7 @@ def get_tests(self): self.block_time += 1 block3.vtx[0].vout[0].nValue = 100 * COIN # Too high! block3.vtx[0].sha256=None - block3.vtx[0].calc_sha256() + block3.vtx[0].calc_x16r() block3.hashMerkleRoot = block3.calc_merkle_root() block3.rehash() block3.solve() diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index 3decdeafb3..75a065172c 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -3,23 +3,24 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test node responses to invalid transactions. + +""" +Test node responses to invalid transactions. In this test we connect to one node over p2p, and test tx requests. """ -from test_framework.test_framework import ComparisonTestFramework -from test_framework.comptool import (TestManager, TestInstance, RejectResult) -from test_framework.blocktools import (NetworkThread, create_block, create_coinbase, create_transaction, COIN) import time - - +from test_framework.test_framework import ComparisonTestFramework +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import create_block, create_coinbase, create_transaction, COIN +from test_framework.mininode import NetworkThread # Use the ComparisonTestFramework with 1 node: only use --testbinary. class InvalidTxRequestTest(ComparisonTestFramework): - ''' Can either run this test as 1 node with expected answers, or two and compare them. - Change the "outcome" variable from each TestInstance object to only do the comparison. 
''' + """ Can either run this test as 1 node with expected answers, or two and compare them. + Change the "outcome" variable from each TestInstance object to only do the comparison. """ def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index dadd3c5a5c..12e2f17761 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -3,12 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test message sending before handshake completion. + +""" +Test message sending before handshake completion. A node should never send anything other than VERSION/VERACK/REJECT until it's received a VERACK. -This test connects to a node and sends it a few messages, trying to intice it +This test connects to a node and sends it a few messages, trying to entice it into sending us something it shouldn't. Also test that nodes that send unsupported service bits to ravend are disconnected @@ -18,12 +20,14 @@ UPDATE: Raven RIP-2 uses bit 1 << 5. Currently there are no unsupported service bits. 
""" -from test_framework.mininode import (NodeConnCB, NodeConn, msg_verack, msg_ping, msg_getaddr, NetworkThread, mininode_lock) +from test_framework.mininode import NodeConnCB, NodeConn, MsgVerack, MsgPing, MsgGetAddr, NetworkThread, mininode_lock from test_framework.test_framework import RavenTestFramework -from test_framework.util import (logger, p2p_port, wait_until, time) +from test_framework.util import logger, p2p_port, wait_until, time banscore = 10 + +# noinspection PyMethodOverriding class CLazyNode(NodeConnCB): def __init__(self): super().__init__() @@ -69,7 +73,7 @@ class CNodeNoVersionBan(CLazyNode): def on_open(self, conn): super().on_open(conn) for _ in range(banscore): - self.send_message(msg_verack()) + self.send_message(MsgVerack()) def on_reject(self, conn, message): pass @@ -92,8 +96,8 @@ def on_verack(self, conn, message): pass # list! def on_version(self, conn, message): self.version_received = True - conn.send_message(msg_ping()) - conn.send_message(msg_getaddr()) + conn.send_message(MsgPing()) + conn.send_message(MsgGetAddr()) class P2PLeakTest(RavenTestFramework): def set_test_params(self): @@ -107,10 +111,9 @@ def run_test(self): CLazyNode() CLazyNode() - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False), + NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False), + NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode)] no_version_bannode.add_connection(connections[0]) no_version_idlenode.add_connection(connections[1]) no_verack_idlenode.add_connection(connections[2]) diff --git 
a/test/functional/p2p_leak_tx.py b/test/functional/p2p_leak_tx.py new file mode 100755 index 0000000000..99bf1bdba6 --- /dev/null +++ b/test/functional/p2p_leak_tx.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright (c) 2017-2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test that we don't leak txs to inbound peers that we haven't yet announced to""" + +from test_framework.mininode import MsgGetdata, CInv, NetworkThread, NodeConn, NodeConnCB +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, p2p_port + +class TestNode(NodeConnCB): + def on_inv(self, conn, message): + pass + + +class P2PLeakTxTest(RavenTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + + def run_test(self): + gen_node = self.nodes[0] # The block and tx generating node + gen_node.generate(1) + + # Setup the attacking p2p connection and start up the network thread. + self.inbound_peer = TestNode() + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.inbound_peer)] + self.inbound_peer.add_connection(connections[0]) + NetworkThread().start() # Start up network handling in another thread + + + max_repeats = 48 + self.log.info("Running test up to {} times.".format(max_repeats)) + for i in range(max_repeats): + self.log.info('Run repeat {}'.format(i + 1)) + txid = gen_node.sendtoaddress(gen_node.getnewaddress(), 0.01) + + want_tx = MsgGetdata() + want_tx.inv.append(CInv(t=1, h=int(txid, 16))) + self.inbound_peer.last_message.pop('notfound', None) + connections[0].send_message(want_tx) + self.inbound_peer.sync_with_ping() + + if self.inbound_peer.last_message.get('notfound'): + self.log.debug('tx {} was not yet announced to us.'.format(txid)) + self.log.debug("node has responded with a notfound message. 
End test.") + assert_equal(self.inbound_peer.last_message['notfound'].vec[0].hash, int(txid, 16)) + self.inbound_peer.last_message.pop('notfound') + break + else: + self.log.debug('tx {} was already announced to us. Try test again.'.format(txid)) + assert int(txid, 16) in [inv.hash for inv in self.inbound_peer.last_message['inv'].inv] + + +if __name__ == '__main__': + P2PLeakTxTest().main() diff --git a/test/functional/p2p_mempool.py b/test/functional/p2p_mempool.py index 177ce0e945..d8ae7e2563 100755 --- a/test/functional/p2p_mempool.py +++ b/test/functional/p2p_mempool.py @@ -3,15 +3,17 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test p2p mempool message. + +""" +Test p2p mempool message. Test that nodes are disconnected if they send mempool messages when bloom filters are not enabled. """ -from test_framework.mininode import (NodeConn, NodeConnCB, NetworkThread, msg_mempool) +from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread, MsgMempool from test_framework.test_framework import RavenTestFramework -from test_framework.util import (p2p_port, assert_equal) +from test_framework.util import p2p_port, assert_equal class P2PMempoolTests(RavenTestFramework): def set_test_params(self): @@ -28,7 +30,7 @@ def run_test(self): aTestNode.wait_for_verack() #request mempool - aTestNode.send_message(msg_mempool()) + aTestNode.send_message(MsgMempool()) aTestNode.wait_for_disconnect() #mininode must be disconnected at this point diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index a086df8400..3999b1ca18 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -3,85 +3,21 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. + """Test segwit transactions and blocks on P2P network.""" -from test_framework.mininode import (NodeConnCB, - mininode_lock, - msg_inv, - CInv, - msg_block, - msg_headers, - CBlockHeader, - msg_headers, - msg_getdata, - msg_tx, - msg_witness_tx, - msg_witness_block, - NODE_WITNESS, - CTxIn, - COutPoint, - CTxInWitness, - CTxWitness, - MAX_BLOCK_BASE_SIZE, - ser_vector, - MSG_WITNESS_FLAG, - CBlock, - NodeConn, - NODE_NETWORK, - NetworkThread) -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, - connect_nodes, - get_bip9_status, - sync_blocks, - bytes_to_hex_str, - hex_str_to_bytes, - sync_mempools, - p2p_port) -from test_framework.script import (CScript, - CScriptOp, - OP_DUP, - OP_HASH160, - OP_EQUALVERIFY, - OP_CHECKMULTISIG, - SegwitVersion1SignatureHash, - OP_CHECKSIG, - CTransaction, - CTxOut, - OP_TRUE, - CTransaction, - CTxOut, - OP_TRUE, - CScriptNum, - hash160, - OP_EQUAL, - sha256, - OP_0, - OP_RETURN, - ser_uint256, - OP_2DROP, - uint256_from_str, - OP_DROP, - struct, - OP_1, - OP_16, - SIGHASH_ANYONECANPAY, - SIGHASH_ALL, - SIGHASH_NONE, - SIGHASH_SINGLE, - OP_IF, - OP_ELSE, - OP_ENDIF, - SignatureHash) -from test_framework.blocktools import (create_block, - create_coinbase, - add_witness_commitment, - get_witness_script, - WITNESS_COMMITMENT_HEADER) -from test_framework.key import (CECKey, CPubKey) import time import random from binascii import hexlify +from test_framework.mininode import (NodeConnCB, mininode_lock, MsgInv, CInv, MsgBlock, CBlockHeader, MsgHeaders, MsgGetdata, MsgTx, MsgWitnessTx, MsgWitnessBlock, NODE_WITNESS, CTxIn, COutPoint, + CTxInWitness, CTxWitness, MAX_BLOCK_BASE_SIZE, ser_vector, MSG_WITNESS_FLAG, CBlock, NodeConn, NODE_NETWORK, NetworkThread) +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, connect_nodes, get_bip9_status, sync_blocks, bytes_to_hex_str, 
hex_str_to_bytes, sync_mempools, p2p_port +from test_framework.script import (CScript, CScriptOp, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKMULTISIG, segwit_version1_signature_hash, OP_CHECKSIG, CTransaction, CTxOut, OP_TRUE, CScriptNum, + hash160, OP_EQUAL, sha256, OP_0, OP_RETURN, ser_uint256, OP_2DROP, uint256_from_str, OP_DROP, struct, OP_1, OP_16, SIGHASH_ANYONECANPAY, SIGHASH_ALL, SIGHASH_NONE, + SIGHASH_SINGLE, OP_IF, OP_ELSE, OP_ENDIF, signature_hash) +from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER +from test_framework.key import CECKey, CPubKey # The versionbit bit used to signal activation of SegWit VB_WITNESS_BIT = 1 @@ -112,19 +48,19 @@ def on_getdata(self, conn, message): def announce_tx_and_wait_for_getdata(self, tx, timeout=60): with mininode_lock: self.last_message.pop("getdata", None) - self.send_message(msg_inv(inv=[CInv(1, tx.sha256)])) + self.send_message(MsgInv(inv=[CInv(1, tx.x16r)])) self.wait_for_getdata(timeout) - def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): + def announce_block_and_wait_for_getdata(self, block, use_header): with mininode_lock: self.last_message.pop("getdata", None) self.last_message.pop("getheaders", None) - msg = msg_headers() + msg = MsgHeaders() msg.headers = [ CBlockHeader(block) ] if use_header: self.send_message(msg) else: - self.send_message(msg_inv(inv=[CInv(2, block.sha256)])) + self.send_message(MsgInv(inv=[CInv(2, block.x16r)])) self.wait_for_getheaders() self.send_message(msg) self.wait_for_getdata() @@ -132,18 +68,18 @@ def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60): def request_block(self, blockhash, inv_type, timeout=60): with mininode_lock: self.last_message.pop("block", None) - self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)])) + self.send_message(MsgGetdata(inv=[CInv(inv_type, blockhash)])) self.wait_for_block(blockhash, timeout) return 
self.last_message["block"].block def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None): - tx_message = msg_tx(tx) + tx_message = MsgTx(tx) if with_witness: - tx_message = msg_witness_tx(tx) + tx_message = MsgWitnessTx(tx) self.send_message(tx_message) self.sync_with_ping() assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted) - if (reason != None and not accepted): + if reason is not None and not accepted: # Check the rejection reason as well. with mininode_lock: assert_equal(self.last_message["reject"].reason, reason) @@ -151,29 +87,29 @@ def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None): # Test whether a witness block had the correct effect on the tip def test_witness_block(self, block, accepted, with_witness=True): if with_witness: - self.send_message(msg_witness_block(block)) + self.send_message(MsgWitnessBlock(block)) else: - self.send_message(msg_block(block)) + self.send_message(MsgBlock(block)) self.sync_with_ping() assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted) # Used to keep track of anyone-can-spend outputs that we can use in the tests -class UTXO(): - def __init__(self, sha256, n, nValue): - self.sha256 = sha256 +class UTXO: + def __init__(self, x16r, n, n_value): + self.x16r = x16r self.n = n - self.nValue = nValue + self.nValue = n_value # Helper for getting the script associated with a P2PKH -def GetP2PKHScript(pubkeyhash): +def get_p2pkh_script(pubkeyhash): return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)]) # Add signature for a P2PK witness program. 
-def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key): - tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value) +def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key): + tx_hash = segwit_version1_signature_hash(script, tx_to, in_idx, hashtype, value) signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1') - txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script] - txTo.rehash() + tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script] + tx_to.rehash() class SegWitTest(RavenTestFramework): @@ -190,17 +126,18 @@ def setup_network(self): ''' Helpers ''' # Build a block on top of node0's tip. - def build_next_block(self, nVersion=4): + def build_next_block(self, n_version=4): tip = self.nodes[0].getbestblockhash() height = self.nodes[0].getblockcount() + 1 block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1 block = create_block(int(tip, 16), create_coinbase(height), block_time) - block.nVersion = nVersion + block.nVersion = n_version block.rehash() return block # Adds list of transactions to block, adds witness commitment, then solves. - def update_witness_block_with_transactions(self, block, tx_list, nonce=0): + @staticmethod + def update_witness_block_with_transactions(block, tx_list, nonce=0): block.vtx.extend(tx_list) add_witness_commitment(block, nonce) block.solve() @@ -218,11 +155,11 @@ def test_non_witness_transaction(self): # Mine a block with an anyone-can-spend coinbase, # let it mature, then try to spend it. 
self.log.info("Testing non-witness transaction") - block = self.build_next_block(nVersion=1) + block = self.build_next_block(n_version=1) block.solve() - self.test_node.send_message(msg_block(block)) + self.test_node.send_message(MsgBlock(block)) self.test_node.sync_with_ping() # make sure the block was processed - txid = block.vtx[0].sha256 + txid = block.vtx[0].x16r self.nodes[0].generate(99) # let the block mature @@ -230,17 +167,17 @@ def test_non_witness_transaction(self): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(txid, 0), b"")) tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE]))) - tx.calc_sha256() + tx.calc_x16r() # Check that serializing it with or without witness is the same # This is a sanity check of our testing framework. - assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize()) + assert_equal(MsgTx(tx).serialize(), MsgWitnessTx(tx).serialize()) - self.test_node.send_message(msg_witness_tx(tx)) + self.test_node.send_message(MsgWitnessTx(tx)) self.test_node.sync_with_ping() # make sure the tx was processed assert(tx.hash in self.nodes[0].getrawmempool()) # Save this transaction for later - self.utxo.append(UTXO(tx.sha256, 0, 49*100000000)) + self.utxo.append(UTXO(tx.x16r, 0, 49*100000000)) self.nodes[0].generate(1) @@ -253,7 +190,7 @@ def test_unnecessary_witness_before_segwit_activation(self): assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active') tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])] @@ -261,10 +198,10 @@ def test_unnecessary_witness_before_segwit_activation(self): # Verify the hash with witness differs from the txid # (otherwise our testing framework must be broken!) 
tx.rehash() - assert(tx.sha256 != tx.calc_sha256(with_witness=True)) + assert(tx.x16r != tx.calc_x16r(with_witness=True)) # Construct a segwit-signaling block that includes the transaction. - block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT))) + block = self.build_next_block(n_version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT))) self.update_witness_block_with_transactions(block, [tx]) # Sending witness data before activation is not allowed (anti-spam # rule). @@ -276,13 +213,13 @@ def test_unnecessary_witness_before_segwit_activation(self): # But it should not be permanently marked bad... # Resend without witness information. - self.test_node.send_message(msg_block(block)) + self.test_node.send_message(MsgBlock(block)) self.test_node.sync_with_ping() assert_equal(self.nodes[0].getbestblockhash(), block.hash) sync_blocks(self.nodes) - # Create a p2sh output -- this is so we can pass the standardness + # Create a p2sh output -- this is so we can pass the standard-ness # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped # in P2SH). p2sh_program = CScript([OP_TRUE]) @@ -290,9 +227,9 @@ def test_unnecessary_witness_before_segwit_activation(self): scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL]) # Now check that unnecessary witnesses can't be used to blind a node - # to a transaction, eg by violating standardness checks. + # to a transaction, eg by violating standard-ness checks. tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey)) tx2.rehash() self.test_node.test_transaction_acceptance(tx2, False, True) @@ -305,7 +242,7 @@ def test_unnecessary_witness_before_segwit_activation(self): # rejected for having a witness before segwit activation shouldn't be added # to the rejection cache. 
tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program]))) + tx3.vin.append(CTxIn(COutPoint(tx2.x16r, 0), CScript([p2sh_program]))) tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey)) tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000] @@ -319,7 +256,7 @@ def test_unnecessary_witness_before_segwit_activation(self): # Now create a new anyone-can-spend utxo for the next test. tx4 = CTransaction() - tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program]))) + tx4.vin.append(CTxIn(COutPoint(tx3.x16r, 0), CScript([p2sh_program]))) tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE]))) tx4.rehash() self.test_node.test_transaction_acceptance(tx3, False, True) @@ -330,7 +267,7 @@ def test_unnecessary_witness_before_segwit_activation(self): # Update our utxo list; we spent the first entry. self.utxo.pop(0) - self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue)) + self.utxo.append(UTXO(tx4.x16r, 0, tx4.vout[0].nValue)) # Mine enough blocks for segwit's vb state to be 'started'. @@ -348,7 +285,6 @@ def advance_to_segwit_started(self): # TODO: we could verify that lockin only happens at the right threshold of # signalling blocks, rather than just at the right period boundary. def advance_to_segwit_lockin(self): - height = self.nodes[0].getblockcount() assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started') # Advance to end of period, and verify lock-in happens at the end self.nodes[0].generate(VB_PERIOD-1) @@ -381,7 +317,7 @@ def test_witness_commitments(self): block.solve() # Test the test -- witness serialization should be different - assert(msg_witness_block(block).serialize() != msg_block(block).serialize()) + assert(MsgWitnessBlock(block).serialize() != MsgBlock(block).serialize()) # This empty block should be valid. 
self.test_node.test_witness_block(block, accepted=True) @@ -400,7 +336,7 @@ def test_witness_commitments(self): # Now test commitments with actual transactions assert (len(self.utxo) > 0) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) # Let's construct a witness program witness_program = CScript([OP_TRUE]) @@ -411,7 +347,7 @@ def test_witness_commitments(self): # tx2 will spend tx1, and send back to a regular anyone-can-spend address tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] @@ -448,7 +384,7 @@ def test_witness_commitments(self): # omit the commitment. block_4 = self.build_next_block() tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) + tx3.vin.append(CTxIn(COutPoint(tx2.x16r, 0), b"")) tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program)) tx3.rehash() block_4.vtx.append(tx3) @@ -458,7 +394,7 @@ def test_witness_commitments(self): # Update available utxo's for use in later test. self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) + self.utxo.append(UTXO(tx3.x16r, 0, tx3.vout[0].nValue)) def test_block_malleability(self): @@ -505,7 +441,7 @@ def test_block_malleability(self): def test_witness_block_size(self): self.log.info("Testing witness block size limit") # TODO: Test that non-witness carrying blocks can't exceed 1MB - # Skipping this test for now; this is covered in p2p_fullblock.py + # Skipping this test for now; this is covered in feature_block.py # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB. 
block = self.build_next_block() @@ -523,7 +459,7 @@ def test_witness_block_size(self): witness_hash = uint256_from_str(sha256(witness_program)) scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)]) - prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) + prevout = COutPoint(self.utxo[0].x16r, self.utxo[0].n) value = self.utxo[0].nValue parent_tx = CTransaction() @@ -537,7 +473,7 @@ def test_witness_block_size(self): child_tx = CTransaction() for i in range(NUM_OUTPUTS): - child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) + child_tx.vin.append(CTxIn(COutPoint(parent_tx.x16r, i), b"")) child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] for i in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) @@ -578,7 +514,7 @@ def test_witness_block_size(self): # Update available utxo's self.utxo.pop(0) - self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue)) + self.utxo.append(UTXO(block.vtx[-1].x16r, 0, block.vtx[-1].vout[0].nValue)) # submitblock will try to add the nonce automatically, so that mining @@ -632,7 +568,7 @@ def test_extra_witness_data(self): # First try extra witness data on a tx that doesn't require a witness tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey)) tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output tx.wit.vtxinwit.append(CTxInWitness()) @@ -655,8 +591,8 @@ def test_extra_witness_data(self): # Now try extra witness/signature data on an input that DOES require a # witness tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) # witness output + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 1), b"")) # non-witness 
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ] @@ -691,11 +627,11 @@ def test_extra_witness_data(self): # Update utxo for later tests self.utxo.pop(0) - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + self.utxo.append(UTXO(tx2.x16r, 0, tx2.vout[0].nValue)) def test_max_witness_push_length(self): - ''' Should only allow up to 520 byte pushes in witness stack ''' + """ Should only allow up to 520 byte pushes in witness stack """ self.log.info("Testing maximum witness push size") MAX_SCRIPT_ELEMENT_SIZE = 520 assert(len(self.utxo)) @@ -707,12 +643,12 @@ def test_max_witness_push_length(self): scriptPubKey = CScript([OP_0, witness_hash]) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) # First try a 521-byte stack element @@ -723,7 +659,7 @@ def test_max_witness_push_length(self): self.test_node.test_witness_block(block, accepted=False) # Now reduce the length of the stack element - tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE) + tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * MAX_SCRIPT_ELEMENT_SIZE add_witness_commitment(block) block.solve() @@ -731,7 +667,7 @@ def test_max_witness_push_length(self): # Update the utxo for later tests self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + self.utxo.append(UTXO(tx2.x16r, 0, tx2.vout[0].nValue)) def test_max_witness_program_length(self): # Can 
create witness outputs that are long, but can't be greater than @@ -749,12 +685,12 @@ def test_max_witness_program_length(self): block = self.build_next_block() tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey)) tx.rehash() tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program] @@ -772,7 +708,7 @@ def test_max_witness_program_length(self): tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey) tx.rehash() - tx2.vin[0].prevout.hash = tx.sha256 + tx2.vin[0].prevout.hash = tx.x16r tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program] tx2.rehash() block.vtx = [block.vtx[0]] @@ -780,11 +716,11 @@ def test_max_witness_program_length(self): self.test_node.test_witness_block(block, accepted=True) self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + self.utxo.append(UTXO(tx2.x16r, 0, tx2.vout[0].nValue)) def test_witness_input_length(self): - ''' Ensure that vin length must match vtxinwit length ''' + """ Ensure that vin length must match vtxinwit length """ self.log.info("Testing witness input length") assert(len(self.utxo)) @@ -794,7 +730,7 @@ def test_witness_input_length(self): # Create a transaction that splits our utxo into many outputs tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) nValue = self.utxo[0].nValue for i in range(10): tx.vout.append(CTxOut(int(nValue/10), scriptPubKey)) @@ -828,7 +764,7 @@ def serialize_with_witness(self): tx2 = BrokenCTransaction() for i in 
range(10): - tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, i), b"")) tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE]))) # First try using a too long vtxinwit @@ -864,7 +800,7 @@ def serialize_with_witness(self): self.test_node.test_witness_block(block, accepted=True) self.utxo.pop() - self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + self.utxo.append(UTXO(tx2.x16r, 0, tx2.vout[0].nValue)) def test_witness_tx_relay_before_segwit_activation(self): @@ -874,13 +810,13 @@ def test_witness_tx_relay_before_segwit_activation(self): # not be added to recently rejected list. assert(len(self.utxo)) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ] tx.rehash() - tx_hash = tx.sha256 + tx_hash = tx.x16r tx_value = tx.vout[0].nValue # Verify that if a peer doesn't set nServices to include NODE_WITNESS, @@ -893,7 +829,7 @@ def test_witness_tx_relay_before_segwit_activation(self): try: self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2) self.log.error("Error: duplicate tx getdata!") - assert(False) + assert False except AssertionError: pass @@ -926,13 +862,13 @@ def test_tx_relay_after_segwit_activation(self): # when spending a non-witness output. assert(len(self.utxo)) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ] tx.rehash() - tx_hash = tx.sha256 + tx_hash = tx.x16r # Verify that unnecessary witnesses are rejected. 
self.test_node.announce_tx_and_wait_for_getdata(tx) @@ -953,7 +889,7 @@ def test_tx_relay_after_segwit_activation(self): tx2.rehash() tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) + tx3.vin.append(CTxIn(COutPoint(tx2.x16r, 0), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) # Add too-large for IsStandard witness and check that it does not enter reject filter @@ -982,14 +918,14 @@ def test_tx_relay_after_segwit_activation(self): tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] # Also check that old_node gets a tx announcement, even though this is # a witness transaction. - self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed + self.old_node.wait_for_inv([CInv(1, tx2.x16r)]) # wait until tx2 was inv'ed self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True) - self.old_node.wait_for_inv([CInv(1, tx3.sha256)]) + self.old_node.wait_for_inv([CInv(1, tx3.x16r)]) # Test that getrawtransaction returns correct witness information # hash, size, vsize raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1) - assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True)) + assert_equal(int(raw_tx["hash"], 16), tx3.calc_x16r(True)) assert_equal(raw_tx["size"], len(tx3.serialize_with_witness())) vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4 assert_equal(raw_tx["vsize"], vsize) @@ -1002,7 +938,7 @@ def test_tx_relay_after_segwit_activation(self): assert_equal(len(self.nodes[0].getrawmempool()), 0) self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) + self.utxo.append(UTXO(tx3.x16r, 0, tx3.vout[0].nValue)) # Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG @@ -1024,14 +960,14 @@ def test_block_relay(self, segwit_activated): assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) self.test_node.test_witness_block(block1, True) - block2 = self.build_next_block(nVersion=4) + block2 = 
self.build_next_block(n_version=4) block2.solve() self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) self.test_node.test_witness_block(block2, True) - block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15))) + block3 = self.build_next_block(n_version=(VB_TOP_BITS | (1 << 15))) block3.solve() self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True) assert(self.test_node.last_message["getdata"].inv[0].type == blocktype) @@ -1039,7 +975,7 @@ def test_block_relay(self, segwit_activated): # Check that we can getdata for witness blocks or regular blocks, # and the right thing happens. - if segwit_activated == False: + if not segwit_activated: # Before activation, we should be able to request old blocks with # or without witness, and they should be the same. chain_height = self.nodes[0].getblockcount() @@ -1068,8 +1004,8 @@ def test_block_relay(self, segwit_activated): self.test_node.test_witness_block(block, accepted=True) # Now try to retrieve it... 
rpc_block = self.nodes[0].getblock(block.hash, False) - non_wit_block = self.test_node.request_block(block.sha256, 2) - wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG) + non_wit_block = self.test_node.request_block(block.x16r, 2) + wit_block = self.test_node.request_block(block.x16r, 2|MSG_WITNESS_FLAG) assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block)) assert_equal(wit_block.serialize(False), non_wit_block.serialize()) assert_equal(wit_block.serialize(True), block.serialize(True)) @@ -1082,7 +1018,7 @@ def test_block_relay(self, segwit_activated): assert_equal(rpc_details["weight"], weight) # Upgraded node should not ask for blocks from unupgraded - block4 = self.build_next_block(nVersion=4) + block4 = self.build_next_block(n_version=4) block4.solve() self.old_node.getdataset = set() @@ -1094,11 +1030,11 @@ def test_block_relay(self, segwit_activated): # and then check to see if that particular getdata has been received. # Since 0.14, inv's will only be responded to with a getheaders, so send a header # to announce this block. - msg = msg_headers() + msg = MsgHeaders() msg.headers = [ CBlockHeader(block4) ] self.old_node.send_message(msg) self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0]) - assert(block4.sha256 not in self.old_node.getdataset) + assert(block4.x16r not in self.old_node.getdataset) # V0 segwit outputs should be standard after activation, but not before. 
def test_standardness_v0(self, segwit_activated): @@ -1114,7 +1050,7 @@ def test_standardness_v0(self, segwit_activated): # First prepare a p2sh output (so that spending it will pass standardness) p2sh_tx = CTransaction() - p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] + p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")] p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)] p2sh_tx.rehash() @@ -1126,7 +1062,7 @@ def test_standardness_v0(self, segwit_activated): # Now test standardness of v0 P2WSH outputs. # Start by creating a transaction with two outputs. tx = CTransaction() - tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] + tx.vin = [CTxIn(COutPoint(p2sh_tx.x16r, 0), CScript([witness_program]))] tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)] tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later tx.rehash() @@ -1138,13 +1074,13 @@ def test_standardness_v0(self, segwit_activated): tx2 = CTransaction() if segwit_activated: # if tx was accepted, then we spend the second output. - tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")] + tx2.vin = [CTxIn(COutPoint(tx.x16r, 1), b"")] tx2.vout = [CTxOut(7000, scriptPubKey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] else: # if tx wasn't accepted, we just re-spend the p2sh output we started with. - tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] + tx2.vin = [CTxIn(COutPoint(p2sh_tx.x16r, 0), CScript([witness_program]))] tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)] tx2.rehash() @@ -1156,7 +1092,7 @@ def test_standardness_v0(self, segwit_activated): # tx and tx2 were both accepted. Don't bother trying to reclaim the # P2PKH output; just send tx's first output back to an anyone-can-spend. 
sync_mempools([self.nodes[0], self.nodes[1]]) - tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] + tx3.vin = [CTxIn(COutPoint(tx.x16r, 0), b"")] tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))] tx3.wit.vtxinwit.append(CTxInWitness()) tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] @@ -1164,7 +1100,7 @@ def test_standardness_v0(self, segwit_activated): self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True) else: # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output. - tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] + tx3.vin = [CTxIn(COutPoint(p2sh_tx.x16r, 0), CScript([witness_program]))] tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)] tx3.rehash() self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True) @@ -1172,7 +1108,7 @@ def test_standardness_v0(self, segwit_activated): self.nodes[0].generate(1) sync_blocks(self.nodes) self.utxo.pop(0) - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) + self.utxo.append(UTXO(tx3.x16r, 0, tx3.vout[0].nValue)) assert_equal(len(self.nodes[1].getrawmempool()), 0) @@ -1182,9 +1118,9 @@ def test_segwit_versions(self): self.log.info("Testing standardness/consensus for segwit versions (0-16)") assert(len(self.utxo)) NUM_TESTS = 17 # will test OP_0, OP1, ..., OP_16 - if (len(self.utxo) < NUM_TESTS): + if len(self.utxo) < NUM_TESTS: tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS for i in range(NUM_TESTS): tx.vout.append(CTxOut(split_value, CScript([OP_TRUE]))) @@ -1194,7 +1130,7 @@ def test_segwit_versions(self): self.test_node.test_witness_block(block, accepted=True) self.utxo.pop(0) for i in range(NUM_TESTS): - self.utxo.append(UTXO(tx.sha256, i, split_value)) + self.utxo.append(UTXO(tx.x16r, i, split_value)) 
sync_blocks(self.nodes) temp_utxo = [] @@ -1207,13 +1143,13 @@ def test_segwit_versions(self): count += 1 # First try to spend to a future version segwit scriptPubKey. scriptPubKey = CScript([CScriptOp(version), witness_hash]) - tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] + tx.vin = [CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")] tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)] tx.rehash() self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False) self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True) self.utxo.pop(0) - temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue)) + temp_utxo.append(UTXO(tx.x16r, 0, tx.vout[0].nValue)) self.nodes[0].generate(1) # Mine all the transactions sync_blocks(self.nodes) @@ -1223,7 +1159,7 @@ def test_segwit_versions(self): # are non-standard scriptPubKey = CScript([CScriptOp(OP_1), witness_hash]) tx2 = CTransaction() - tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] + tx2.vin = [CTxIn(COutPoint(tx.x16r, 0), b"")] tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)] tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] @@ -1233,13 +1169,13 @@ def test_segwit_versions(self): self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True) self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False) temp_utxo.pop() # last entry in temp_utxo was the output we just spent - temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) + temp_utxo.append(UTXO(tx2.x16r, 0, tx2.vout[0].nValue)) # Spend everything in temp_utxo back to an OP_TRUE output. 
tx3 = CTransaction() total_value = 0 for i in temp_utxo: - tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) + tx3.vin.append(CTxIn(COutPoint(i.x16r, i.n), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] @@ -1259,7 +1195,7 @@ def test_segwit_versions(self): sync_blocks(self.nodes) # Add utxo to our list - self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue)) + self.utxo.append(UTXO(tx3.x16r, 0, tx3.vout[0].nValue)) def test_premature_coinbase_witness_spend(self): @@ -1276,7 +1212,7 @@ def test_premature_coinbase_witness_spend(self): self.test_node.test_witness_block(block, accepted=True) spend_tx = CTransaction() - spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] + spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].x16r, 0), b"")] spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)] spend_tx.wit.vtxinwit.append(CTxInWitness()) spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ] @@ -1310,7 +1246,7 @@ def test_signature_version_1(self): # First create a witness output for use in the tests. 
assert(len(self.utxo)) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() @@ -1323,33 +1259,33 @@ def test_signature_version_1(self): self.utxo.pop(0) # Test each hashtype - prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) + prev_utxo = UTXO(tx.x16r, 0, tx.vout[0].nValue) for sigflag in [ 0, SIGHASH_ANYONECANPAY ]: for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]: hashtype |= sigflag block = self.build_next_block() tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) + tx.vin.append(CTxIn(COutPoint(prev_utxo.x16r, prev_utxo.n), b"")) tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey)) tx.wit.vtxinwit.append(CTxInWitness()) # Too-large input value - sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key) + sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key) self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=False) # Too-small input value - sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key) + sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key) block.vtx.pop() # remove last tx self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=False) # Now try correct value - sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) + sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) block.vtx.pop() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) - prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue) + prev_utxo = UTXO(tx.x16r, 0, tx.vout[0].nValue) # Test combinations of signature hashes. 
# Split the utxo into a lot of outputs. @@ -1360,14 +1296,14 @@ def test_signature_version_1(self): NUM_TESTS = 500 temp_utxos = [] tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) + tx.vin.append(CTxIn(COutPoint(prev_utxo.x16r, prev_utxo.n), b"")) split_value = prev_utxo.nValue // NUM_TESTS for i in range(NUM_TESTS): tx.vout.append(CTxOut(split_value, scriptPubKey)) tx.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) + sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) for i in range(NUM_TESTS): - temp_utxos.append(UTXO(tx.sha256, i, split_value)) + temp_utxos.append(UTXO(tx.x16r, i, split_value)) block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) @@ -1377,7 +1313,7 @@ def test_signature_version_1(self): used_sighash_single_out_of_bounds = False for i in range(NUM_TESTS): # Ping regularly to keep the connection alive - if (not i % 100): + if not i % 100: self.test_node.sync_with_ping() # Choose random number of inputs to use. num_inputs = random.randint(1, 10) @@ -1387,39 +1323,39 @@ def test_signature_version_1(self): assert(len(temp_utxos) > num_inputs) tx = CTransaction() total_value = 0 - for i in range(num_inputs): - tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b"")) + for j in range(num_inputs): + tx.vin.append(CTxIn(COutPoint(temp_utxos[j].x16r, temp_utxos[j].n), b"")) tx.wit.vtxinwit.append(CTxInWitness()) - total_value += temp_utxos[i].nValue + total_value += temp_utxos[j].nValue split_value = total_value // num_outputs - for i in range(num_outputs): + for _ in range(num_outputs): tx.vout.append(CTxOut(split_value, scriptPubKey)) - for i in range(num_inputs): + for k in range(num_inputs): # Now try to sign each input, using a random hashtype. 
anyonecanpay = 0 if random.randint(0, 1): anyonecanpay = SIGHASH_ANYONECANPAY hashtype = random.randint(1, 3) | anyonecanpay - sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) - if (hashtype == SIGHASH_SINGLE and i >= num_outputs): + sign_p2pk_witness_input(witness_program, tx, k, hashtype, temp_utxos[k].nValue, key) + if hashtype == SIGHASH_SINGLE and k >= num_outputs: used_sighash_single_out_of_bounds = True tx.rehash() - for i in range(num_outputs): - temp_utxos.append(UTXO(tx.sha256, i, split_value)) + for l in range(num_outputs): + temp_utxos.append(UTXO(tx.x16r, l, split_value)) temp_utxos = temp_utxos[num_inputs:] block.vtx.append(tx) # Test the block periodically, if we're close to maxblocksize - if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000): + if get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000: self.update_witness_block_with_transactions(block, []) self.test_node.test_witness_block(block, accepted=True) block = self.build_next_block() - if (not used_sighash_single_out_of_bounds): + if not used_sighash_single_out_of_bounds: self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value") # Test the transactions we've added to the block - if (len(block.vtx) > 1): + if len(block.vtx) > 1: self.update_witness_block_with_transactions(block, []) self.test_node.test_witness_block(block, accepted=True) @@ -1427,16 +1363,16 @@ def test_signature_version_1(self): pubkeyhash = hash160(pubkey) scriptPKH = CScript([OP_0, pubkeyhash]) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(temp_utxos[0].x16r, temp_utxos[0].n), b"")) tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH)) tx.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) + sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) tx2 = 
CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) - script = GetP2PKHScript(pubkeyhash) - sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) + script = get_p2pkh_script(pubkeyhash) + sig_hash = segwit_version1_signature_hash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL # Check that we can't have a scriptSig @@ -1465,17 +1401,17 @@ def test_signature_version_1(self): # Just spend to our usual anyone-can-spend output # Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up # the signatures as we go. - tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) + tx.vin.append(CTxIn(COutPoint(i.x16r, i.n), b"")) tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE]))) tx.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key) + sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_SINGLE | SIGHASH_ANYONECANPAY, i.nValue, key) index += 1 block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) self.test_node.test_witness_block(block, accepted=True) for i in range(len(tx.vout)): - self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue)) + self.utxo.append(UTXO(tx.x16r, i, tx.vout[i].nValue)) # Test P2SH wrapped witness programs. @@ -1494,7 +1430,7 @@ def test_p2sh_witness(self, segwit_activated): # Fund the P2SH output tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey)) tx.rehash() @@ -1507,7 +1443,7 @@ def test_p2sh_witness(self, segwit_activated): # Now test attempts to spend the output. 
spend_tx = CTransaction() - spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig)) + spend_tx.vin.append(CTxIn(COutPoint(tx.x16r, 0), scriptSig)) spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))) spend_tx.rehash() @@ -1545,7 +1481,7 @@ def test_p2sh_witness(self, segwit_activated): # Update self.utxo self.utxo.pop(0) - self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue)) + self.utxo.append(UTXO(spend_tx.x16r, 0, spend_tx.vout[0].nValue)) # Test the behavior of starting up a segwit-aware node after the softfork # has activated. As segwit requires different block data than pre-segwit @@ -1580,7 +1516,7 @@ def test_upgrade_after_activation(self, node_id): def test_witness_sigops(self): - '''Ensure sigop counting is correct inside witnesses.''' + """Ensure sigop counting is correct inside witnesses.""" self.log.info("Testing sigops limit") assert(len(self.utxo)) @@ -1609,14 +1545,14 @@ def test_witness_sigops(self): # If we spend this script instead, we would exactly reach our sigop # limit (for witness sigops). - witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF]) + witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * extra_sigops_available + [OP_ENDIF]) witness_hash_justright = sha256(witness_program_justright) scriptPubKey_justright = CScript([OP_0, witness_hash_justright]) # First split our available utxo into a bunch of outputs split_value = self.utxo[0].nValue // outputs tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) for i in range(outputs): tx.vout.append(CTxOut(split_value, scriptPubKey)) tx.vout[-2].scriptPubKey = scriptPubKey_toomany @@ -1632,7 +1568,7 @@ def test_witness_sigops(self): # too many sigops. 
total_value = 0 for i in range(outputs-1): - tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, i), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ] total_value += tx.vout[i].nValue @@ -1673,7 +1609,7 @@ def test_witness_sigops(self): # output of tx block_5 = self.build_next_block() tx2.vout.pop() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, outputs-1), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ] tx2.rehash() @@ -1697,7 +1633,7 @@ def test_getblocktemplate_before_lockin(self): # Workaround: # Can either change the tip, or change the mempool and wait 5 seconds - # to trigger a recomputation of getblocktemplate. + # to trigger a re-computation of getblocktemplate. txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16) # Using mocktime lets us avoid sleep() sync_mempools(self.nodes) @@ -1749,7 +1685,7 @@ def test_uncompressed_pubkey(self): pubkeyhash = hash160(pubkey) scriptPKH = CScript([OP_0, pubkeyhash]) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b"")) + tx.vin.append(CTxIn(COutPoint(utxo.x16r, utxo.n), b"")) tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH)) tx.rehash() @@ -1765,10 +1701,10 @@ def test_uncompressed_pubkey(self): scriptWSH = CScript([OP_0, witness_hash]) tx2 = CTransaction() - tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) + tx2.vin.append(CTxIn(COutPoint(tx.x16r, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH)) - script = GetP2PKHScript(pubkeyhash) - sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) + script = get_p2pkh_script(pubkeyhash) + sig_hash = segwit_version1_signature_hash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL 
tx2.wit.vtxinwit.append(CTxInWitness()) tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ] @@ -1789,10 +1725,10 @@ def test_uncompressed_pubkey(self): scriptSig = CScript([scriptWSH]) tx3 = CTransaction() - tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) + tx3.vin.append(CTxIn(COutPoint(tx2.x16r, 0), b"")) tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH)) tx3.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) + sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') @@ -1804,12 +1740,12 @@ def test_uncompressed_pubkey(self): # Test 3: P2SH(P2WSH) # Try to spend the P2SH output created in the last test. # Send it to a P2PKH output, which we'll use in the next test. - scriptPubKey = GetP2PKHScript(pubkeyhash) + scriptPubKey = get_p2pkh_script(pubkeyhash) tx4 = CTransaction() - tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig)) + tx4.vin.append(CTxIn(COutPoint(tx3.x16r, 0), scriptSig)) tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey)) tx4.wit.vtxinwit.append(CTxInWitness()) - sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) + sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') @@ -1820,9 +1756,9 @@ def test_uncompressed_pubkey(self): # Test 4: Uncompressed pubkeys should still be valid in non-segwit # transactions. 
tx5 = CTransaction() - tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b"")) + tx5.vin.append(CTxIn(COutPoint(tx4.x16r, 0), b"")) tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE]))) - (sig_hash, _) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL) + (sig_hash, _) = signature_hash(scriptPubKey, tx5, 0, SIGHASH_ALL) signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL tx5.vin[0].scriptSig = CScript([signature, pubkey]) tx5.rehash() @@ -1831,24 +1767,21 @@ def test_uncompressed_pubkey(self): block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx5]) self.test_node.test_witness_block(block, accepted=True) - self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue)) + self.utxo.append(UTXO(tx5.x16r, 0, tx5.vout[0].nValue)) def test_non_standard_witness(self): self.log.info("Testing detection of non-standard P2WSH witness") pad = chr(1).encode('latin-1') # Create scripts for tests - scripts = [] - scripts.append(CScript([OP_DROP] * 100)) - scripts.append(CScript([OP_DROP] * 99)) - scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60)) - scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61)) + scripts = [CScript([OP_DROP] * 100), CScript([OP_DROP] * 99), CScript([pad * 59] * 59 + [OP_DROP] * 60), + CScript([pad * 59] * 59 + [OP_DROP] * 61)] p2wsh_scripts = [] assert(len(self.utxo)) tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) + tx.vin.append(CTxIn(COutPoint(self.utxo[0].x16r, self.utxo[0].n), b"")) # For each script, generate a pair of P2WSH and P2SH-P2WSH output. 
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2) @@ -1859,7 +1792,7 @@ def test_non_standard_witness(self): tx.vout.append(CTxOut(outputvalue, p2wsh)) tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL]))) tx.rehash() - txid = tx.sha256 + txid = tx.x16r self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True) self.nodes[0].generate(1) diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py index fe57e8fe02..6c2b71b1e7 100755 --- a/test/functional/p2p_sendheaders.py +++ b/test/functional/p2p_sendheaders.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test behavior of headers messages to announce blocks. + +""" +Test behavior of headers messages to announce blocks. Setup: @@ -74,23 +76,10 @@ Expect: disconnect. """ -from test_framework.mininode import (NodeConnCB, - mininode_lock, - msg_getdata, - msg_getheaders, - msg_headers, - NodeConn, - NetworkThread, - msg_block, - CInv, - msg_inv, - CBlockHeader, - msg_getblocks, - msg_sendheaders) +from test_framework.mininode import NodeConnCB, mininode_lock, MsgGetdata, MsgGetHeaders, MsgHeaders, NodeConn, NetworkThread, MsgBlock, CInv, MsgInv, CBlockHeader, MsgGetBlocks, MsgSendHeaders from test_framework.test_framework import RavenTestFramework -from test_framework.util import (wait_until, sync_blocks, p2p_port, assert_equal) -from test_framework.blocktools import (create_block, create_coinbase) - +from test_framework.util import wait_until, sync_blocks, p2p_port, assert_equal +from test_framework.blocktools import create_block, create_coinbase direct_fetch_response_time = 0.05 @@ -108,19 +97,19 @@ def clear_last_announcement(self): # Request data for a list of block hashes def get_data(self, block_hashes): - msg = msg_getdata() + msg = MsgGetdata() for x in block_hashes: 
msg.inv.append(CInv(2, x)) self.connection.send_message(msg) def get_headers(self, locator, hashstop): - msg = msg_getheaders() + msg = MsgGetHeaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_block_inv(self, blockhash): - msg = msg_inv() + msg = MsgInv() msg.inv = [CInv(2, blockhash)] self.connection.send_message(msg) @@ -131,17 +120,17 @@ def on_inv(self, conn, message): def on_headers(self, conn, message): if len(message.headers): self.block_announced = True - message.headers[-1].calc_sha256() + message.headers[-1].calc_x16r() self.last_blockhash_announced = message.headers[-1].sha256 # Test whether the last announcement we received had the # right header or the right inv # inv and headers should be lists of block hashes def check_last_announcement(self, headers=None, inv=None): - expect_headers = headers if headers != None else [] - expect_inv = inv if inv != None else [] + expect_headers = headers if headers is not None else [] + expect_inv = inv if inv is not None else [] test_function = lambda: self.block_announced - wait_until(test_function, timeout=60, lock=mininode_lock) + wait_until(test_function, timeout=60, lock=mininode_lock, err_msg="Waiting for last announcement") with mininode_lock: self.block_announced = False @@ -163,26 +152,27 @@ def check_last_announcement(self, headers=None, inv=None): self.last_message.pop("headers", None) return success + # noinspection PyMethodOverriding def wait_for_getdata(self, hash_list, timeout=60): - if hash_list == []: + if not hash_list: return test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list - wait_until(test_function, timeout=timeout, lock=mininode_lock) + wait_until(test_function, timeout=timeout, lock=mininode_lock, err_msg="waiting for getData()") return def wait_for_block_announcement(self, block_hash, timeout=60): test_function = lambda: self.last_blockhash_announced == block_hash - 
wait_until(test_function, timeout=timeout, lock=mininode_lock) + wait_until(test_function, timeout=timeout, lock=mininode_lock, err_msg="waiting for block announcement") return def send_header_for_blocks(self, new_blocks): - headers_message = msg_headers() + headers_message = MsgHeaders() headers_message.headers = [ CBlockHeader(b) for b in new_blocks ] self.send_message(headers_message) def send_getblocks(self, locator): - getblocks_message = msg_getblocks() + getblocks_message = MsgGetBlocks() getblocks_message.locator.vHave = locator self.send_message(getblocks_message) @@ -224,11 +214,10 @@ def run_test(self): self.p2p_connections = [inv_node, test_node] - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node), + NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)] # Set nServices to 0 for test_node, so no block download will occur outside of # direct fetching - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) inv_node.add_connection(connections[0]) test_node.add_connection(connections[1]) @@ -247,6 +236,7 @@ def run_test(self): # PART 1 # 1. Mine a block; expect inv announcements each time self.log.info("Part 1: headers don't start before sendheaders message...") + block_time = 0 for i in range(4): old_tip = tip tip = self.mine_blocks(1) @@ -272,7 +262,7 @@ def run_test(self): new_block.solve() test_node.send_header_for_blocks([new_block]) test_node.wait_for_getdata([new_block.sha256]) - test_node.send_message(msg_block(new_block)) + test_node.send_message(MsgBlock(new_block)) test_node.sync_with_ping() # make sure this block is processed inv_node.clear_last_announcement() test_node.clear_last_announcement() @@ -282,7 +272,7 @@ def run_test(self): # PART 2 # 2. Send a sendheaders message and test that headers announcements # commence and keep working. 
- test_node.send_message(msg_sendheaders()) + test_node.send_message(MsgSendHeaders()) prev_tip = int(self.nodes[0].getbestblockhash(), 16) test_node.get_headers(locator=[prev_tip], hashstop=0) test_node.sync_with_ping() @@ -326,7 +316,7 @@ def run_test(self): # getdata requests (the check is further down) inv_node.send_header_for_blocks(blocks) inv_node.sync_with_ping() - [ test_node.send_message(msg_block(x)) for x in blocks ] + [test_node.send_message(MsgBlock(x)) for x in blocks] test_node.sync_with_ping() inv_node.sync_with_ping() # This block should not be announced to the inv node (since it also @@ -419,7 +409,7 @@ def run_test(self): tip = blocks[-1].sha256 block_time += 1 height += 1 - inv_node.send_message(msg_block(blocks[-1])) + inv_node.send_message(MsgBlock(blocks[-1])) inv_node.sync_with_ping() # Make sure blocks are processed test_node.last_message.pop("getdata", None) @@ -440,9 +430,9 @@ def run_test(self): test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time) + test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=int(direct_fetch_response_time)) - [ test_node.send_message(msg_block(x)) for x in blocks ] + [test_node.send_message(MsgBlock(x)) for x in blocks] test_node.sync_with_ping() @@ -471,13 +461,13 @@ def run_test(self): # both blocks (same work as tip) test_node.send_header_for_blocks(blocks[1:2]) test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time) + test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=int(direct_fetch_response_time)) # Announcing 16 more headers should trigger direct fetch for 14 more # blocks test_node.send_header_for_blocks(blocks[2:18]) test_node.sync_with_ping() - test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) + test_node.wait_for_getdata([x.sha256 for x in 
blocks[2:16]], timeout=int(direct_fetch_response_time)) # Announcing 1 more header should not trigger any response test_node.last_message.pop("getdata", None) @@ -489,7 +479,7 @@ def run_test(self): self.log.info("Part 4: success!") # Now deliver all those blocks we announced. - [ test_node.send_message(msg_block(x)) for x in blocks ] + [test_node.send_message(MsgBlock(x)) for x in blocks] self.log.info("Part 5: Testing handling of unconnecting headers") # First we test that receipt of an unconnecting header doesn't prevent @@ -511,7 +501,7 @@ def run_test(self): test_node.wait_for_getheaders() test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) - [ test_node.send_message(msg_block(x)) for x in blocks ] + [test_node.send_message(MsgBlock(x)) for x in blocks] test_node.sync_with_ping() assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py index 0aad462531..578594f36b 100755 --- a/test/functional/p2p_timeouts.py +++ b/test/functional/p2p_timeouts.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test various net timeouts. + +""" +Test various net timeouts. 
- Create three ravend nodes: @@ -23,8 +25,7 @@ """ from time import sleep - -from test_framework.mininode import (NodeConn, NodeConnCB, NetworkThread, msg_ping) +from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread, MsgPing from test_framework.test_framework import RavenTestFramework from test_framework.util import p2p_port @@ -44,10 +45,9 @@ def run_test(self): self.no_version_node = TestNode() # never send version (just ping) self.no_send_node = TestNode() # never send anything - connections = [] - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False)) - connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node), + NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False), + NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False)] self.no_verack_node.add_connection(connections[0]) self.no_version_node.add_connection(connections[1]) self.no_send_node.add_connection(connections[2]) @@ -56,11 +56,11 @@ def run_test(self): sleep(1) - assert(self.no_verack_node.connected) - assert(self.no_version_node.connected) - assert(self.no_send_node.connected) + assert self.no_verack_node.connected + assert self.no_version_node.connected + assert self.no_send_node.connected - ping_msg = msg_ping() + ping_msg = MsgPing() connections[0].send_message(ping_msg) connections[1].send_message(ping_msg) @@ -68,9 +68,9 @@ def run_test(self): assert "version" in self.no_verack_node.last_message - assert(self.no_verack_node.connected) - assert(self.no_version_node.connected) - assert(self.no_send_node.connected) + assert self.no_verack_node.connected + assert self.no_version_node.connected + assert 
self.no_send_node.connected connections[0].send_message(ping_msg) connections[1].send_message(ping_msg) diff --git a/test/functional/p2p_acceptblock.py b/test/functional/p2p_unrequested_blocks.py similarity index 84% rename from test/functional/p2p_acceptblock.py rename to test/functional/p2p_unrequested_blocks.py index 8054e8d3a3..e3f1f45f4b 100755 --- a/test/functional/p2p_acceptblock.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test processing of unrequested blocks. + +""" +Test processing of unrequested blocks. Since behavior differs when receiving unrequested blocks from whitelisted peers versus non-whitelisted peers, this tests the behavior of both (effectively two @@ -58,24 +60,11 @@ """ -from test_framework.mininode import (NodeConn, - NodeConnCB, - NetworkThread, - msg_block, - msg_headers, - CBlockHeader, - mininode_lock, - msg_inv, - CInv) -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (os, - p2p_port, - assert_equal, - assert_raises_rpc_error, - connect_nodes, - sync_blocks) import time -from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread, MsgBlock, MsgHeaders, CBlockHeader, mininode_lock, MsgInv, CInv +from test_framework.test_framework import RavenTestFramework +from test_framework.util import os, p2p_port, assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks +from test_framework.blocktools import create_block, create_coinbase class AcceptBlockTest(RavenTestFramework): def add_options(self, parser): @@ -102,10 +91,9 @@ def run_test(self): white_node = NodeConnCB() # connects to node1 (whitelisted) min_work_node = NodeConnCB() # connects to node2 (not whitelisted) - connections = [] - 
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) - connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)) + connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node), + NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node), + NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)] test_node.add_connection(connections[0]) white_node.add_connection(connections[1]) min_work_node.add_connection(connections[2]) @@ -129,9 +117,9 @@ def run_test(self): blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 - test_node.send_message(msg_block(blocks_h2[0])) - white_node.send_message(msg_block(blocks_h2[1])) - min_work_node.send_message(msg_block(blocks_h2[2])) + test_node.send_message(MsgBlock(blocks_h2[0])) + white_node.send_message(MsgBlock(blocks_h2[1])) + min_work_node.send_message(MsgBlock(blocks_h2[2])) for x in [test_node, white_node, min_work_node]: x.sync_with_ping() @@ -145,8 +133,8 @@ def run_test(self): for i in range(2): blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1)) blocks_h2f[i].solve() - test_node.send_message(msg_block(blocks_h2f[0])) - white_node.send_message(msg_block(blocks_h2f[1])) + test_node.send_message(MsgBlock(blocks_h2f[0])) + white_node.send_message(MsgBlock(blocks_h2f[1])) for x in [test_node, white_node]: x.sync_with_ping() @@ -165,8 +153,8 @@ def run_test(self): for i in range(2): blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1)) blocks_h3[i].solve() - test_node.send_message(msg_block(blocks_h3[0])) - white_node.send_message(msg_block(blocks_h3[1])) + test_node.send_message(MsgBlock(blocks_h3[0])) + white_node.send_message(MsgBlock(blocks_h3[1])) for x in [test_node, white_node]: x.sync_with_ping() @@ -188,14 +176,14 @@ def 
run_test(self): # the last (height-too-high) on node0. Node1 should process the tip if # we give it the headers chain leading to the tip. tips = blocks_h3 - headers_message = msg_headers() + headers_message = MsgHeaders() all_blocks = [] # node0's blocks for j in range(2): for i in range(288): next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1) next_block.solve() if j==0: - test_node.send_message(msg_block(next_block)) + test_node.send_message(MsgBlock(next_block)) all_blocks.append(next_block) else: headers_message.headers.append(CBlockHeader(next_block)) @@ -209,7 +197,7 @@ def run_test(self): headers_message.headers.pop() # Ensure the last block is unrequested white_node.send_message(headers_message) # Send headers leading to tip - white_node.send_message(msg_block(tips[1])) # Now deliver the tip + white_node.send_message(MsgBlock(tips[1])) # Now deliver the tip white_node.sync_with_ping() self.nodes[1].getblock(tips[1].hash) self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer") @@ -217,7 +205,7 @@ def run_test(self): # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). - test_node.send_message(msg_block(blocks_h2f[0])) + test_node.send_message(MsgBlock(blocks_h2f[0])) # Here, if the sleep is too short, the test could falsely succeed (if the # node hasn't processed the block by the time the sleep returns, and then @@ -234,7 +222,7 @@ def run_test(self): with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) - test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) + test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)])) test_node.sync_with_ping() with mininode_lock: @@ -245,7 +233,7 @@ def run_test(self): self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. 
Send the missing block for the third time (now it is requested) - test_node.send_message(msg_block(blocks_h2f[0])) + test_node.send_message(MsgBlock(blocks_h2f[0])) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) diff --git a/test/functional/rpc_addressindex.py b/test/functional/rpc_addressindex.py index 338becb384..ab2b2c9f19 100755 --- a/test/functional/rpc_addressindex.py +++ b/test/functional/rpc_addressindex.py @@ -4,16 +4,14 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Test addressindex generation and fetching -# +"""Test addressindex generation and fetching""" +import binascii import time from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, assert_equal) -from test_framework.script import (CScript, OP_HASH160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_CHECKSIG) -from test_framework.mininode import (CTransaction, CTxIn, CTxOut, COutPoint) -import binascii +from test_framework.util import connect_nodes_bi, assert_equal +from test_framework.script import CScript, OP_HASH160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_CHECKSIG +from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint class AddressIndexTest(RavenTestFramework): @@ -55,37 +53,37 @@ def run_test(self): # Check p2pkh and p2sh address indexes self.log.info("Testing p2pkh and p2sh address index...") - txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10) + tx_id0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10) self.nodes[0].generate(1) - txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10) + tx_idb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10) self.nodes[0].generate(1) - txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15) + tx_id1 = 
self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15) self.nodes[0].generate(1) - txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15) + tx_idb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15) self.nodes[0].generate(1) - txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20) + tx_id2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20) self.nodes[0].generate(1) - txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20) + tx_idb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20) self.nodes[0].generate(1) self.sync_all() txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs") assert_equal(len(txids), 3) - assert_equal(txids[0], txid0) - assert_equal(txids[1], txid1) - assert_equal(txids[2], txid2) + assert_equal(txids[0], tx_id0) + assert_equal(txids[1], tx_id1) + assert_equal(txids[2], tx_id2) - txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br") - assert_equal(len(txidsb), 3) - assert_equal(txidsb[0], txidb0) - assert_equal(txidsb[1], txidb1) - assert_equal(txidsb[2], txidb2) + tx_idsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br") + assert_equal(len(tx_idsb), 3) + assert_equal(tx_idsb[0], tx_idb0) + assert_equal(tx_idsb[1], tx_idb1) + assert_equal(tx_idsb[2], tx_idb2) # Check that limiting by height works self.log.info("Testing querying txids by range of block heights..") @@ -95,18 +93,18 @@ def run_test(self): "end": 110 }) assert_equal(len(height_txids), 2) - assert_equal(height_txids[0], txidb0) - assert_equal(height_txids[1], txidb1) + assert_equal(height_txids[0], tx_idb0) + assert_equal(height_txids[1], tx_idb1) # Check that multiple addresses works - multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]}) - assert_equal(len(multitxids), 6) - 
assert_equal(multitxids[0], txid0) - assert_equal(multitxids[1], txidb0) - assert_equal(multitxids[2], txid1) - assert_equal(multitxids[3], txidb1) - assert_equal(multitxids[4], txid2) - assert_equal(multitxids[5], txidb2) + multi_tx_ids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]}) + assert_equal(len(multi_tx_ids), 6) + assert_equal(multi_tx_ids[0], tx_id0) + assert_equal(multi_tx_ids[1], tx_idb0) + assert_equal(multi_tx_ids[2], tx_id1) + assert_equal(multi_tx_ids[3], tx_idb1) + assert_equal(multi_tx_ids[4], tx_id2) + assert_equal(multi_tx_ids[5], tx_idb2) # Check that balances are correct balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br") @@ -114,12 +112,12 @@ def run_test(self): # Check that outputs with the same address will only return one txid self.log.info("Testing for txid uniqueness...") - addressHash = bytes([99,73,164,24,252,69,120,209,10,55,43,84,180,92,40,12,200,196,56,47]) - scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL]) + address_hash = bytes([99,73,164,24,252,69,120,209,10,55,43,84,180,92,40,12,200,196,56,47]) + script_pub_key = CScript([OP_HASH160, address_hash, OP_EQUAL]) unspent = self.nodes[0].listunspent() tx = CTransaction() tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))] - tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)] + tx.vout = [CTxOut(10, script_pub_key), CTxOut(11, script_pub_key)] tx.rehash() signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) @@ -128,9 +126,9 @@ def run_test(self): self.nodes[0].generate(1) self.sync_all() - txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br") - assert_equal(len(txidsmany), 4) - assert_equal(txidsmany[3], sent_txid) + tx_ids_many = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br") + assert_equal(len(tx_ids_many), 4) + assert_equal(tx_ids_many[3], 
sent_txid) # Check that balances are correct self.log.info("Testing balances...") @@ -141,15 +139,15 @@ def run_test(self): self.log.info("Testing balances after spending...") privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG" address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW" - addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) - scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG]) + address_hash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) + script_pub_key2 = CScript([OP_DUP, OP_HASH160, address_hash2, OP_EQUALVERIFY, OP_CHECKSIG]) self.nodes[0].importprivkey(privkey2) unspent = self.nodes[0].listunspent() tx = CTransaction() tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))] amount = int(unspent[0]["amount"] * 100000000 - 230000) - tx.vout = [CTxOut(amount, scriptPubKey2)] + tx.vout = [CTxOut(amount, script_pub_key2)] signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True) self.nodes[0].generate(1) @@ -161,11 +159,11 @@ def run_test(self): tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))] send_amount = 1 * 100000000 + 12840 change_amount = amount - send_amount - 230000 - tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)] + tx.vout = [CTxOut(change_amount, script_pub_key2), CTxOut(send_amount, script_pub_key)] tx.rehash() signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) - sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True) + self.nodes[0].sendrawtransaction(signed_tx["hex"], True) self.nodes[0].generate(1) self.sync_all() @@ -182,8 +180,8 @@ def run_test(self): assert_equal(deltas[0]["blockindex"], 1) # Check that entire range will be queried - deltasAll = self.nodes[1].getaddressdeltas({"addresses": 
[address2]}) - assert_equal(len(deltasAll), len(deltas)) + deltas_all = self.nodes[1].getaddressdeltas({"addresses": [address2]}) + assert_equal(len(deltas_all), len(deltas)) # Check that deltas can be returned from range of block heights deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113}) @@ -230,45 +228,45 @@ def run_test(self): # Check mempool indexing self.log.info("Testing mempool indexing...") - privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD" + priv_key3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD" address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB" - addressHash3 = bytes([170,152,114,181,187,205,181,17,216,158,14,17,170,39,218,115,253,44,63,80]) - scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG]) + address_hash3 = bytes([170,152,114,181,187,205,181,17,216,158,14,17,170,39,218,115,253,44,63,80]) + script_pub_key3 = CScript([OP_DUP, OP_HASH160, address_hash3, OP_EQUALVERIFY, OP_CHECKSIG]) #address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ" - scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL]) + script_pub_key4 = CScript([OP_HASH160, address_hash3, OP_EQUAL]) unspent = self.nodes[2].listunspent() tx = CTransaction() tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))] amount = int(unspent[0]["amount"] * 100000000 - 230000) - tx.vout = [CTxOut(amount, scriptPubKey3)] + tx.vout = [CTxOut(amount, script_pub_key3)] tx.rehash() signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) - memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True) + mem_txid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True) time.sleep(2) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))] amount = int(unspent[1]["amount"] * 100000000 - 300000) tx2.vout = [ - CTxOut(int(amount / 4), scriptPubKey3), - CTxOut(int(amount / 4), scriptPubKey3), - 
CTxOut(int(amount / 4), scriptPubKey4), - CTxOut(int(amount / 4), scriptPubKey4) + CTxOut(int(amount / 4), script_pub_key3), + CTxOut(int(amount / 4), script_pub_key3), + CTxOut(int(amount / 4), script_pub_key4), + CTxOut(int(amount / 4), script_pub_key4) ] tx2.rehash() signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8")) - memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True) + mem_txid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True) time.sleep(2) mempool = self.nodes[2].getaddressmempool({"addresses": [address3]}) assert_equal(len(mempool), 3) - assert_equal(mempool[0]["txid"], memtxid1) + assert_equal(mempool[0]["txid"], mem_txid1) assert_equal(mempool[0]["address"], address3) assert_equal(mempool[0]["index"], 0) - assert_equal(mempool[1]["txid"], memtxid2) + assert_equal(mempool[1]["txid"], mem_txid2) assert_equal(mempool[1]["index"], 0) - assert_equal(mempool[2]["txid"], memtxid2) + assert_equal(mempool[2]["txid"], mem_txid2) assert_equal(mempool[2]["index"], 1) self.nodes[2].generate(1) @@ -278,21 +276,21 @@ def run_test(self): tx = CTransaction() tx.vin = [ - CTxIn(COutPoint(int(memtxid2, 16), 0)), - CTxIn(COutPoint(int(memtxid2, 16), 1)) + CTxIn(COutPoint(int(mem_txid2, 16), 0)), + CTxIn(COutPoint(int(mem_txid2, 16), 1)) ] - tx.vout = [CTxOut(int(amount / 2 - 340000), scriptPubKey2)] + tx.vout = [CTxOut(int(amount / 2 - 340000), script_pub_key2)] tx.rehash() - self.nodes[2].importprivkey(privKey3) + self.nodes[2].importprivkey(priv_key3) signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) self.nodes[2].sendrawtransaction(signed_tx3["hex"], True) time.sleep(2) mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]}) assert_equal(len(mempool3), 2) - assert_equal(mempool3[0]["prevtxid"], memtxid2) + assert_equal(mempool3[0]["prevtxid"], mem_txid2) assert_equal(mempool3[0]["prevout"], 0) - assert_equal(mempool3[1]["prevtxid"], 
memtxid2) + assert_equal(mempool3[1]["prevtxid"], mem_txid2) assert_equal(mempool3[1]["prevout"], 1) # sending and receiving to the same address diff --git a/test/functional/rpc_assettransfer.py b/test/functional/rpc_assettransfer.py index 5e0b9088ba..cd9268aff1 100755 --- a/test/functional/rpc_assettransfer.py +++ b/test/functional/rpc_assettransfer.py @@ -4,16 +4,10 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Test transferring assets rpc calls -# +"""Test transferring assets rpc calls""" -import time from test_framework.test_framework import RavenTestFramework -from test_framework.util import * -from test_framework.script import * -from test_framework.mininode import * -import binascii +from test_framework.util import connect_all_nodes_bi, assert_equal, assert_raises_rpc_error class AssetTransferTest(RavenTestFramework): @@ -50,8 +44,7 @@ def run_test(self): self.log.info("Calling issue()...") address0 = n0.getnewaddress() ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8" - n0.issue(asset_name="TRANSFER_TEST", qty=1000, to_address=address0, change_address="", \ - units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) + n0.issue(asset_name="TRANSFER_TEST", qty=1000, to_address=address0, change_address="", units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash) n0.generate(1) @@ -101,7 +94,6 @@ def run_test(self): n0_from_addresses = [n0_asset_change, n0_new_address] - n0_from_address = n0_asset_change n1_already_received_address_2 = n1_address n1_address = n1.getnewaddress() @@ -123,8 +115,7 @@ def run_test(self): # Add the address the only contain 150 TRANSFER_TEST assets n0_from_addresses = [n0_asset_change] - assert_raises_rpc_error(-25, "Insufficient asset funds", \ - n0.transferfromaddresses, "TRANSFER_TEST", n0_from_addresses, 450, n1_address, '', 0, n0_rvn_change, n0_asset_change) + assert_raises_rpc_error(-25, "Insufficient asset 
funds", n0.transferfromaddresses, "TRANSFER_TEST", n0_from_addresses, 450, n1_address, '', 0, n0_rvn_change, n0_asset_change) # Verify that the failed transaction doesn't change the already mined address values on the wallet assert_equal(n0.listassetbalancesbyaddress(n1_already_received_address)["TRANSFER_TEST"], 200) diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py index ef13cf6c67..77edd91855 100755 --- a/test/functional/rpc_bind.py +++ b/test/functional/rpc_bind.py @@ -3,14 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test running ravend with the -rpcbind and -rpcallowip options.""" import socket import sys - -from test_framework.test_framework import (RavenTestFramework, SkipTest) -from test_framework.util import (assert_equal, get_rpc_proxy, rpc_url, get_datadir_path, rpc_port, assert_raises_rpc_error) -from test_framework.netutil import (addr_to_hex, get_bind_addrs, all_interfaces) +from test_framework.test_framework import RavenTestFramework, SkipTest +from test_framework.util import assert_equal, get_rpc_proxy, rpc_url, get_datadir_path, rpc_port, assert_raises_rpc_error +from test_framework.netutil import addr_to_hex, get_bind_addrs, all_interfaces class RPCBindTest(RavenTestFramework): def set_test_params(self): @@ -21,11 +21,11 @@ def setup_network(self): self.add_nodes(self.num_nodes, None) def run_bind_test(self, allow_ips, connect_to, addresses, expected): - ''' + """ Start a node with requested rpcallowip and rpcbind parameters, then try to connect, and check if the set of bound addresses matches the expected set. 
- ''' + """ self.log.info("Bind test for %s" % str(addresses)) expected = [(addr_to_hex(addr), port) for (addr, port) in expected] base_args = ['-disablewallet', '-nolisten'] @@ -39,10 +39,10 @@ def run_bind_test(self, allow_ips, connect_to, addresses, expected): self.stop_nodes() def run_allowip_test(self, allow_ips, rpchost, rpcport): - ''' + """ Start a node with rpcallow IP, and request getnetworkinfo at a non-localhost IP. - ''' + """ self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport)) base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips] self.nodes[0].rpchost = None @@ -67,23 +67,23 @@ def run_test(self): try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) s.connect(("::1",1)) - s.close + s.close() except OSError: raise SkipTest("This test requires IPv6 support.") self.log.info("Using interface %s for testing" % non_loopback_ip) - defaultport = rpc_port(0) + default_port = rpc_port(0) # check default without rpcallowip (IPv4 and IPv6 localhost) self.run_bind_test(None, '127.0.0.1', [], - [('127.0.0.1', defaultport), ('::1', defaultport)]) + [('127.0.0.1', default_port), ('::1', default_port)]) # check default with rpcallowip (IPv6 any) self.run_bind_test(['127.0.0.1'], '127.0.0.1', [], - [('::0', defaultport)]) + [('::0', default_port)]) # check only IPv4 localhost (explicit) self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'], - [('127.0.0.1', defaultport)]) + [('127.0.0.1', default_port)]) # check only IPv4 localhost (explicit) with alternative port self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'], [('127.0.0.1', 32171)]) @@ -92,17 +92,17 @@ def run_test(self): [('127.0.0.1', 32171), ('127.0.0.1', 32172)]) # check only IPv6 localhost (explicit) self.run_bind_test(['[::1]'], '[::1]', ['[::1]'], - [('::1', defaultport)]) + [('::1', default_port)]) # check both IPv4 and IPv6 localhost (explicit) self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'], - [('127.0.0.1', 
defaultport), ('::1', defaultport)]) + [('127.0.0.1', default_port), ('::1', default_port)]) # check only non-loopback interface self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip], - [(non_loopback_ip, defaultport)]) + [(non_loopback_ip, default_port)]) # Check that with invalid rpcallowip, we are denied - self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport) - assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport) + self.run_allowip_test([non_loopback_ip], non_loopback_ip, default_port) + assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, default_port) if __name__ == '__main__': RPCBindTest().main() diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 87a8be917e..67a37eeb04 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test RPCs related to blockchainstate. + +""" +Test RPCs related to blockchainstate. 
Test the following RPCs: - gettxoutsetinfo @@ -21,15 +23,8 @@ from decimal import Decimal import http.client import subprocess - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, - assert_greater_than, - assert_greater_than_or_equal, - assert_raises, - assert_raises_rpc_error, - assert_is_hex_string, - assert_is_hash_string,) +from test_framework.util import assert_equal, assert_greater_than, assert_greater_than_or_equal, assert_raises, assert_raises_rpc_error, assert_is_hex_string, assert_is_hash_string class BlockchainTest(RavenTestFramework): def set_test_params(self): @@ -98,32 +93,32 @@ def _test_getblockchaininfo(self): assert_greater_than(res['size_on_disk'], 0) def _test_getchaintxstats(self): - chaintxstats = self.nodes[0].getchaintxstats(1) + chain_tx_stats = self.nodes[0].getchaintxstats(1) # 200 txs plus genesis tx - assert_equal(chaintxstats['txcount'], 201) + assert_equal(chain_tx_stats['txcount'], 201) # tx rate should be 1 per 1 minutes, or 1/60 # we have to round because of binary math - assert_equal(round(chaintxstats['txrate'] * 60, 1), Decimal(1)) + assert_equal(round(chain_tx_stats['txrate'] * 60, 1), Decimal(1)) b1 = self.nodes[0].getblock(self.nodes[0].getblockhash(1)) b200 = self.nodes[0].getblock(self.nodes[0].getblockhash(200)) time_diff = b200['mediantime'] - b1['mediantime'] - chaintxstats = self.nodes[0].getchaintxstats() - assert_equal(chaintxstats['time'], b200['time']) - assert_equal(chaintxstats['txcount'], 201) - assert_equal(chaintxstats['window_block_count'], 199) - assert_equal(chaintxstats['window_tx_count'], 199) - assert_equal(chaintxstats['window_interval'], time_diff) - assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199)) - - chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1['hash']) - assert_equal(chaintxstats['time'], b1['time']) - assert_equal(chaintxstats['txcount'], 2) - assert_equal(chaintxstats['window_block_count'], 0) - 
assert('window_tx_count' not in chaintxstats) - assert('window_interval' not in chaintxstats) - assert('txrate' not in chaintxstats) + chain_tx_stats = self.nodes[0].getchaintxstats() + assert_equal(chain_tx_stats['time'], b200['time']) + assert_equal(chain_tx_stats['txcount'], 201) + assert_equal(chain_tx_stats['window_block_count'], 199) + assert_equal(chain_tx_stats['window_tx_count'], 199) + assert_equal(chain_tx_stats['window_interval'], time_diff) + assert_equal(round(chain_tx_stats['txrate'] * time_diff, 10), Decimal(199)) + + chain_tx_stats = self.nodes[0].getchaintxstats(blockhash=b1['hash']) + assert_equal(chain_tx_stats['time'], b1['time']) + assert_equal(chain_tx_stats['txcount'], 2) + assert_equal(chain_tx_stats['window_block_count'], 0) + assert('window_tx_count' not in chain_tx_stats) + assert('window_interval' not in chain_tx_stats) + assert('txrate' not in chain_tx_stats) assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, 201) @@ -174,14 +169,14 @@ def _test_getblockheader(self): assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "nonsense") - besthash = node.getbestblockhash() - secondbesthash = node.getblockhash(199) - header = node.getblockheader(besthash) + best_hash = node.getbestblockhash() + second_best_hash = node.getblockhash(199) + header = node.getblockheader(best_hash) - assert_equal(header['hash'], besthash) + assert_equal(header['hash'], best_hash) assert_equal(header['height'], 200) assert_equal(header['confirmations'], 1) - assert_equal(header['previousblockhash'], secondbesthash) + assert_equal(header['previousblockhash'], second_best_hash) assert_is_hex_string(header['chainwork']) assert_is_hash_string(header['hash']) assert_is_hash_string(header['previousblockhash']) diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py index 6393d2613e..662e6d160a 100755 --- a/test/functional/rpc_decodescript.py +++ 
b/test/functional/rpc_decodescript.py @@ -3,12 +3,13 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test decoding scripts via decodescript RPC command.""" +from io import BytesIO from test_framework.test_framework import RavenTestFramework from test_framework.util import assert_equal -from test_framework.mininode import (CTransaction, hex_str_to_bytes, bytes_to_hex_str) -from io import BytesIO +from test_framework.mininode import CTransaction, hex_str_to_bytes, bytes_to_hex_str class DecodeScriptTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py index 7742b8fc64..af6c784232 100755 --- a/test/functional/rpc_deprecated.py +++ b/test/functional/rpc_deprecated.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test deprecation of RPC calls.""" + from test_framework.test_framework import RavenTestFramework from test_framework.util import assert_raises_rpc_error diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index 5912509e66..43dcc19995 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -3,17 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the fundrawtransaction RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, - assert_equal, - Decimal, - assert_raises_rpc_error, - assert_greater_than, - count_bytes, - assert_fee_amount, - assert_greater_than_or_equal) +from test_framework.util import connect_nodes_bi, assert_equal, Decimal, assert_raises_rpc_error, assert_greater_than, count_bytes, assert_fee_amount, assert_greater_than_or_equal def get_unspent(listunspent, amount): for utx in listunspent: @@ -56,9 +50,9 @@ def run_test(self): self.sync_all() # ensure that setting changePosition in fundraw with an exact match is handled properly - rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():5000}) - rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]}) - assert_equal(rawmatch["changepos"], -1) + raw_match = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():5000}) + raw_match = self.nodes[2].fundrawtransaction(raw_match, {"changePosition":1, "subtractFeeFromOutputs":[0]}) + assert_equal(raw_match["changepos"], -1) watchonly_address = self.nodes[0].getnewaddress() watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"] @@ -80,9 +74,10 @@ def run_test(self): inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 1.0 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) + self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) #test that we have enough inputs @@ -92,10 +87,11 @@ def run_test(self): inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.2 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = 
self.nodes[2].decoderawtransaction(rawtx) + self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) #test if we have enough inputs @@ -105,10 +101,11 @@ def run_test(self): inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.6 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) + self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') @@ -120,10 +117,11 @@ def run_test(self): inputs = [ ] outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 } rawtx = self.nodes[2].createrawtransaction(inputs, outputs) - dec_tx = self.nodes[2].decoderawtransaction(rawtx) + self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: @@ -151,7 +149,7 @@ def run_test(self): for out in dec_tx['vout']: totalOut += out['value'] - assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee + assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalOut+fee ##################################################################### @@ -239,6 +237,7 @@ def run_test(self): rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = 
self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 @@ -270,6 +269,7 @@ def run_test(self): rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 @@ -303,6 +303,7 @@ def run_test(self): rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] + assert_greater_than(fee, 0.0, err_msg="Fee Greater Than Zero") dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 @@ -327,7 +328,7 @@ def run_test(self): #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) + assert(0 <= feeDelta <= feeTolerance) ############################################################ ############################################################ @@ -342,7 +343,7 @@ def run_test(self): #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) + assert(0 <= feeDelta <= feeTolerance) ############################################################ @@ -369,7 +370,7 @@ def run_test(self): #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) + assert(0 <= feeDelta <= feeTolerance) ############################################################ @@ -402,7 +403,7 @@ def run_test(self): #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance) + assert(0 <= feeDelta <= feeTolerance) ############################################################ @@ -420,7 +421,7 @@ def run_test(self): # send 1.2 RVN to msig addr - txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) + self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[1].generate(1) self.sync_all() @@ -432,7 +433,7 @@ def 
run_test(self): fundedTx = self.nodes[2].fundrawtransaction(rawtx) signedTx = self.nodes[2].signrawtransaction(fundedTx['hex']) - txId = self.nodes[2].sendrawtransaction(signedTx['hex']) + self.nodes[2].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() @@ -486,7 +487,7 @@ def run_test(self): #now we need to unlock self.nodes[1].walletpassphrase("test", 600) signedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) - txId = self.nodes[1].sendrawtransaction(signedTx['hex']) + self.nodes[1].sendrawtransaction(signedTx['hex']) self.nodes[1].generate(1) self.sync_all() @@ -521,7 +522,7 @@ def run_test(self): #compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) - assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs + assert(0 <= feeDelta <= feeTolerance * 19) #~19 inputs ############################################# @@ -547,7 +548,7 @@ def run_test(self): rawtx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawtx) fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex']) - txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) + self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() @@ -641,9 +642,9 @@ def run_test(self): if out['value'] > 1.0: changeaddress += out['scriptPubKey']['addresses'][0] assert(changeaddress != "") - nextaddr = self.nodes[3].getnewaddress() + next_addr = self.nodes[3].getnewaddress() # Now the change address key should be removed from the keypool - assert(changeaddress != nextaddr) + assert(changeaddress != next_addr) ###################################### # Test subtractFeeFromOutputs option # diff --git a/test/functional/rpc_getchaintips.py b/test/functional/rpc_getchaintips.py index 3952bc0b6a..30700ec41c 100755 --- a/test/functional/rpc_getchaintips.py +++ b/test/functional/rpc_getchaintips.py @@ -3,7 +3,9 @@ # Copyright (c) 
2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the getchaintips RPC. + +""" +Test the getchaintips RPC. - introduce a network split - work on chains of different lengths diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py index 142e8a241b..53003689dd 100755 --- a/test/functional/rpc_invalidateblock.py +++ b/test/functional/rpc_invalidateblock.py @@ -3,10 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the invalidateblock RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, sync_blocks, time, assert_equal) +from test_framework.util import connect_nodes_bi, sync_blocks, time, assert_equal class InvalidateTest(RavenTestFramework): def set_test_params(self): @@ -22,7 +23,7 @@ def run_test(self): self.log.info("Mine 4 blocks on Node 0") self.nodes[0].generate(4) assert(self.nodes[0].getblockcount() == 4) - besthash = self.nodes[0].getbestblockhash() + best_hash = self.nodes[0].getbestblockhash() self.log.info("Mine competing 6 blocks on Node 1") self.nodes[1].generate(6) @@ -32,14 +33,14 @@ def run_test(self): connect_nodes_bi(self.nodes,0,1) sync_blocks(self.nodes[0:2]) assert(self.nodes[0].getblockcount() == 6) - badhash = self.nodes[1].getblockhash(2) + bad_hash = self.nodes[1].getblockhash(2) self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") - self.nodes[0].invalidateblock(badhash) - newheight = self.nodes[0].getblockcount() - newhash = self.nodes[0].getbestblockhash() - if (newheight != 4 or newhash != besthash): - raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight)) + 
self.nodes[0].invalidateblock(bad_hash) + new_height = self.nodes[0].getblockcount() + new_hash = self.nodes[0].getbestblockhash() + if new_height != 4 or new_hash != best_hash: + raise AssertionError("Wrong tip for node0, hash %s, height %d"%(new_hash,new_height)) self.log.info("Make sure we won't reorg to a lower work chain:") connect_nodes_bi(self.nodes,1,2) diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py new file mode 100755 index 0000000000..13907ac25a --- /dev/null +++ b/test/functional/rpc_misc.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright (c) 2019 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test RPC misc output.""" + +import xml.etree.ElementTree as ElementTree +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_raises_rpc_error, assert_equal, assert_greater_than, assert_greater_than_or_equal +from test_framework.authproxy import JSONRPCException + + +class RpcMiscTest(RavenTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def run_test(self): + node = self.nodes[0] + + #self.log.info("test CHECK_NONFATAL") + #assert_raises_rpc_error(-1, "Internal bug detected: 'request.params.size() != 100'", lambda: node.echo(*[0] * 100),) + + self.log.info("test getmemoryinfo") + memory = node.getmemoryinfo()['locked'] + assert_greater_than(memory['used'], 0) + assert_greater_than(memory['free'], 0) + assert_greater_than(memory['total'], 0) + # assert_greater_than_or_equal() for locked in case locking pages failed at some point + assert_greater_than_or_equal(memory['locked'], 0) + assert_greater_than(memory['chunks_used'], 0) + assert_greater_than(memory['chunks_free'], 0) + assert_equal(memory['used'] + memory['free'], memory['total']) + + self.log.info("test mallocinfo") + try: + 
mallocinfo = node.getmemoryinfo(mode="mallocinfo") + self.log.info('getmemoryinfo(mode="mallocinfo") call succeeded') + tree = ElementTree.fromstring(mallocinfo) + assert_equal(tree.tag, 'malloc') + except JSONRPCException: + self.log.info('getmemoryinfo(mode="mallocinfo") not available') + assert_raises_rpc_error(-8, 'mallocinfo is only available when compiled with glibc 2.10+', node.getmemoryinfo, mode="mallocinfo") + + assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar") + + self.log.info("test logging") + assert_equal(node.logging()['qt'], True) + node.logging(exclude=['qt']) + assert_equal(node.logging()['qt'], False) + node.logging(include=['qt']) + assert_equal(node.logging()['qt'], True) + + +if __name__ == '__main__': + RpcMiscTest().main() diff --git a/test/functional/rpc_named_arguments.py b/test/functional/rpc_named_arguments.py index 81067cafd2..5c9f203724 100755 --- a/test/functional/rpc_named_arguments.py +++ b/test/functional/rpc_named_arguments.py @@ -3,11 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test using named arguments for RPCs.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error) +from test_framework.util import assert_equal, assert_raises_rpc_error + +# noinspection PyTypeChecker class NamedArgumentTest(RavenTestFramework): def set_test_params(self): self.num_nodes = 1 diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 6e021baa24..6652a97583 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -3,15 +3,16 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-"""Test RPC calls related to net. + +""" +Test RPC calls related to net. Tests correspond to code in rpc/net.cpp. """ import time - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error, connect_nodes_bi, p2p_port) +from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes_bi, p2p_port class NetTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py index 9c9789eafc..6979b46166 100755 --- a/test/functional/rpc_preciousblock.py +++ b/test/functional/rpc_preciousblock.py @@ -3,11 +3,13 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the preciousblock RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, connect_nodes_bi, sync_chain, sync_blocks) +from test_framework.util import assert_equal, connect_nodes_bi, sync_chain, sync_blocks +# noinspection PyBroadException def unidirectional_node_sync_via_rpc(node_src, node_dest): blocks_to_copy = [] blockhash = node_src.getbestblockhash() @@ -20,8 +22,8 @@ def unidirectional_node_sync_via_rpc(node_src, node_dest): blockhash = node_src.getblockheader(blockhash, True)['previousblockhash'] blocks_to_copy.reverse() for blockhash in blocks_to_copy: - blockdata = node_src.getblock(blockhash, False) - assert(node_dest.submitblock(blockdata) in (None, 'inconclusive')) + block_data = node_src.getblock(blockhash, False) + assert(node_dest.submitblock(block_data) in (None, 'inconclusive')) def node_sync_via_rpc(nodes): for node_src in nodes: diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index de63741e5c..d07645c461 100755 --- a/test/functional/rpc_rawtransaction.py +++ 
b/test/functional/rpc_rawtransaction.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the rawtransaction RPCs. + +""" +Test the rawtransaction RPCs. Test the following RPCs: - createrawtransaction @@ -14,7 +16,7 @@ """ from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, assert_raises_rpc_error, assert_equal, Decimal) +from test_framework.util import connect_nodes_bi, assert_raises_rpc_error, assert_equal, Decimal # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(RavenTestFramework): @@ -67,7 +69,7 @@ def run_test(self): bal = self.nodes[2].getbalance() # send 1.2 RVN to msig adr - txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) + self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[0].generate(1) self.sync_all() @@ -88,7 +90,7 @@ def run_test(self): txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) - rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) + self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() @@ -135,7 +137,7 @@ def run_test(self): txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) - rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex']) + self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() @@ -164,7 +166,7 @@ def run_test(self): rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']]) self.log.debug(rawTxComb) self.nodes[2].sendrawtransaction(rawTxComb) - rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb) + self.nodes[0].decoderawtransaction(rawTxComb) self.sync_all() self.nodes[0].generate(1) self.sync_all() @@ -200,8 +202,8 @@ def 
run_test(self): inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}] outputs = { self.nodes[0].getnewaddress() : 1 } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - decrawtx= self.nodes[0].decoderawtransaction(rawtx) - assert_equal(decrawtx['vin'][0]['sequence'], 1000) + dec_raw_tx= self.nodes[0].decoderawtransaction(rawtx) + assert_equal(dec_raw_tx['vin'][0]['sequence'], 1000) # 9. invalid parameters - sequence number out of range inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}] @@ -216,8 +218,8 @@ def run_test(self): inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}] outputs = { self.nodes[0].getnewaddress() : 1 } rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - decrawtx= self.nodes[0].decoderawtransaction(rawtx) - assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) + dec_raw_tx= self.nodes[0].decoderawtransaction(rawtx) + assert_equal(dec_raw_tx['vin'][0]['sequence'], 4294967294) if __name__ == '__main__': RawTransactionsTest().main() diff --git a/test/functional/rpc_setban.py b/test/functional/rpc_setban.py new file mode 100755 index 0000000000..d3634e93a1 --- /dev/null +++ b/test/functional/rpc_setban.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2019 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +"""Test the setban rpc call.""" + +import time +from test_framework.test_framework import RavenTestFramework +from test_framework.util import connect_nodes, p2p_port, assert_equal + +class SetBanTests(RavenTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + self.extra_args = [[],[]] + + def run_test(self): + # Node 0 connects to Node 1, check that the noban permission is not granted + connect_nodes(self.nodes[0], 1) + assert_equal(self.nodes[0].getconnectioncount(), 2) + assert_equal(self.nodes[1].getconnectioncount(), 2) + + # Node 0 get banned by Node 1 + self.nodes[1].setban("127.0.0.1", "add") + + # Node 0 should not be able to reconnect + self.restart_node(1, []) + self.nodes[0].addnode("127.0.0.1:" + str(p2p_port(1)), "onetry") + time.sleep(1) + assert_equal(self.nodes[0].getconnectioncount(), 0) + self.nodes[1].assert_debug_log(expected_msgs=['dropped (banned)\n'], timeout=5) + assert_equal(self.nodes[1].getconnectioncount(), 0) + + # However, node 0 should be able to reconnect if it has noban permission + self.restart_node(1, ['-whitelist=127.0.0.1']) + connect_nodes(self.nodes[0], 1) + assert_equal(self.nodes[0].getconnectioncount(), 1) + assert_equal(self.nodes[1].getconnectioncount(), 1) + + # If we remove the ban, Node 0 should be able to reconnect even without noban permission + self.nodes[1].setban("127.0.0.1", "remove") + self.restart_node(1, []) + connect_nodes(self.nodes[0], 1) + assert_equal(self.nodes[0].getconnectioncount(), 1) + assert_equal(self.nodes[1].getconnectioncount(), 1) + +if __name__ == '__main__': + SetBanTests().main() diff --git a/test/functional/rpc_signmessage.py b/test/functional/rpc_signmessage.py index 884b6ddbb0..3a213310ed 100755 --- a/test/functional/rpc_signmessage.py +++ b/test/functional/rpc_signmessage.py @@ -3,6 +3,7 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. + """Test RPC commands for signing and verifying messages.""" from test_framework.test_framework import RavenTestFramework diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index ecb2616415..93cf7537df 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -3,11 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test transaction signing using the signrawtransaction RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error) - +from test_framework.util import assert_equal, assert_raises_rpc_error class SignRawTransactionsTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/rpc_spentindex.py b/test/functional/rpc_spentindex.py index efef4fb5b5..bae5db6ea5 100755 --- a/test/functional/rpc_spentindex.py +++ b/test/functional/rpc_spentindex.py @@ -4,16 +4,13 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-# -# Test RPC addressindex generation and fetching -# +"""Test RPC addressindex generation and fetching""" -import time -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, assert_equal) -from test_framework.script import (CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG) -from test_framework.mininode import (CTransaction, CTxIn, COutPoint, CTxOut) import binascii +from test_framework.test_framework import RavenTestFramework +from test_framework.util import connect_nodes_bi, assert_equal +from test_framework.script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG +from test_framework.mininode import CTransaction, CTxIn, COutPoint, CTxOut class SpentIndexTest(RavenTestFramework): @@ -49,16 +46,16 @@ def run_test(self): # Check that self.log.info("Testing spent index...") - feeSatoshis = 192000 + fee_satoshis = 192000 privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG" #address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW" - addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) - scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG]) + address_hash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) + script_pub_key = CScript([OP_DUP, OP_HASH160, address_hash, OP_EQUALVERIFY, OP_CHECKSIG]) unspent = self.nodes[0].listunspent() tx = CTransaction() - amount = int(unspent[0]["amount"] * 100000000 - feeSatoshis) + amount = int(unspent[0]["amount"] * 100000000 - fee_satoshis) tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))] - tx.vout = [CTxOut(amount, scriptPubKey)] + tx.vout = [CTxOut(amount, script_pub_key)] tx.rehash() signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) @@ -77,25 +74,25 @@ def run_test(self): self.log.info("Testing getrawtransaction method...") # Check that verbose raw transaction includes spent 
info - txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1) - assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid) - assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0) - assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106) + tx_verbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1) + assert_equal(tx_verbose["vout"][unspent[0]["vout"]]["spentTxId"], txid) + assert_equal(tx_verbose["vout"][unspent[0]["vout"]]["spentIndex"], 0) + assert_equal(tx_verbose["vout"][unspent[0]["vout"]]["spentHeight"], 106) # Check that verbose raw transaction includes input values - txVerbose2 = self.nodes[3].getrawtransaction(txid, 1) - assert_equal(float(txVerbose2["vin"][0]["value"]), (amount + feeSatoshis) / 100000000) - assert_equal(txVerbose2["vin"][0]["valueSat"], amount + feeSatoshis) + tx_verbose2 = self.nodes[3].getrawtransaction(txid, 1) + assert_equal(float(tx_verbose2["vin"][0]["value"]), (amount + fee_satoshis) / 100000000) + assert_equal(tx_verbose2["vin"][0]["valueSat"], amount + fee_satoshis) # Check that verbose raw transaction includes address values and input values #privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG" address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW" - addressHash2 = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) - scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG]) + address_hash2 = bytes([11, 47, 10, 12, 49, 191, 224, 64, 107, 12, 204, 19, 129, 253, 190, 49, 25, 70, 218, 220]) + script_pub_key2 = CScript([OP_DUP, OP_HASH160, address_hash2, OP_EQUALVERIFY, OP_CHECKSIG]) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))] - amount = int(amount - feeSatoshis) - tx2.vout = [CTxOut(amount, scriptPubKey2)] + amount = int(amount - fee_satoshis) + tx2.vout = [CTxOut(amount, script_pub_key2)] tx.rehash() self.nodes[0].importprivkey(privkey) signed_tx2 = 
self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8")) @@ -103,20 +100,20 @@ def run_test(self): # Check the mempool index self.sync_all() - txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1) - assert_equal(txVerbose3["vin"][0]["address"], address2) - assert_equal(txVerbose3["vin"][0]["valueSat"], amount + feeSatoshis) - assert_equal(float(txVerbose3["vin"][0]["value"]), (amount + feeSatoshis) / 100000000) + tx_verbose3 = self.nodes[1].getrawtransaction(txid2, 1) + assert_equal(tx_verbose3["vin"][0]["address"], address2) + assert_equal(tx_verbose3["vin"][0]["valueSat"], amount + fee_satoshis) + assert_equal(float(tx_verbose3["vin"][0]["value"]), (amount + fee_satoshis) / 100000000) # Check the database index block_hash = self.nodes[0].generate(1) self.sync_all() - txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1) - assert_equal(txVerbose4["vin"][0]["address"], address2) - assert_equal(txVerbose4["vin"][0]["valueSat"], amount + feeSatoshis) - assert_equal(float(txVerbose4["vin"][0]["value"]), (amount + feeSatoshis) / 100000000) + tx_verbose4 = self.nodes[3].getrawtransaction(txid2, 1) + assert_equal(tx_verbose4["vin"][0]["address"], address2) + assert_equal(tx_verbose4["vin"][0]["valueSat"], amount + fee_satoshis) + assert_equal(float(tx_verbose4["vin"][0]["value"]), (amount + fee_satoshis) / 100000000) # Check block deltas self.log.info("Testing getblockdeltas...") @@ -130,7 +127,7 @@ def run_test(self): assert_equal(block["deltas"][1]["txid"], txid2) assert_equal(block["deltas"][1]["inputs"][0]["index"], 0) assert_equal(block["deltas"][1]["inputs"][0]["address"], "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW") - assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], (amount + feeSatoshis) * -1) + assert_equal(block["deltas"][1]["inputs"][0]["satoshis"], (amount + fee_satoshis) * -1) assert_equal(block["deltas"][1]["inputs"][0]["prevtxid"], txid) assert_equal(block["deltas"][1]["inputs"][0]["prevout"], 0) 
assert_equal(block["deltas"][1]["outputs"][0]["index"], 0) diff --git a/test/functional/rpc_timestampindex.py b/test/functional/rpc_timestampindex.py index b8bad024e0..2cce0d3bcd 100755 --- a/test/functional/rpc_timestampindex.py +++ b/test/functional/rpc_timestampindex.py @@ -4,15 +4,11 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Test timestampindex generation and fetching -# +"""Test timestampindex generation and fetching""" import time - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, assert_equal) - +from test_framework.util import connect_nodes_bi, assert_equal class TimestampIndexTest(RavenTestFramework): diff --git a/test/functional/rpc_txindex.py b/test/functional/rpc_txindex.py index 409007bbae..4267fc2dff 100755 --- a/test/functional/rpc_txindex.py +++ b/test/functional/rpc_txindex.py @@ -4,16 +4,13 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-# -# Test txindex generation and fetching -# +"""Test txindex generation and fetching""" -import time -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, assert_equal) -from test_framework.script import (CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG) -from test_framework.mininode import (CTransaction, CTxIn, CTxOut, COutPoint) import binascii +from test_framework.test_framework import RavenTestFramework +from test_framework.util import connect_nodes_bi, assert_equal +from test_framework.script import CScript, OP_DUP, OP_HASH160, OP_EQUALVERIFY, OP_CHECKSIG +from test_framework.mininode import CTransaction, CTxIn, CTxOut, COutPoint class TxIndexTest(RavenTestFramework): @@ -50,13 +47,13 @@ def run_test(self): #privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG" #address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW" - addressHash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) - scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG]) + address_hash = bytes([11,47,10,12,49,191,224,64,107,12,204,19,129,253,190,49,25,70,218,220]) + script_pub_key = CScript([OP_DUP, OP_HASH160, address_hash, OP_EQUALVERIFY, OP_CHECKSIG]) unspent = self.nodes[0].listunspent() tx = CTransaction() amount = int(unspent[0]["amount"] * 10000000) tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))] - tx.vout = [CTxOut(amount, scriptPubKey)] + tx.vout = [CTxOut(amount, script_pub_key)] tx.rehash() signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8")) diff --git a/test/functional/rpc_merkle_blocks.py b/test/functional/rpc_txoutproof.py similarity index 98% rename from test/functional/rpc_merkle_blocks.py rename to test/functional/rpc_txoutproof.py index 5ee257d620..1d0171d1e0 100755 --- a/test/functional/rpc_merkle_blocks.py +++ b/test/functional/rpc_txoutproof.py @@ -3,10 +3,11 @@ # 
Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test gettxoutproof and verifytxoutproof RPCs.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes, assert_equal, assert_raises_rpc_error) +from test_framework.util import connect_nodes, assert_equal, assert_raises_rpc_error class MerkleBlockTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py index abf45ab88e..264b7e5f95 100755 --- a/test/functional/rpc_users.py +++ b/test/functional/rpc_users.py @@ -3,14 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test multiple RPC users.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import str_to_b64str, assert_equal +"""Test multiple RPC users.""" import os import http.client import urllib.parse +from test_framework.test_framework import RavenTestFramework +from test_framework.util import str_to_b64str, assert_equal class HTTPBasicsTest (RavenTestFramework): def set_test_params(self): @@ -38,16 +38,16 @@ def run_test(self): url = urllib.parse.urlparse(self.nodes[0].url) #Old authpair - authpair = url.username + ':' + url.password + auth_pair = url.username + ':' + url.password #New authpair generated via share/rpcuser tool password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM=" #Second authpair with different username password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" - authpairnew = "rt:"+password + auth_pair_new = "rt:"+password - headers = {"Authorization": "Basic " + str_to_b64str(authpair)} + headers = {"Authorization": "Basic " + str_to_b64str(auth_pair)} conn = 
http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -57,7 +57,7 @@ def run_test(self): conn.close() #Use new authpair to confirm both work - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + headers = {"Authorization": "Basic " + str_to_b64str(auth_pair_new)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -67,8 +67,8 @@ def run_test(self): conn.close() #Wrong login name with rt's password - authpairnew = "rtwrong:"+password - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + auth_pair_new = "rtwrong:"+password + headers = {"Authorization": "Basic " + str_to_b64str(auth_pair_new)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -78,8 +78,8 @@ def run_test(self): conn.close() #Wrong password for rt - authpairnew = "rt:"+password+"wrong" - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + auth_pair_new = "rt:"+password+"wrong" + headers = {"Authorization": "Basic " + str_to_b64str(auth_pair_new)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -89,8 +89,8 @@ def run_test(self): conn.close() #Correct for rt2 - authpairnew = "rt2:"+password2 - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + auth_pair_new = "rt2:"+password2 + headers = {"Authorization": "Basic " + str_to_b64str(auth_pair_new)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -100,8 +100,8 @@ def run_test(self): conn.close() #Wrong password for rt2 - authpairnew = "rt2:"+password2+"wrong" - headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} + auth_pair_new = "rt2:"+password2+"wrong" + headers = {"Authorization": "Basic " + str_to_b64str(auth_pair_new)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -116,9 +116,9 @@ def run_test(self): url = urllib.parse.urlparse(self.nodes[1].url) # rpcuser and rpcpassword authpair - rpcuserauthpair = "rpcuser💻:rpcpassword🔑" 
+ rpc_user_auth_pair = "rpcuser💻:rpcpassword🔑" - headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} + headers = {"Authorization": "Basic " + str_to_b64str(rpc_user_auth_pair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -128,8 +128,8 @@ def run_test(self): conn.close() #Wrong login name with rpcuser's password - rpcuserauthpair = "rpcuserwrong:rpcpassword" - headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} + rpc_user_auth_pair = "rpcuserwrong:rpcpassword" + headers = {"Authorization": "Basic " + str_to_b64str(rpc_user_auth_pair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() @@ -139,8 +139,8 @@ def run_test(self): conn.close() #Wrong password for rpcuser - rpcuserauthpair = "rpcuser:rpcpasswordwrong" - headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} + rpc_user_auth_pair = "rpcuser:rpcpasswordwrong" + headers = {"Authorization": "Basic " + str_to_b64str(rpc_user_auth_pair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py index 0893bf9bea..189f5fa030 100644 --- a/test/functional/test_framework/address.py +++ b/test/functional/test_framework/address.py @@ -3,39 +3,42 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Encode and decode BASE58, P2PKH and P2SH addresses.""" -from .script import (hash256, hash160, sha256, CScript, OP_0) -from .util import (bytes_to_hex_str, hex_str_to_bytes) +from .script import hash256, hash160, sha256, CScript, OP_0 +from .util import bytes_to_hex_str, hex_str_to_bytes + +ADDRESS_BCRT1_UNSPENDABLE = 'n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP' chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def byte_to_base58(b, version): result = '' - str = bytes_to_hex_str(b) - str = bytes_to_hex_str(chr(version).encode('latin-1')) + str - checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str))) - str += checksum[:8] - value = int('0x'+str,0) + hex_str = bytes_to_hex_str(b) + hex_str = bytes_to_hex_str(chr(version).encode('latin-1')) + hex_str + checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(hex_str))) + hex_str += checksum[:8] + value = int('0x' + hex_str, 0) while value > 0: result = chars[value % 58] + result value //= 58 - while (str[:2] == '00'): + while hex_str[:2] == '00': result = chars[0] + result - str = str[2:] + hex_str = hex_str[2:] return result # TODO: def base58_decode -def keyhash_to_p2pkh(hash, main = False): - assert (len(hash) == 20) +def keyhash_to_p2pkh(hash_input, main = False): + assert (len(hash_input) == 20) version = 0 if main else 111 - return byte_to_base58(hash, version) + return byte_to_base58(hash_input, version) -def scripthash_to_p2sh(hash, main = False): - assert (len(hash) == 20) +def scripthash_to_p2sh(hash_in, main = False): + assert (len(hash_in) == 20) version = 5 if main else 196 - return byte_to_base58(hash, version) + return byte_to_base58(hash_in, version) def key_to_p2pkh(key, main = False): key = check_key(key) @@ -45,16 +48,26 @@ def script_to_p2sh(script, main = False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) +def key_to_p2sh_p2wpkh(key, main = False): + key = check_key(key) + p2shscript = CScript([OP_0, hash160(key)]) + return 
script_to_p2sh(p2shscript, main) + +def script_to_p2sh_p2wsh(script, main = False): + script = check_script(script) + p2shscript = CScript([OP_0, sha256(script)]) + return script_to_p2sh(p2shscript, main) + def check_key(key): - if (type(key) is str): + if type(key) is str: key = hex_str_to_bytes(key) # Assuming this is hex string - if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): + if type(key) is bytes and (len(key) == 33 or len(key) == 65): return key - assert(False) + assert False def check_script(script): - if (type(script) is str): + if type(script) is str: script = hex_str_to_bytes(script) # Assuming this is hex string - if (type(script) is bytes or type(script) is CScript): + if type(script) is bytes or type(script) is CScript: return script - assert(False) + assert False diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 0b635a040f..1af996d12b 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -19,7 +19,9 @@ # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -"""HTTP proxy for opening RPC connection to ravend. + +""" +HTTP proxy for opening RPC connection to ravend. 
AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: @@ -58,13 +60,13 @@ def __init__(self, rpc_error): self.error = rpc_error -def EncodeDecimal(o): +def encode_decimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") -class AuthServiceProxy(): +class AuthServiceProxy: __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps @@ -76,8 +78,8 @@ def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connect port = 80 if self.__url.port is None else self.__url.port user = None if self.__url.username is None else self.__url.username.encode('utf8') passwd = None if self.__url.password is None else self.__url.password.encode('utf8') - authpair = user + b':' + passwd - self.__auth_header = b'Basic ' + base64.b64encode(authpair) + auth_pair = user + b':' + passwd + self.__auth_header = b'Basic ' + base64.b64encode(auth_pair) if connection: # Callables re-use the connection of the original proxy @@ -95,45 +97,47 @@ def __getattr__(self, name): name = "%s.%s" % (self._service_name, name) return AuthServiceProxy(self.__service_url, name, connection=self.__conn) - def _request(self, method, path, postdata): - ''' + def _request(self, method, path, post_data): + """ Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
- ''' + """ headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} try: - self.__conn.request(method, path, postdata, headers) + self.__conn.request(method, path, post_data, headers) return self._get_response() except http.client.BadStatusLine as e: if e.line == "''": # if connection was closed, try again self.__conn.close() - self.__conn.request(method, path, postdata, headers) + self.__conn.request(method, path, post_data, headers) + print("~~~~~~~~~~~~~~~~~ Bad Status Exception ~~~~~~~~~~~~~~~~~~~~~~~~~~") + print(e) return self._get_response() else: raise except http.client.UnknownProtocol as e: - if e.line == "''": # if connection was closed, try again - self.__conn.close() - self.__conn.request(method, path, postdata, headers) - print("~~~~~~~~~~~~~~~~~ Protocol Exception ~~~~~~~~~~~~~~~~~~~~~~~~~~") - return self._get_response() - else: - raise - except (BrokenPipeError, ConnectionResetError): + self.__conn.close() + self.__conn.request(method, path, post_data, headers) + print("~~~~~~~~~~~~~~~~~ Protocol Exception ~~~~~~~~~~~~~~~~~~~~~~~~~~") + print(e) + return self._get_response() + except (BrokenPipeError, ConnectionResetError) as e: # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() - self.__conn.request(method, path, postdata, headers) + self.__conn.request(method, path, post_data, headers) + print("~~~~~~~~~~~~~~~~~ Broken Pipe or Connection Reset Exception ~~~~~~~~~~~~~~~~~~~~~~~~~~") + print(e) return self._get_response() def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name, - json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + json.dumps(args, default=encode_decimal, ensure_ascii=self.ensure_ascii))) if args and argsn: raise 
ValueError('Cannot handle both named and positional arguments') return {'version': '1.1', @@ -142,12 +146,12 @@ def get_request(self, *args, **argsn): 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): - postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) - response = self._request('POST', self.__url.path, postdata.encode('utf-8')) + post_data = json.dumps(self.get_request(*args, **argsn), default=encode_decimal, ensure_ascii=self.ensure_ascii) + response = self._request('POST', self.__url.path, post_data.encode('utf-8')) if response['error'] is not None: log.debug("--------") log.debug("Call failed. postdata:") - log.debug(postdata) + log.debug(post_data) log.debug("-------") raise JSONRPCException(response['error']) elif 'result' not in response: @@ -157,7 +161,7 @@ def __call__(self, *args, **argsn): return response['result'] def batch(self, rpc_call_list): - postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) + postdata = json.dumps(list(rpc_call_list), default=encode_decimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) return self._request('POST', self.__url.path, postdata.encode('utf-8')) @@ -187,13 +191,13 @@ def _get_response(self): raise JSONRPCException({ 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) - responsedata = http_response.read().decode('utf8') - response = json.loads(responsedata, parse_float=decimal.Decimal) + response_data = http_response.read().decode('utf8') + response = json.loads(response_data, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: - log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, 
json.dumps(response["result"], default=encode_decimal, ensure_ascii=self.ensure_ascii))) else: - log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) + log.debug("<-- [%.6f] %s" % (elapsed, response_data)) return response def __truediv__(self, relative_uri): diff --git a/test/functional/test_framework/bignum.py b/test/functional/test_framework/bignum.py index 221a16dd4e..f3ce681e7a 100644 --- a/test/functional/test_framework/bignum.py +++ b/test/functional/test_framework/bignum.py @@ -2,14 +2,15 @@ # # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Big number routines. + +""" +Big number routines. This file is copied from python-ravenlib. """ import struct - # generic big endian MPI format def bn_bytes(v, have_ext=False): diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py index 5c9ba1f548..3eaa1feab1 100644 --- a/test/functional/test_framework/blockstore.py +++ b/test/functional/test_framework/blockstore.py @@ -3,15 +3,16 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """BlockStore and TxStore helper classes.""" -from .mininode import (logging, CBlock, msg_headers, CBlockHeader, msg_generic, CBlockLocator) +from .mininode import logging, CBlock, MsgHeaders, CBlockHeader, MsgGeneric, CBlockLocator from io import BytesIO import dbm.dumb as dbmd logger = logging.getLogger("TestFramework.blockstore") -class BlockStore(): +class BlockStore: """BlockStore helper class. 
BlockStore keeps a map of blocks and implements helper functions for @@ -32,7 +33,6 @@ def erase(self, blockhash): # lookup an entry and return the item as raw bytes def get(self, blockhash): - value = None try: value = self.blockDB[repr(blockhash)] except KeyError: @@ -47,7 +47,7 @@ def get_block(self, blockhash): f = BytesIO(serialized_block) ret = CBlock() ret.deserialize(f) - ret.calc_sha256() + ret.calc_x16r() return ret def get_header(self, blockhash): @@ -66,26 +66,26 @@ def headers_for(self, locator, hash_stop, current_tip=None): if current_block_header is None: return None - response = msg_headers() - headersList = [ current_block_header ] - maxheaders = 2000 - while (headersList[0].sha256 not in locator.vHave): - prevBlockHash = headersList[0].hashPrevBlock - prevBlockHeader = self.get_header(prevBlockHash) - if prevBlockHeader is not None: - headersList.insert(0, prevBlockHeader) + response = MsgHeaders() + headers_list = [ current_block_header ] + max_headers = 2000 + while headers_list[0].sha256 not in locator.vHave: + prev_block_hash = headers_list[0].hashPrevBlock + prev_block_header = self.get_header(prev_block_hash) + if prev_block_header is not None: + headers_list.insert(0, prev_block_header) else: break - headersList = headersList[:maxheaders] # truncate if we have too many - hashList = [x.sha256 for x in headersList] - index = len(headersList) - if (hash_stop in hashList): - index = hashList.index(hash_stop)+1 - response.headers = headersList[:index] + headers_list = headers_list[:max_headers] # truncate if we have too many + hash_list = [x.sha256 for x in headers_list] + index = len(headers_list) + if hash_stop in hash_list: + index = hash_list.index(hash_stop)+1 + response.headers = headers_list[:index] return response def add_block(self, block): - block.calc_sha256() + block.calc_x16r() try: self.blockDB[repr(block.sha256)] = bytes(block.serialize()) except TypeError: @@ -101,11 +101,11 @@ def add_header(self, header): def get_blocks(self, 
inv): responses = [] for i in inv: - if (i.type == 2): # MSG_BLOCK + if i.type == 2: # MSG_BLOCK data = self.get(i.hash) if data is not None: # Use msg_generic to avoid re-serialization - responses.append(msg_generic(b"block", data)) + responses.append(MsgGeneric(b"block", data)) return responses def get_locator(self, current_tip=None): @@ -114,12 +114,12 @@ def get_locator(self, current_tip=None): r = [] counter = 0 step = 1 - lastBlock = self.get_block(current_tip) - while lastBlock is not None: - r.append(lastBlock.hashPrevBlock) + last_block = self.get_block(current_tip) + while last_block is not None: + r.append(last_block.hashPrevBlock) for _ in range(step): - lastBlock = self.get_block(lastBlock.hashPrevBlock) - if lastBlock is None: + last_block = self.get_block(last_block.hashPrevBlock) + if last_block is None: break counter += 1 if counter > 10: @@ -128,7 +128,7 @@ def get_locator(self, current_tip=None): locator.vHave = r return locator -class TxStore(): +class TxStore: def __init__(self, datadir): self.txDB = dbmd.open(datadir + "/transactions", 'c') @@ -137,7 +137,6 @@ def close(self): # lookup an entry and return the item as raw bytes def get(self, txhash): - value = None try: value = self.txDB[repr(txhash)] except KeyError: @@ -145,7 +144,7 @@ def get(self, txhash): return value def add_transaction(self, tx): - tx.calc_sha256() + tx.calc_x16r() try: self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) except TypeError: @@ -154,8 +153,8 @@ def add_transaction(self, tx): def get_transactions(self, inv): responses = [] for i in inv: - if (i.type == 1): # MSG_TX + if i.type == 1: # MSG_TX tx = self.get(i.hash) if tx is not None: - responses.append(msg_generic(b"tx", tx)) + responses.append(MsgGeneric(b"tx", tx)) return responses diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index b427f3504c..70b72b72ce 100644 --- a/test/functional/test_framework/blocktools.py +++ 
b/test/functional/test_framework/blocktools.py @@ -3,36 +3,31 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Utilities for manipulating blocks and transactions.""" -from .mininode import (CBlock, - uint256_from_str, - ser_uint256, - hash256, - CTxInWitness, - CTxOut, - CTxIn, - CTransaction, - COutPoint, - ser_string, - COIN) -from .script import (CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN) +from .mininode import (CBlock, uint256_from_str, ser_uint256, hash256, CTxInWitness, CTxOut, CTxIn, + CTransaction, COutPoint, ser_string, COIN) +from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN # Create a block (with regtest difficulty) -def create_block(hashprev, coinbase, nTime=None): +def create_block(hash_prev, coinbase, n_time=None): block = CBlock() - if nTime is None: + if n_time is None: import time block.nTime = int(time.time()+600) else: - block.nTime = nTime - block.hashPrevBlock = hashprev + block.nTime = n_time + block.hashPrevBlock = hash_prev block.nBits = 0x207fffff # Will break after a difficulty adjustment... 
block.vtx.append(coinbase) block.hashMerkleRoot = block.calc_merkle_root() - block.calc_sha256() + block.calc_x16r() return block +# Genesis block time (regtest) +REGTEST_GENISIS_BLOCK_TIME = 1537466400 + # From BIP141 WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed" @@ -66,10 +61,10 @@ def serialize_script_num(value): if value == 0: return r neg = value < 0 - absvalue = -value if neg else value - while (absvalue): - r.append(int(absvalue & 0xff)) - absvalue >>= 8 + abs_value = -value if neg else value + while abs_value: + r.append(int(abs_value & 0xff)) + abs_value >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: @@ -83,39 +78,39 @@ def create_coinbase(height, pubkey = None): coinbase = CTransaction() coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff)) - coinbaseoutput = CTxOut() - coinbaseoutput.nValue = 5000 * COIN + coin_base_output = CTxOut() + coin_base_output.nValue = 5000 * COIN halvings = int(height/150) # regtest - coinbaseoutput.nValue >>= halvings - if (pubkey != None): - coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) + coin_base_output.nValue >>= halvings + if pubkey is not None: + coin_base_output.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: - coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) - coinbase.vout = [ coinbaseoutput ] - coinbase.calc_sha256() + coin_base_output.scriptPubKey = CScript([OP_TRUE]) + coinbase.vout = [ coin_base_output ] + coinbase.calc_x16r() return coinbase # Create a transaction. # If the scriptPubKey is not specified, make it anyone-can-spend. 
-def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()): +def create_transaction(prev_tx, n, sig, value, script_pub_key=CScript()): tx = CTransaction() - assert(n < len(prevtx.vout)) - tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) - tx.vout.append(CTxOut(value, scriptPubKey)) - tx.calc_sha256() + assert(n < len(prev_tx.vout)) + tx.vin.append(CTxIn(COutPoint(prev_tx.sha256, n), sig, 0xffffffff)) + tx.vout.append(CTxOut(value, script_pub_key)) + tx.calc_x16r() return tx -def get_legacy_sigopcount_block(block, fAccurate=True): +def get_legacy_sigopcount_block(block, f_accurate=True): count = 0 for tx in block.vtx: - count += get_legacy_sigopcount_tx(tx, fAccurate) + count += get_legacy_sigopcount_tx(tx, f_accurate) return count -def get_legacy_sigopcount_tx(tx, fAccurate=True): +def get_legacy_sigopcount_tx(tx, f_accurate=True): count = 0 for i in tx.vout: - count += i.scriptPubKey.GetSigOpCount(fAccurate) + count += i.scriptPubKey.get_sig_op_count(f_accurate) for j in tx.vin: # scriptSig might be of type bytes, so convert to CScript for the moment - count += CScript(j.scriptSig).GetSigOpCount(fAccurate) + count += CScript(j.scriptSig).get_sig_op_count(f_accurate) return count diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py index 36be9b88a8..50e9b07f09 100755 --- a/test/functional/test_framework/comptool.py +++ b/test/functional/test_framework/comptool.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Compare two or more ravends to each other. + +""" +Compare two or more ravends to each other. To use, create a class that implements get_tests(), and pass it in as the test generator to TestManager. 
get_tests() should be a python @@ -18,30 +20,18 @@ on_getdata: provide blocks via BlockStore """ -from .mininode import (NodeConn, - CBlock, - msg_inv, - CInv, - msg_headers, - msg_mempool, - mininode_lock, - NodeConnCB, - msg_getheaders, - msg_ping, - msg_block, - CBlockHeader, - MAX_INV_SZ, - CTransaction) -from .blockstore import (BlockStore, TxStore) -from .util import (p2p_port, wait_until) +from .mininode import (NodeConn, CBlock, MsgInv, CInv, MsgHeaders, MsgMempool, mininode_lock, NodeConnCB, + MsgGetHeaders, MsgPing, MsgBlock, CBlockHeader, MAX_INV_SZ, CTransaction) +from .blockstore import BlockStore, TxStore +from .util import p2p_port, wait_until import logging logger=logging.getLogger("TestFramework.comptool") -global mininode_lock +#global mininode_lock -class RejectResult(): +class RejectResult: """Outcome that expects rejection of a transaction or block.""" def __init__(self, code, reason=b''): self.code = code @@ -81,7 +71,7 @@ def add_connection(self, conn): def on_headers(self, conn, message): if len(message.headers) > 0: best_header = message.headers[-1] - best_header.calc_sha256() + best_header.calc_x16r() self.bestblockhash = best_header.sha256 def on_getheaders(self, conn, message): @@ -116,30 +106,30 @@ def on_reject(self, conn, message): def send_inv(self, obj): mtype = 2 if isinstance(obj, CBlock) else 1 - self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)])) + self.conn.send_message(MsgInv([CInv(mtype, obj.sha256)])) def send_getheaders(self): # We ask for headers from their last tip. 
- m = msg_getheaders() + m = MsgGetHeaders() m.locator = self.block_store.get_locator(self.bestblockhash) self.conn.send_message(m) def send_header(self, header): - m = msg_headers() + m = MsgHeaders() m.headers.append(header) self.conn.send_message(m) # This assumes BIP31 def send_ping(self, nonce): self.pingMap[nonce] = True - self.conn.send_message(msg_ping(nonce)) + self.conn.send_message(MsgPing(nonce)) def received_ping_response(self, nonce): return nonce not in self.pingMap def send_mempool(self): self.lastInv = [] - self.conn.send_message(msg_mempool()) + self.conn.send_message(MsgMempool()) # TestInstance: # @@ -170,13 +160,13 @@ def send_mempool(self): # across all connections. (If outcome of final tx is specified as true # or false, then only the last tx is tested against outcome.) -class TestInstance(): +class TestInstance: def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): self.blocks_and_transactions = objects if objects else [] self.sync_every_block = sync_every_block self.sync_every_tx = sync_every_tx -class TestManager(): +class TestManager: def __init__(self, testgen, datadir): self.test_generator = testgen @@ -203,7 +193,7 @@ def clear_all_connections(self): def wait_for_disconnections(self): def disconnected(): return all(node.closed for node in self.test_nodes) - wait_until(disconnected, timeout=10, lock=mininode_lock) + wait_until(disconnected, timeout=10, lock=mininode_lock, err_msg="wait_for_disconnections") def wait_for_verack(self): return all(node.wait_for_verack() for node in self.test_nodes) @@ -211,7 +201,7 @@ def wait_for_verack(self): def wait_for_pings(self, counter): def received_pongs(): return all(node.received_ping_response(counter) for node in self.test_nodes) - wait_until(received_pongs, lock=mininode_lock) + wait_until(received_pongs, lock=mininode_lock, err_msg="wait_for_pings") # sync_blocks: Wait for all connections to request the blockhash given # then send get_headers to find out the tip of 
each node, and synchronize @@ -224,7 +214,7 @@ def blocks_requested(): ) # --> error if not requested - wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock) + wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock, err_msg="sync_blocks") # Send getheaders message [ c.cb.send_getheaders() for c in self.connections ] @@ -244,7 +234,7 @@ def transaction_requested(): ) # --> error if not requested - wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock) + wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock, err_msg="sync_transaction") # Get the mempool [ c.cb.send_mempool() for c in self.connections ] @@ -270,12 +260,12 @@ def check_results(self, blockhash, outcome): if c.cb.bestblockhash == blockhash: return False if blockhash not in c.cb.block_reject_map: - logger.error('Block not in reject map: %064x' % (blockhash)) + logger.error('Block not in reject map: %064x' % blockhash) return False if not outcome.match(c.cb.block_reject_map[blockhash]): logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)) return False - elif ((c.cb.bestblockhash == blockhash) != outcome): + elif (c.cb.bestblockhash == blockhash) != outcome: return False return True @@ -296,12 +286,12 @@ def check_mempool(self, txhash, outcome): if txhash in c.cb.lastInv: return False if txhash not in c.cb.tx_reject_map: - logger.error('Tx not in reject map: %064x' % (txhash)) + logger.error('Tx not in reject map: %064x' % txhash) return False if not outcome.match(c.cb.tx_reject_map[txhash]): logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)) return False - elif ((txhash in c.cb.lastInv) != outcome): + elif (txhash in c.cb.lastInv) != outcome: return False return True @@ -316,7 +306,7 @@ def run(self): # if we're not syncing on every block or every tx. 
[ block, block_outcome, tip ] = [ None, None, None ] [ tx, tx_outcome ] = [ None, None ] - invqueue = [] + inv_queue = [] for test_obj in test_instance.blocks_and_transactions: b_or_t = test_obj[0] @@ -325,7 +315,7 @@ def run(self): if isinstance(b_or_t, CBlock): # Block test runner block = b_or_t block_outcome = outcome - tip = block.sha256 + tip = block.x16r # each test_obj can have an optional third argument # to specify the tip we should compare with # (default is to use the block being tested) @@ -339,35 +329,35 @@ def run(self): # node wouldn't send another getdata request while # the earlier one is outstanding. first_block_with_hash = True - if self.block_store.get(block.sha256) is not None: + if self.block_store.get(block.x16r) is not None: first_block_with_hash = False with mininode_lock: self.block_store.add_block(block) for c in self.connections: - if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True: + if first_block_with_hash and block.x16r in c.cb.block_request_map and c.cb.block_request_map[block.x16r] == True: # There was a previous request for this block hash # Most likely, we delivered a header for this block # but never had the block to respond to the getdata - c.send_message(msg_block(block)) + c.send_message(MsgBlock(block)) else: - c.cb.block_request_map[block.sha256] = False + c.cb.block_request_map[block.x16r] = False # Either send inv's to each node and sync, or add - # to invqueue for later inv'ing. - if (test_instance.sync_every_block): + # to inv_queue for later inv'ing. + if test_instance.sync_every_block: # if we expect success, send inv and sync every block # if we expect failure, just push the block and see what happens. 
- if outcome == True: + if outcome: [ c.cb.send_inv(block) for c in self.connections ] - self.sync_blocks(block.sha256, 1) + self.sync_blocks(block.x16r, 1) else: - [ c.send_message(msg_block(block)) for c in self.connections ] + [c.send_message(MsgBlock(block)) for c in self.connections] [ c.cb.send_ping(self.ping_counter) for c in self.connections ] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 - if (not self.check_results(tip, outcome)): + if not self.check_results(tip, outcome): raise AssertionError("Test failed at test %d" % test_number) else: - invqueue.append(CInv(2, block.sha256)) + inv_queue.append(CInv(2, block.x16r)) elif isinstance(b_or_t, CBlockHeader): block_header = b_or_t self.block_store.add_header(block_header) @@ -381,34 +371,33 @@ def run(self): with mininode_lock: self.tx_store.add_transaction(tx) for c in self.connections: - c.cb.tx_request_map[tx.sha256] = False + c.cb.tx_request_map[tx.x16r] = False # Again, either inv to all nodes or save for later - if (test_instance.sync_every_tx): + if test_instance.sync_every_tx: [ c.cb.send_inv(tx) for c in self.connections ] - self.sync_transaction(tx.sha256, 1) - if (not self.check_mempool(tx.sha256, outcome)): + self.sync_transaction(tx.x16r, 1) + if not self.check_mempool(tx.x16r, outcome): raise AssertionError("Test failed at test %d" % test_number) else: - invqueue.append(CInv(1, tx.sha256)) + inv_queue.append(CInv(1, tx.x16r)) # Ensure we're not overflowing the inv queue - if len(invqueue) == MAX_INV_SZ: - [ c.send_message(msg_inv(invqueue)) for c in self.connections ] - invqueue = [] + if len(inv_queue) == MAX_INV_SZ: + [c.send_message(MsgInv(inv_queue)) for c in self.connections] + inv_queue = [] # Do final sync if we weren't syncing on every block or every tx. 
- if (not test_instance.sync_every_block and block is not None): - if len(invqueue) > 0: - [ c.send_message(msg_inv(invqueue)) for c in self.connections ] - invqueue = [] - self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions)) - if (not self.check_results(tip, block_outcome)): + if not test_instance.sync_every_block and block is not None: + if len(inv_queue) > 0: + [c.send_message(MsgInv(inv_queue)) for c in self.connections] + inv_queue = [] + self.sync_blocks(block.x16r, len(test_instance.blocks_and_transactions)) + if not self.check_results(tip, block_outcome): raise AssertionError("Block test failed at test %d" % test_number) - if (not test_instance.sync_every_tx and tx is not None): - if len(invqueue) > 0: - [ c.send_message(msg_inv(invqueue)) for c in self.connections ] - invqueue = [] - self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions)) - if (not self.check_mempool(tx.sha256, tx_outcome)): + if not test_instance.sync_every_tx and tx is not None: + if len(inv_queue) > 0: + [c.send_message(MsgInv(inv_queue)) for c in self.connections] + self.sync_transaction(tx.x16r, len(test_instance.blocks_and_transactions)) + if not self.check_mempool(tx.x16r, tx_outcome): raise AssertionError("Mempool test failed at test %d" % test_number) logger.info("Test %d: PASS" % test_number) diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py index 9b56c71efa..ba8d1731d6 100644 --- a/test/functional/test_framework/coverage.py +++ b/test/functional/test_framework/coverage.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Utilities for doing coverage analysis on the RPC interface. + +""" +Utilities for doing coverage analysis on the RPC interface. Provides a way to track which RPC commands are exercised during testing. 
@@ -11,11 +13,10 @@ import os - REFERENCE_FILENAME = 'rpc_interface.txt' -class AuthServiceProxyWrapper(): +class AuthServiceProxyWrapper: """ An object that wraps AuthServiceProxy to record specific RPC calls. @@ -49,6 +50,7 @@ def __call__(self, *args, **kwargs): self._log_call() return return_val + # noinspection PyProtectedMember def _log_call(self): rpc_method = self.auth_service_proxy_instance._service_name @@ -64,6 +66,7 @@ def get_request(self, *args, **kwargs): self._log_call() return self.auth_service_proxy_instance.get_request(*args, **kwargs) + def get_filename(dirname, n_node): """ Get a filename unique to the test process ID and node. diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index 62aa2cfca3..a6f2667bca 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,5 +1,7 @@ # Copyright (c) 2011 Sam Rushing -"""ECC secp256k1 OpenSSL wrapper. + +""" +ECC secp256k1 OpenSSL wrapper. WARNING: This module does not mlock() secrets; your private keys may end up on disk in swap! Use with caution! @@ -12,7 +14,7 @@ import hashlib import sys -ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32') +ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('libeay32')) ssl.BN_new.restype = ctypes.c_void_p ssl.BN_new.argtypes = [] @@ -75,7 +77,7 @@ SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 # Thx to Sam Devlin for the ctypes magic 64-bit fix. 
-def _check_result(val, func, args): +def _check_result(val): if val == 0: raise ValueError else: @@ -84,7 +86,7 @@ def _check_result(val, func, args): ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p ssl.EC_KEY_new_by_curve_name.errcheck = _check_result -class CECKey(): +class CECKey: """Wrapper around OpenSSL's EC_KEY""" POINT_CONVERSION_COMPRESSED = 2 @@ -146,17 +148,17 @@ def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()): r = self.get_raw_ecdh_key(other_pubkey) return kdf(r) - def sign(self, hash, low_s = True): + def sign(self, hash_in, low_s = True): # FIXME: need unit tests for below cases - if not isinstance(hash, bytes): - raise TypeError('Hash must be bytes instance; got %r' % hash.__class__) - if len(hash) != 32: + if not isinstance(hash_in, bytes): + raise TypeError('Hash must be bytes instance; got %r' % hash_in.__class__) + if len(hash_in) != 32: raise ValueError('Hash must be exactly 32 bytes long') sig_size0 = ctypes.c_uint32() sig_size0.value = ssl.ECDSA_size(self.k) mb_sig = ctypes.create_string_buffer(sig_size0.value) - result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k) + result = ssl.ECDSA_sign(0, hash_in, len(hash_in), mb_sig, ctypes.byref(sig_size0), self.k) assert 1 == result assert mb_sig.raw[0] == 0x30 assert mb_sig.raw[1] == sig_size0.value - 2 @@ -170,17 +172,17 @@ def sign(self, hash, low_s = True): return mb_sig.raw[:sig_size0.value] else: low_s_value = SECP256K1_ORDER - s_value - low_s_bytes = (low_s_value).to_bytes(33, byteorder='big') + low_s_bytes = low_s_value.to_bytes(33, byteorder='big') while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80: low_s_bytes = low_s_bytes[1:] new_s_size = len(low_s_bytes) new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big') - new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big') + new_s_size_byte = new_s_size.to_bytes(1, byteorder='big') return b'\x30' + new_total_size_byte 
+ mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes - def verify(self, hash, sig): + def verify(self, hash_in, sig): """Verify a DER signature""" - return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1 + return ssl.ECDSA_verify(0, hash_in, len(hash_in), sig, len(sig), self.k) == 1 def set_compressed(self, compressed): if compressed: @@ -216,8 +218,8 @@ def is_valid(self): def is_compressed(self): return len(self) == 33 - def verify(self, hash, sig): - return self._cec_key.verify(hash, sig) + def verify(self, hash_in, sig): + return self._cec_key.verify(hash_in, sig) def __str__(self): return repr(self) diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index c462ddf8e1..76373c5b66 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -5,6 +5,7 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Raven P2P network half-a-node. 
This python code was modified from ArtForz' public domain half-a-node, as @@ -33,7 +34,7 @@ import struct import sys import time -from threading import (RLock, Thread) +from threading import RLock, Thread from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until, x16_hash_block @@ -46,7 +47,7 @@ MAX_INV_SZ = 50000 MAX_BLOCK_BASE_SIZE = 1000000 -COIN = 100000000 # 1 rvn in corbies +COIN = 100000000 # 1 rvn in Corbies NODE_NETWORK = (1 << 0) # NODE_GETUTXO = (1 << 1) @@ -85,7 +86,6 @@ def hash256(s): def ser_compact_size(l): - r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: @@ -225,7 +225,7 @@ def from_hex(obj, hex_string): # Convert a binary-serializable object to hex (eg for submission via RPC) -def ToHex(obj): +def to_hex(obj): return bytes_to_hex_str(obj.serialize()) @@ -259,7 +259,7 @@ def __repr__(self): MSG_WITNESS_FLAG = 1 << 30 -class CInv(): +class CInv: typemap = { 0: "Error", 1: "TX", @@ -288,7 +288,7 @@ def __repr__(self): % (self.typemap[self.type], self.hash) -class CBlockLocator(): +class CBlockLocator: def __init__(self): self.nVersion = MY_VERSION self.vHave = [] @@ -308,9 +308,9 @@ def __repr__(self): % (self.nVersion, repr(self.vHave)) -class COutPoint(): - def __init__(self, hash=0, n=0): - self.hash = hash +class COutPoint: + def __init__(self, hash_in=0, n=0): + self.hash = hash_in self.n = n def deserialize(self, f): @@ -327,14 +327,14 @@ def __repr__(self): return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n) -class CTxIn(): - def __init__(self, outpoint=None, scriptSig=b"", nSequence=0): +class CTxIn: + def __init__(self, outpoint=None, script_sig=b"", n_sequence=0): if outpoint is None: self.prevout = COutPoint() else: self.prevout = outpoint - self.scriptSig = scriptSig - self.nSequence = nSequence + self.scriptSig = script_sig + self.nSequence = n_sequence def deserialize(self, f): self.prevout = COutPoint() @@ -355,10 +355,10 @@ def __repr__(self): 
self.nSequence) -class CTxOut(): - def __init__(self, nValue=0, scriptPubKey=b""): - self.nValue = nValue - self.scriptPubKey = scriptPubKey +class CTxOut: + def __init__(self, n_value=0, script_pub_key=b""): + self.nValue = n_value + self.scriptPubKey = script_pub_key def deserialize(self, f): self.nValue = struct.unpack(" 21000000 * COIN: return False @@ -618,7 +618,7 @@ def __repr__(self): % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime) -class CBlockHeader(): +class CBlockHeader: def __init__(self, header=None): if header is None: self.set_null() @@ -629,9 +629,9 @@ def __init__(self, header=None): self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce - self.sha256 = header.sha256 + self.x16r = header.sha256 self.hash = header.hash - self.calc_sha256() + self.calc_x16r() def set_null(self): self.nVersion = 1 @@ -640,7 +640,7 @@ def set_null(self): self.nTime = 0 self.nBits = 0 self.nNonce = 0 - self.sha256 = None + self.x16r = None self.hash = None def deserialize(self, f): @@ -650,7 +650,7 @@ def deserialize(self, f): self.nTime = struct.unpack(" target: + if self.x16r > target: return False for tx in self.vtx: if not tx.is_valid(): @@ -749,7 +748,7 @@ def is_valid(self): def solve(self): self.rehash() target = uint256_from_compact(self.nBits) - while self.sha256 > target: + while self.x16r > target: self.nNonce += 1 self.rehash() @@ -759,7 +758,7 @@ def __repr__(self): time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) -class CUnsignedAlert(): +class CUnsignedAlert: def __init__(self): self.nVersion = 1 self.nRelayUntil = 0 @@ -814,7 +813,7 @@ def __repr__(self): self.strComment, self.strStatusBar, self.strReserved) -class CAlert(): +class CAlert: def __init__(self): self.vchMsg = b"" self.vchSig = b"" @@ -834,7 +833,7 @@ def __repr__(self): % (len(self.vchMsg), len(self.vchSig)) -class PrefilledTransaction(): +class PrefilledTransaction: def __init__(self, index=0, tx=None): self.index 
= index self.tx = tx @@ -861,7 +860,7 @@ def __repr__(self): # This is what we send on the wire, in a cmpctblock message. -class P2PHeaderAndShortIDs(): +class P2PHeaderAndShortIDs: def __init__(self): self.header = CBlockHeader() self.nonce = 0 @@ -905,7 +904,7 @@ def __repr__(self): # P2P version of the above that will use witness serialization (for compact # block version 2) class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs): - def serialize(self): + def serialize(self, with_witness=True): return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True) @@ -918,7 +917,7 @@ def calculate_shortid(k0, k1, tx_hash): # This version gets rid of the array lengths, and reinterprets the differential # encoding into indices that can be used for lookup. -class HeaderAndShortIDs(): +class HeaderAndShortIDs: def __init__(self, p2pheaders_and_shortids=None): self.header = CBlockHeader() self.nonce = 0 @@ -926,7 +925,7 @@ def __init__(self, p2pheaders_and_shortids=None): self.prefilled_txn = [] self.use_witness = False - if p2pheaders_and_shortids != None: + if p2pheaders_and_shortids is not None: self.header = p2pheaders_and_shortids.header self.nonce = p2pheaders_and_shortids.nonce self.shortids = p2pheaders_and_shortids.shortids @@ -961,7 +960,9 @@ def get_siphash_keys(self): return [key0, key1] # Version 2 compact blocks use wtxid in shortids (rather than txid) - def initialize_from_block(self, block, nonce=0, prefill_list=[0], use_witness=False): + def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False): + if prefill_list is None: + prefill_list = [0] self.header = CBlockHeader(block) self.nonce = nonce self.prefilled_txn = [PrefilledTransaction(i, block.vtx[i]) for i in prefill_list] @@ -972,7 +973,7 @@ def initialize_from_block(self, block, nonce=0, prefill_list=[0], use_witness=Fa if i not in prefill_list: tx_hash = block.vtx[i].sha256 if use_witness: - tx_hash = block.vtx[i].calc_sha256(with_witness=True) + tx_hash = 
block.vtx[i].calc_x16r(with_witness=True) self.shortids.append(calculate_shortid(k0, k1, tx_hash)) def __repr__(self): @@ -980,11 +981,11 @@ def __repr__(self): repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn)) -class BlockTransactionsRequest(): +class BlockTransactionsRequest: def __init__(self, blockhash=0, indexes=None): self.blockhash = blockhash - self.indexes = indexes if indexes != None else [] + self.indexes = indexes if indexes is not None else [] def deserialize(self, f): self.blockhash = deser_uint256(f) @@ -1020,11 +1021,11 @@ def __repr__(self): return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes)) -class BlockTransactions(): +class BlockTransactions: def __init__(self, blockhash=0, transactions=None): self.blockhash = blockhash - self.transactions = transactions if transactions != None else [] + self.transactions = transactions if transactions is not None else [] def deserialize(self, f): self.blockhash = deser_uint256(f) @@ -1044,7 +1045,7 @@ def __repr__(self): # Objects that correspond to messages on the wire -class msg_version(): +class MsgVersion: command = b"version" def __init__(self): @@ -1087,7 +1088,7 @@ def deserialize(self, f): # Relay field is optional for version 70001 onwards try: self.nRelay = struct.unpack(" -class msg_headers(): +class MsgHeaders: command = b"headers" def __init__(self, headers=None): @@ -1432,7 +1455,7 @@ def __repr__(self): return "msg_headers(headers=%s)" % repr(self.headers) -class msg_reject(): +class MsgReject: command = b"reject" REJECT_MALFORMED = 1 @@ -1464,7 +1487,7 @@ def __repr__(self): % (self.message, self.code, self.reason, self.data) -class msg_feefilter(): +class MsgFeeFilter: command = b"feefilter" def __init__(self, feerate=0): @@ -1482,7 +1505,7 @@ def __repr__(self): return "msg_feefilter(feerate=%08x)" % self.feerate -class msg_sendcmpct(): +class MsgSendCmpct: command = b"sendcmpct" def __init__(self): @@ -1503,7 +1526,7 @@ 
def __repr__(self): return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version) -class msg_cmpctblock(): +class MsgCmpctBlock: command = b"cmpctblock" def __init__(self, header_and_shortids=None): @@ -1522,7 +1545,7 @@ def __repr__(self): return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids) -class msg_getblocktxn(): +class MsgGetBlockTxn: command = b"getblocktxn" def __init__(self): @@ -1541,7 +1564,7 @@ def __repr__(self): return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request)) -class msg_blocktxn(): +class MsgBlockTxn: command = b"blocktxn" def __init__(self): @@ -1559,17 +1582,17 @@ def __repr__(self): return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions)) -class msg_witness_blocktxn(msg_blocktxn): +class MsgWitnessBlocktxn(MsgBlockTxn): def serialize(self): r = b"" r += self.block_transactions.serialize(with_witness=True) return r -class NodeConnCB(): +class NodeConnCB: """Callback and helper functions for P2P connection to a ravend node. - Individual testcases should subclass this and override the on_* methods + Individual test cases should subclass this and override the on_* methods if they want to alter message handling behaviour. 
""" @@ -1655,6 +1678,8 @@ def on_headers(self, conn, message): pass def on_mempool(self, conn): pass + def on_notfound(self, conn, message): pass + def on_pong(self, conn, message): pass def on_reject(self, conn, message): pass @@ -1666,7 +1691,7 @@ def on_sendheaders(self, conn, message): pass def on_tx(self, conn, message): pass def on_inv(self, conn, message): - want = msg_getdata() + want = MsgGetdata() for i in message.inv: if i.type != 0: want.inv.append(i) @@ -1675,7 +1700,7 @@ def on_inv(self, conn, message): def on_ping(self, conn, message): if conn.ver_send > BIP0031_VERSION: - conn.send_message(msg_pong(message.nonce)) + conn.send_message(MsgPong(message.nonce)) def on_verack(self, conn, message): conn.ver_recv = conn.ver_send @@ -1683,7 +1708,7 @@ def on_verack(self, conn, message): def on_version(self, conn, message): if message.nVersion >= 209: - conn.send_message(msg_verack()) + conn.send_message(MsgVerack()) conn.ver_send = min(MY_VERSION, message.nVersion) if message.nVersion < 209: conn.ver_recv = conn.ver_send @@ -1720,7 +1745,7 @@ def wait_for_inv(self, expected_inv, timeout=60): test_function = lambda: self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash - wait_until(test_function, timeout=timeout, lock=mininode_lock) + wait_until(test_function, timeout=timeout, lock=mininode_lock, err_msg="wait_for_inv") def wait_for_verack(self, timeout=60): test_function = lambda: self.message_count["verack"] @@ -1740,7 +1765,7 @@ def send_and_ping(self, message): # Sync up with the node def sync_with_ping(self, timeout=60): - self.send_message(msg_ping(nonce=self.ping_counter)) + self.send_message(MsgPing(nonce=self.ping_counter)) test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter wait_until(test_function, err_msg="Sync with Ping", timeout=timeout, lock=mininode_lock) self.ping_counter += 1 
@@ -1750,29 +1775,31 @@ def sync_with_ping(self, timeout=60): # This class provides an interface for a p2p connection to a specified node class NodeConn(asyncore.dispatcher): messagemap = { - b"version": msg_version, - b"verack": msg_verack, - b"addr": msg_addr, - b"alert": msg_alert, - b"inv": msg_inv, - b"getdata": msg_getdata, - b"getblocks": msg_getblocks, - b"tx": msg_tx, - b"block": msg_block, - b"getaddr": msg_getaddr, - b"ping": msg_ping, - b"pong": msg_pong, - b"headers": msg_headers, - b"getheaders": msg_getheaders, - b"reject": msg_reject, - b"mempool": msg_mempool, - b"feefilter": msg_feefilter, - b"sendheaders": msg_sendheaders, - b"sendcmpct": msg_sendcmpct, - b"cmpctblock": msg_cmpctblock, - b"getblocktxn": msg_getblocktxn, - b"blocktxn": msg_blocktxn + b"version": MsgVersion, + b"verack": MsgVerack, + b"addr": MsgAddr, + b"alert": MsgAlert, + b"inv": MsgInv, + b"getdata": MsgGetdata, + b"getblocks": MsgGetBlocks, + b"tx": MsgTx, + b"block": MsgBlock, + b"getaddr": MsgGetAddr, + b"ping": MsgPing, + b"pong": MsgPong, + b"headers": MsgHeaders, + b"getheaders": MsgGetHeaders, + b"reject": MsgReject, + b"mempool": MsgMempool, + b"notfound": MsgNotFound, + b"feefilter": MsgFeeFilter, + b"sendheaders": MsgSendHeaders, + b"sendcmpct": MsgSendCmpct, + b"cmpctblock": MsgCmpctBlock, + b"getblocktxn": MsgGetBlockTxn, + b"blocktxn": MsgBlockTxn } + MAGIC_BYTES = { "mainnet": b"\x52\x41\x56\x4e", # mainnet "testnet3": b"\x45\x50\x4f\x45", # testnet3 @@ -1798,7 +1825,7 @@ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE if send_version: # stuff version msg into sendbuf - vt = msg_version() + vt = MsgVersion() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport @@ -1810,7 +1837,7 @@ def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE try: self.connect((dstaddr, dstport)) - except: + except OSError: self.handle_close() self.rpc = rpc @@ -1827,7 +1854,7 @@ def 
handle_close(self): self.sendbuf = b"" try: self.close() - except: + except OSError: pass self.cb.on_close(self) @@ -1844,7 +1871,7 @@ def writable(self): with mininode_lock: pre_connection = self.state == "connecting" length = len(self.sendbuf) - return (length > 0 or pre_connection) + return length > 0 or pre_connection def handle_write(self): with mininode_lock: @@ -1858,7 +1885,7 @@ def handle_write(self): try: sent = self.send(self.sendbuf) - except: + except OSError: self.handle_close() return self.sendbuf = self.sendbuf[sent:] @@ -1875,7 +1902,6 @@ def got_data(self): return command = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] msglen = struct.unpack(" 2**32 struct_size = 40 if is_64bits else 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) max_possible = 8 # initial value while True: - bytes = max_possible * struct_size - names = array.array('B', b'\0' * bytes) + byte_data = max_possible * struct_size + names = array.array('B', b'\0' * byte_data) outbytes = struct.unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, # SIOCGIFCONF - struct.pack('iL', bytes, names.buffer_info()[0]) + struct.pack('iL', byte_data, names.buffer_info()[0]) ))[0] - if outbytes == bytes: + if outbytes == byte_data: max_possible *= 2 else: break @@ -113,11 +115,11 @@ def all_interfaces(): for i in range(0, outbytes, struct_size)] def addr_to_hex(addr): - ''' + """ Convert string IPv4 or IPv6 address to binary address as returned by get_bind_addrs. Very naive implementation that certainly doesn't work for all IPv6 variants. - ''' + """ if '.' in addr: # IPv4 addr = [int(x) for x in addr.split('.')] elif ':' in addr: # IPv6 @@ -142,9 +144,9 @@ def addr_to_hex(addr): return hexlify(bytearray(addr)).decode('ascii') def test_ipv6_local(): - ''' + """ Check for (local) IPv6 support. - ''' + """ import socket # By using SOCK_DGRAM this will not actually make a connection, but it will # fail if there is no route to IPv6 localhost. 
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index e4cfaaac20..dcf137b107 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -3,16 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Functionality to build scripts, as well as SignatureHash(). + +""" +Functionality to build scripts, as well as signature_hash(). This file is modified from python-ravenlib. """ -from .mininode import (CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string) +from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string from binascii import hexlify import hashlib import sys + bchr = chr bord = ord if sys.version > '3': @@ -52,15 +55,15 @@ def encode_op_pushdata(d): raise ValueError("Data too long to encode in a PUSHDATA op") @staticmethod - def encode_op_n(n): + def encode_op_n(number): """Encode a small integer op, returning an opcode""" - if not (0 <= n <= 16): - raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n) + if not (0 <= number <= 16): + raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % number) - if n == 0: + if number == 0: return OP_0 else: - return CScriptOp(OP_1 + n-1) + return CScriptOp(OP_1 + number - 1) def decode_op_n(self): """Decode a small integer opcode, returning an integer""" @@ -88,13 +91,13 @@ def __repr__(self): else: return 'CScriptOp(0x%x)' % self - def __new__(cls, n): + def __new__(cls, number): try: - return _opcode_instances[n] + return _opcode_instances[number] except IndexError: - assert len(_opcode_instances) == n - _opcode_instances.append(super(CScriptOp, cls).__new__(cls, n)) - return _opcode_instances[n] + assert len(_opcode_instances) == number + _opcode_instances.append(super(CScriptOp, 
cls).__new__(cls, number)) + return _opcode_instances[number] # Populate opcode instance table for n in range(0xff+1): @@ -374,7 +377,7 @@ def __init__(self, msg, data): super(CScriptTruncatedPushDataError, self).__init__(msg) # This is used, eg, for blockchain heights in coinbase scripts (bip34) -class CScriptNum(): +class CScriptNum: def __init__(self, d=0): self.value = d @@ -385,7 +388,7 @@ def encode(obj): return bytes(r) neg = obj.value < 0 absvalue = -obj.value if neg else obj.value - while (absvalue): + while absvalue: r.append(absvalue & 0xff) absvalue >>= 8 if r[-1] & 0x80: @@ -411,7 +414,7 @@ def __coerce_instance(cls, other): if isinstance(other, CScriptOp): other = bchr(other) elif isinstance(other, CScriptNum): - if (other.value == 0): + if other.value == 0: other = bchr(CScriptOp(OP_0)) else: other = CScriptNum.encode(other) @@ -468,44 +471,42 @@ def raw_iter(self): if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: - datasize = None - pushdata_type = None if opcode < OP_PUSHDATA1: pushdata_type = 'PUSHDATA(%d)' % opcode - datasize = opcode + data_size = opcode elif opcode == OP_PUSHDATA1: pushdata_type = 'PUSHDATA1' if i >= len(self): raise CScriptInvalidError('PUSHDATA1: missing data length') - datasize = bord(self[i]) + data_size = bord(self[i]) i += 1 elif opcode == OP_PUSHDATA2: pushdata_type = 'PUSHDATA2' if i + 1 >= len(self): raise CScriptInvalidError('PUSHDATA2: missing data length') - datasize = bord(self[i]) + (bord(self[i+1]) << 8) + data_size = bord(self[i]) + (bord(self[i+1]) << 8) i += 2 elif opcode == OP_PUSHDATA4: pushdata_type = 'PUSHDATA4' if i + 3 >= len(self): raise CScriptInvalidError('PUSHDATA4: missing data length') - datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24) + data_size = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24) i += 4 else: assert False # shouldn't happen - data = bytes(self[i:i+datasize]) + data = 
bytes(self[i:i+data_size]) # Check for truncation - if len(data) < datasize: + if len(data) < data_size: raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data) - i += datasize + i += data_size yield (opcode, data, sop_idx) @@ -558,25 +559,25 @@ def _repr(o): return "CScript([%s])" % ', '.join(ops) - def GetSigOpCount(self, fAccurate): + def get_sig_op_count(self, f_accurate): """Get the SigOp count. fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. Note that this is consensus-critical. """ - n = 0 - lastOpcode = OP_INVALIDOPCODE + number = 0 + last_opcode = OP_INVALIDOPCODE for (opcode, data, sop_idx) in self.raw_iter(): if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): - n += 1 + number += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): - if fAccurate and (OP_1 <= lastOpcode <= OP_16): - n += opcode.decode_op_n() + if f_accurate and (OP_1 <= last_opcode <= OP_16): + number += opcode.decode_op_n() else: - n += 20 - lastOpcode = opcode - return n + number += 20 + last_opcode = opcode + return number SIGHASH_ALL = 1 @@ -584,8 +585,10 @@ def GetSigOpCount(self, fAccurate): SIGHASH_SINGLE = 3 SIGHASH_ANYONECANPAY = 0x80 -def FindAndDelete(script, sig): - """Consensus critical, see FindAndDelete() in Satoshi codebase""" + +# noinspection PyUnusedLocal +def find_and_delete(script, sig): + """Consensus critical, see find_and_delete() in Satoshi codebase""" r = b'' last_sop_idx = sop_idx = 0 skip = True @@ -602,97 +605,97 @@ def FindAndDelete(script, sig): return CScript(r) -def SignatureHash(script, txTo, inIdx, hashtype): - """Consensus-correct SignatureHash +def signature_hash(script, tx_to, in_idx, hash_type): + """Consensus-correct signature_hash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) """ - HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + hash_one = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' - if inIdx >= len(txTo.vin): - return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) - txtmp = CTransaction(txTo) + if in_idx >= len(tx_to.vin): + return hash_one, "inIdx %d out of range (%d)" % (in_idx, len(tx_to.vin)) + tx_tmp = CTransaction(tx_to) - for txin in txtmp.vin: + for txin in tx_tmp.vin: txin.scriptSig = b'' - txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) + tx_tmp.vin[in_idx].scriptSig = find_and_delete(script, CScript(OP_CODESEPARATOR)) - if (hashtype & 0x1f) == SIGHASH_NONE: - txtmp.vout = [] + if (hash_type & 0x1f) == SIGHASH_NONE: + tx_tmp.vout = [] - for i in range(len(txtmp.vin)): - if i != inIdx: - txtmp.vin[i].nSequence = 0 + for i in range(len(tx_tmp.vin)): + if i != in_idx: + tx_tmp.vin[i].nSequence = 0 - elif (hashtype & 0x1f) == SIGHASH_SINGLE: - outIdx = inIdx - if outIdx >= len(txtmp.vout): - return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) + elif (hash_type & 0x1f) == SIGHASH_SINGLE: + out_idx = in_idx + if out_idx >= len(tx_tmp.vout): + return hash_one, "outIdx %d out of range (%d)" % (out_idx, len(tx_tmp.vout)) - tmp = txtmp.vout[outIdx] - txtmp.vout = [] - for i in range(outIdx): - txtmp.vout.append(CTxOut(-1)) - txtmp.vout.append(tmp) + tmp = tx_tmp.vout[out_idx] + tx_tmp.vout = [] + for i in range(out_idx): + tx_tmp.vout.append(CTxOut(-1)) + tx_tmp.vout.append(tmp) - for i in range(len(txtmp.vin)): - if i != inIdx: - txtmp.vin[i].nSequence = 0 + for i in range(len(tx_tmp.vin)): + if i != in_idx: + tx_tmp.vin[i].nSequence = 0 - if hashtype & SIGHASH_ANYONECANPAY: - tmp = txtmp.vin[inIdx] - txtmp.vin = [] - txtmp.vin.append(tmp) 
+ if hash_type & SIGHASH_ANYONECANPAY: + tmp = tx_tmp.vin[in_idx] + tx_tmp.vin = [] + tx_tmp.vin.append(tmp) - s = txtmp.serialize() - s += struct.pack(b" 0: args.append("-connect=127.0.0.1:" + str(p2p_port(0))) @@ -431,7 +420,7 @@ def _initialize_chain(self): from_dir = os.path.join(self.options.cachedir, "node" + str(i)) to_dir = os.path.join(self.options.tmpdir, "node" + str(i)) shutil.copytree(from_dir, to_dir) - initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in raven.conf + initialize_data_dir(self.options.tmpdir, i) # Overwrite port/rpcport in raven.conf def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. @@ -439,7 +428,7 @@ def _initialize_chain_clean(self): Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): - initialize_datadir(self.options.tmpdir, i) + initialize_data_dir(self.options.tmpdir, i) class ComparisonTestFramework(RavenTestFramework): @@ -450,6 +439,9 @@ class ComparisonTestFramework(RavenTestFramework): - 2 binaries: 1 test binary, 1 ref binary - n>2 binaries: 1 test binary, n-1 ref binaries""" + def run_test(self): + pass + def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index fd9720842a..5351213739 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -3,6 +3,7 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Class for ravend node under test""" import decimal @@ -11,15 +12,11 @@ import json import logging import os +import re import subprocess import time -from .util import ( - assert_equal, - get_rpc_proxy, - rpc_url, - wait_until, -) +from .util import assert_equal, get_rpc_proxy, rpc_url, wait_until from .authproxy import JSONRPCException RAVEND_PROC_WAIT_TIMEOUT = 60 @@ -51,7 +48,7 @@ def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mo self.binary = binary self.stderr = stderr self.coverage_dir = coverage_dir - # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly. + # Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly. self.extra_args = extra_args self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i] @@ -64,6 +61,7 @@ def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mo self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) self.cleanup_on_exit = True # Whether to kill the node when this object goes away + self.p2ps = [] def __del__(self): # Ensure that we don't leave any ravend processes lying around after @@ -155,6 +153,31 @@ def is_node_stopped(self): def wait_until_stopped(self, timeout=RAVEND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, err_msg="Wait until Stopped", timeout=timeout) + def assert_debug_log(self, expected_msgs, timeout=2): + time_end = time.time() + timeout + debug_log = os.path.join(self.datadir, self.chain, 'debug.log') + with open(debug_log, encoding='utf-8') as dl: + dl.seek(0, 2) + prev_size = dl.tell() + + yield + + while True: + found = True + with 
open(debug_log, encoding='utf-8') as dl: + dl.seek(prev_size) + log = dl.read() + print_log = " - " + "\n - ".join(log.splitlines()) + for expected_msg in expected_msgs: + if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: + found = False + if found: + return + if time.time() >= time_end: + break + time.sleep(0.05) + self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log)) + def node_encrypt_wallet(self, passphrase): """"Encrypts the wallet. diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 963eaf7944..e6981db4a9 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -3,6 +3,7 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Helpful routines for regression testing.""" from base64 import b64encode @@ -18,48 +19,56 @@ import subprocess from subprocess import CalledProcessError import time - +import socket +from contextlib import closing from . 
import coverage from .authproxy import AuthServiceProxy, JSONRPCException logger = logging.getLogger("TestFramework.utils") +########################################################################################## +# Assert functions +########################################################################################## +def assert_approx(v, vexp, vspan=0.00001): + """Assert that `v` is within `vspan` of `vexp`""" + if v < vexp - vspan: + raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan))) + if v > vexp + vspan: + raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan))) -# Assert functions -################## def assert_contains(val, arr): if not (val in arr): - raise AssertionError("val %s not in arr" % (val)) + raise AssertionError("val %s not in arr" % val) def assert_does_not_contain(val, arr): - if (val in arr): - raise AssertionError("val %s is in arr" % (val)) + if val in arr: + raise AssertionError("val %s is in arr" % val) -def assert_contains_pair(key, val, dict): - if not (key in dict and val == dict[key]): +def assert_contains_pair(key, val, dict_data): + if not (key in dict_data and val == dict_data[key]): raise AssertionError("k/v pair (%s,%s) not in dict" % (key, val)) -def assert_contains_key(key, dict): - if not (key in dict): - raise AssertionError("key %s is not in dict" % (key)) +def assert_contains_key(key, dict_data): + if not key in dict_data: + raise AssertionError("key %s is not in dict" % key) -def assert_does_not_contain_key(key, dict): - if (key in dict): - raise AssertionError("key %s is in dict" % (key)) +def assert_does_not_contain_key(key, dict_data): + if key in dict_data: + raise AssertionError("key %s is in dict" % key) -def assert_fee_amount(fee, tx_size, fee_per_kB): +def assert_fee_amount(fee, tx_size, fee_per_kb): """Assert the fee was in range""" - target_fee = tx_size * fee_per_kB / 1000 + target_fee = tx_size * fee_per_kb / 1000 if fee < target_fee: 
raise AssertionError("Fee of %s RVN too low! (Should be %s RVN)" % (str(fee), str(target_fee))) # allow the wallet's estimation to be at most 2 bytes off - if fee > (tx_size + 2) * fee_per_kB / 1000: + if fee > (tx_size + 2) * fee_per_kb / 1000: raise AssertionError("Fee of %s RVN too high! (Should be %s RVN)" % (str(fee), str(target_fee))) @@ -98,7 +107,6 @@ def assert_raises_message(exc, message, fun, *args, **kwds): def assert_raises_process_error(returncode, output, fun, *args, **kwds): """Execute a process and asserts the process return code and output. - Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. @@ -210,18 +218,19 @@ def assert_array_result(object_array, to_match, expected, should_not_find=False) def assert_happening(date_str, within_secs=120): """ Make sure date_str happened withing within_secs seconds of now. - Assumes date_str is in rpc results format e.g. '2019-11-07 17:50:06' and assumed to represent UTC. + Assumes date_str is in rpc results cust_format e.g. '2019-11-07 17:50:06' and assumed to represent UTC. Using a big default to eliminate inaccurate wall clocks... 
""" - format = '%Y-%m-%d %H:%M:%S' - then = datetime.strptime(date_str, format).replace(tzinfo=timezone.utc) + cust_format = '%Y-%m-%d %H:%M:%S' + then = datetime.strptime(date_str, cust_format).replace(tzinfo=timezone.utc) now = datetime.now(timezone.utc) diff_secs = (now - then).total_seconds() if abs(diff_secs) > within_secs: raise AssertionError("More than expected %s second difference between %s and now(%s) (%ss)" % (within_secs, date_str, now, diff_secs)) -# Utility functions -################### +########################################################################################## +# Utility functions +########################################################################################## def check_json_precision(): """Make sure json library being used does not lose precision converting RVN values""" @@ -295,9 +304,9 @@ def wait_until(predicate, *, err_msg, attempts=float('inf'), timeout=float('inf' assert_greater_than(time.ctime(timeout), time.ctime(), err_msg + " ~~ Exceeded Timeout") raise RuntimeError('Unreachable') - -# RPC/P2P connection constants and functions -############################################ +########################################################################################## +# RPC/P2P connection constants and functions +########################################################################################## # The maximum number of nodes a single test can spawn MAX_NODES = 8 @@ -305,13 +314,36 @@ def wait_until(predicate, *, err_msg, attempts=float('inf'), timeout=float('inf' PORT_MIN = 11000 # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 - +# List to store P2P ports +p2p_ports = [-1, -1, -1, -1, -1, -1, -1, -1] +# List to store RPC ports +rpc_ports = [-1, -1, -1, -1, -1, -1, -1, -1] class PortSeed: # Must be initialized with a unique integer for each process n = None +def find_free_port(): + """ + Ask the system for a free port. + In case of error return error message. 
+ :return: {Tuple} + """ + port = None + error = {} + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + try: + s.bind(('', 0)) + sock_name = s.getsockname() + if type(sock_name) is tuple and len(sock_name) == 2: + port = sock_name[1] + except socket.error as e: + error = {'errno': e.errno, 'msg': str(e)} + + return port, error + + def get_rpc_proxy(url, node_number, timeout=None, coverage_dir=None): """ Args: @@ -337,16 +369,25 @@ def get_rpc_proxy(url, node_number, timeout=None, coverage_dir=None): def p2p_port(n): - assert (n <= MAX_NODES) - return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + if p2p_ports[n] is -1: + # Port isn't in the list, find one that is available + p2p_ports[n] = find_free_port()[0] + return p2p_ports[n] + else: + return p2p_ports[n] def rpc_port(n): - return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + if rpc_ports[n] is -1: + # Port isn't in the list, find one that is available + rpc_ports[n] = find_free_port()[0] + return rpc_ports[n] + else: + return rpc_ports[n] -def rpc_url(datadir, i, rpchost=None): - rpc_u, rpc_p = get_auth_cookie(datadir) +def rpc_url(data_dir, i, rpchost=None): + rpc_u, rpc_p = get_auth_cookie(data_dir) host = '127.0.0.1' port = rpc_port(i) if rpchost: @@ -357,11 +398,11 @@ def rpc_url(datadir, i, rpchost=None): host = rpchost return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) +########################################################################################## +# Node functions +########################################################################################## -# Node functions -################ - -def initialize_datadir(dirname, n): +def initialize_data_dir(dirname, n): datadir = os.path.join(dirname, "node" + str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) @@ -391,17 +432,17 @@ def get_auth_cookie(datadir): password = line.split("=")[1].strip("\n") if 
os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f: - userpass = f.read() - split_userpass = userpass.split(':') - user = split_userpass[0] - password = split_userpass[1] + user_pass = f.read() + split_user_pass = user_pass.split(':') + user = split_user_pass[0] + password = split_user_pass[1] if user is None or password is None: raise ValueError("No RPC credentials") return user, password -def log_filename(dirname, n_node, logname): - return os.path.join(dirname, "node" + str(n_node), "regtest", logname) +def log_filename(dirname, n_node, log_name): + return os.path.join(dirname, "node" + str(n_node), "regtest", log_name) def get_bip9_status(node, key): @@ -479,16 +520,15 @@ def sync_blocks(rpc_connections, *, wait=1, timeout=60): # earlier. max_height = max(x.getblockcount() for x in rpc_connections) start_time = cur_time = time.time() + tips = None while cur_time <= start_time + timeout: tips = [r.waitforblockheight(max_height, int(wait * 1000)) for r in rpc_connections] if all(t["height"] == max_height for t in tips): if all(t["hash"] == tips[0]["hash"] for t in tips): return - raise AssertionError("Block sync failed, mismatched block hashes:{}".format( - "".join("\n {!r}".format(tip) for tip in tips))) + raise AssertionError("Block sync failed, mismatched block hashes:{}".format("".join("\n {!r}".format(tip) for tip in tips))) cur_time = time.time() - raise AssertionError("Block sync to height {} timed out:{}".format( - max_height, "".join("\n {!r}".format(tip) for tip in tips))) + raise AssertionError("Block sync to height {} timed out:{}".format(max_height, "".join("\n {!r}".format(tip) for tip in tips))) def sync_chain(rpc_connections, *, wait=1, timeout=60): @@ -521,9 +561,9 @@ def sync_mempools(rpc_connections, *, wait=1, timeout=60): timeout -= wait raise AssertionError("Mempool sync failed") - -# Transaction/Block functions -############################# 
+########################################################################################## +# Transaction/Block functions +########################################################################################## def find_output(node, txid, amount): """ diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py new file mode 100755 index 0000000000..aa6bb32884 --- /dev/null +++ b/test/functional/test_framework/wallet_util.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Useful util functions for testing the wallet""" + +from collections import namedtuple + +Key = namedtuple('Key', ['privkey', + 'pubkey', + 'p2pkh_script', + 'p2pkh_addr', + 'p2wpkh_script', + 'p2wpkh_addr', + 'p2sh_p2wpkh_script', + 'p2sh_p2wpkh_redeem_script', + 'p2sh_p2wpkh_addr']) + +Multisig = namedtuple('Multisig', ['privkeys', + 'pubkeys', + 'p2sh_script', + 'p2sh_addr', + 'redeem_script', + 'p2wsh_script', + 'p2wsh_addr', + 'p2sh_p2wsh_script', + 'p2sh_p2wsh_addr']) + + +def test_address(node, address, **kwargs): + """Get address info for `address` and test whether the returned values are as expected.""" + addr_info = node.validateaddress(address) + for key, value in kwargs.items(): + if value is None: + if key in addr_info.keys(): + raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key)) + elif addr_info[key] != value: + raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value)) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index d53e8754cc..e949b22721 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers 
# Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Run regression test suite. + +""" +Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. @@ -13,7 +15,9 @@ For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:RavenTestFramework.main`. + """ + from collections import deque import argparse import configparser @@ -30,6 +34,7 @@ # Formatting. Default colors to empty strings. BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") + try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) @@ -55,14 +60,8 @@ TEST_EXIT_SKIPPED = 77 EXTENDED_SCRIPTS = [ - # These tests are not run by the travis build process. + # These tests are not run by the build process. # Longest test should go first, to favor running tests in parallel - # 'p2p_acceptblock.py', - # 'feature_rbf.py', - # 'feature_assumevalid.py', - # 'mempool_packages.py', - # 'feature_bip_softforks.py', # use this for future soft fork testing - # 'feature_pruning.py', # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 20m vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 'feature_fee_estimation.py', # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 5m vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv @@ -70,23 +69,8 @@ ] BASE_SCRIPTS= [ - # Scripts that are run by the travis build process. + # Scripts that are run by the build process. 
# Longest test should go first, to favor running tests in parallel - # 'p2p_fingerprint.py', TODO - fix mininode rehash methods to use X16R - # 'p2p_invalid_block.py', TODO - fix mininode rehash methods to use X16R - # 'p2p_invalid_tx.py', TODO - fix mininode rehash methods to use X16R - # 'feature_segwit.py', TODO - fix mininode rehash methods to use X16R - # 'p2p_sendheaders.py', TODO - fix mininode rehash methods to use X16R - # 'feature_nulldummy.py', TODO - fix mininode rehash methods to use X16R - # 'mining_basic.py', TODO - fix mininode rehash methods to use X16R - # 'feature_dersig.py', TODO - fix mininode rehash methods to use X16R - # 'feature_cltv.py', TODO - fix mininode rehash methods to use X16R - # 'p2p_fullblock.py', TODO - fix comptool.TestInstance timeout - # 'p2p_compactblocks.py', TODO - refactor to assume segwit is always active - # 'p2p_segwit.py', TODO - refactor to assume segwit is always active - # 'feature_csv_activation.py', TODO - currently testing softfork activations, we need to test the features - # 'wallet_bumpfee.py', TODO - Now fails because we removed RBF - # 'example_test.py', # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 2m vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 'wallet_backup.py', 'wallet_hd.py', @@ -95,6 +79,7 @@ 'feature_maxuploadtarget.py', # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 45s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 'rpc_fundrawtransaction.py', + 'wallet_create_tx.py', # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 30s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 'feature_rewards.py', 'wallet_basic.py', @@ -117,28 +102,32 @@ 'mempool_persist.py', 'rpc_timestampindex.py', 'wallet_listreceivedby.py', + 'wallet_reorgsrestore.py', 'interface_rest.py', 'wallet_keypool_topup.py', 'wallet_import_rescan.py', 'wallet_abandonconflict.py', + 'wallet_groups.py', 'rpc_blockchain.py', 'p2p_feefilter.py', 'p2p_leak.py', - 'p2p_versionbits.py', + 'feature_versionbits_warning.py', 
'rpc_spentindex.py', 'feature_rawassettransactions.py', 'wallet_importmulti.py', - 'wallet_accounts.py', + 'wallet_labels.py', + 'wallet_import_with_label.py', # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 5s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv 'wallet_listtransactions.py', 'feature_minchainwork.py', 'wallet_encryption.py', 'feature_listmyassets.py', 'mempool_reorg.py', - 'rpc_merkle_blocks.py', + 'rpc_txoutproof.py', 'feature_reindex.py', 'rpc_decodescript.py', 'wallet_keypool.py', + 'rpc_setban.py', 'wallet_listsinceblock.py', 'wallet_zapwallettxes.py', 'wallet_multiwallet.py', @@ -159,6 +148,7 @@ 'rpc_preciousblock.py', 'feature_notifications.py', 'rpc_net.py', + 'rpc_misc.py', 'interface_raven_cli.py', 'mempool_resurrect.py', 'rpc_signrawtransaction.py', @@ -166,7 +156,9 @@ 'wallet_txn_clone.py --mineblock', 'rpc_signmessage.py', 'rpc_deprecated.py', + 'wallet_coinbase_category.py', 'wallet_txn_doublespend.py', + 'feature_shutdown.py', 'wallet_disable.py', 'interface_http.py', 'mempool_spend_coinbase.py', @@ -174,11 +166,38 @@ 'p2p_mempool.py', 'rpc_named_arguments.py', 'rpc_uptime.py', - 'rpc_assettransfer.py' + 'rpc_assettransfer.py', + 'feature_loadblock.py', + 'p2p_leak_tx.py' # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] +SKIPPED_TESTS = [ + # List of tests that are not going to be run (usually means test is broken) + 'example_test.py', + 'feature_assumevalid.py', + 'feature_bip_softforks.py', # use this for future soft fork testing + 'feature_block.py', #TODO - fix comptool.TestInstance timeout + 'feature_cltv.py', #TODO - fix mininode rehash methods to use X16R + 'feature_csv_activation.py', #TODO - currently testing softfork activations, we need to test the features + 'feature_dersig.py', #TODO - fix mininode rehash methods to use X16R + 'feature_nulldummy.py', #TODO - fix mininode rehash methods to use X16R + 'feature_pruning.py', + 
'feature_rbf.py', + 'feature_segwit.py', #TODO - fix mininode rehash methods to use X16R + 'mempool_packages.py', + 'mining_basic.py', #TODO - fix mininode rehash methods to use X16R + 'p2p_compactblocks.py', #TODO - refactor to assume segwit is always active + 'p2p_fingerprint.py', #TODO - fix mininode rehash methods to use X16R + 'p2p_invalid_block.py', #TODO - fix mininode rehash methods to use X16R + 'p2p_invalid_tx.py', #TODO - fix mininode rehash methods to use X16R + 'p2p_segwit.py', #TODO - refactor to assume segwit is always active + 'p2p_sendheaders.py', #TODO - fix mininode rehash methods to use X16R + 'p2p_unrequested_blocks.py', + 'wallet_bumpfee.py', #TODO - Now fails because we removed RBF +] + # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS @@ -192,25 +211,24 @@ def main(): # Parse arguments and pass through unrecognised args - parser = argparse.ArgumentParser(add_help=False, - usage='%(prog)s [test_runner.py options] [script options] [scripts]', - description=__doc__, - epilog='Help text and arguments for individual test script:', - formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('--ansi', action='store_true', default=sys.stdout.isatty(), help="Use ANSI colors and dots in output (enabled by default when standard output is a TTY)") + parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, + epilog='Help text and arguments for individual test script:', formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument('--ansi', action='store_true', default=sys.stdout.isatty(), help='Use ANSI colors and dots in output (enabled by default when standard output is a TTY)') parser.add_argument('--combinedlogslen', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.') - 
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') - parser.add_argument('--exclude', help='specify a comma-separated-list of scripts to exclude.') - parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') - parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure') - parser.add_argument('--filter', help='filter scripts to run by regular expression') - parser.add_argument('--force', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') - parser.add_argument('--help', action='store_true', help='print help text and exit') - parser.add_argument('--jobs', type=int, default=get_cpu_count(), help='how many test scripts to run in parallel. Default=.' + str(get_cpu_count())) - parser.add_argument('--keepcache', action='store_true', help='the default behavior is to flush the cache directory on startup. 
--keepcache retains the cache from the previous test-run.') - parser.add_argument('--onlyextended', action='store_true', help='run only the extended test suite') - parser.add_argument('--quiet', action='store_true', help='only print results summary and failure logs') - parser.add_argument('--tmpdirprefix', default=tempfile.gettempdir(), help="Root directory for data") + parser.add_argument('--coverage', action='store_true', help='Generate a basic coverage report for the RPC interface.') + parser.add_argument('--exclude', metavar='', help='Specify a comma-separated-list of scripts to exclude.') + parser.add_argument('--extended', action='store_true', help='Run the extended test suite in addition to the basic tests.') + parser.add_argument('--failfast', action='store_true', help='Stop execution after the first test failure.') + parser.add_argument('--filter', metavar='', help='Filter scripts to run by regular expression.') + parser.add_argument('--force', action='store_true', help='Run tests even on platforms where they are disabled by default (e.g. windows).') + parser.add_argument('--help', action='store_true', help='Print help text and exit.') + parser.add_argument('--jobs', type=int, metavar='', default=get_cpu_count(), help='How many test scripts to run in parallel. Default=.' + str(get_cpu_count())) + parser.add_argument('--keepcache', action='store_true', help='The default behavior is to flush the cache directory on startup. 
--keepcache retains the cache from the previous test-run.') + parser.add_argument('--list', action='store_true', help='Print list of tests and exit.') + parser.add_argument('--loop', type=int, metavar='n', default=1, help='Run(loop) the tests n number of times.') + parser.add_argument('--onlyextended', action='store_true', help='Run only the extended test suite.') + parser.add_argument('--quiet', action='store_true', help='Only print results summary and failure logs.') + parser.add_argument('--tmpdirprefix', metavar='', default=tempfile.gettempdir(), help='Root directory for data.') # Setup colours for ANSI terminals @@ -224,13 +242,13 @@ def main(): # args to be passed on always start with two dashes; tests are the remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] - passon_args = [arg for arg in unknown_args if arg[:2] == "--"] + pass_on_args = [arg for arg in unknown_args if arg[:2] == "--"] # Read config generated by configure. config = configparser.ConfigParser() configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile, encoding="utf8")) - passon_args.append("--configfile=%s" % configfile) + pass_on_args.append("--configfile=%s" % configfile) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG @@ -257,82 +275,94 @@ def main(): print("Rerun `configure` with --enable-wallet, --with-cli and --with-daemon and rerun make") sys.exit(0) - # Build list of tests - test_list = [] - if tests: - # Individual tests have been specified. Run specified tests that exist - # in the ALL_SCRIPTS list. Accept names with or without a .py extension. - # Specified tests can contain wildcards, but in that case the supplied - # paths should be coherent, e.g. the same path as that provided to call - # test_runner.py. 
Examples: - # `test/functional/test_runner.py test/functional/wallet*` - # `test/functional/test_runner.py ./test/functional/wallet*` - # `test_runner.py wallet*` - # but not: - # `test/functional/test_runner.py wallet*` - # Multiple wildcards can be passed: - # `test_runner.py tool* mempool*` - for test in tests: - script = test.split("/")[-1] - script = script + ".py" if ".py" not in script else script - if script in ALL_SCRIPTS: - test_list.append(script) - else: - print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test)) - elif args.extended: - # Include extended tests - test_list += ALL_SCRIPTS - else: - # Run base tests only - test_list += BASE_SCRIPTS - - # Remove the test cases that the user has explicitly asked to exclude. - if args.exclude: - exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')] - for exclude_test in exclude_tests: - # Remove .py and .py --arg from the test list - exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test] - for exclude_item in exclude_list: - test_list.remove(exclude_item) - if not exclude_list: - print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test)) - - if args.filter: - test_list = list(filter(re.compile(args.filter).search, test_list)) - - if not test_list: - print("No valid test scripts specified. Check that your test is in one " - "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") - sys.exit(0) - - if args.help: - # Print help for test_runner.py, then print help of the first script (with args removed) and exit. 
- parser.print_help() - subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h']) - sys.exit(0) - - check_script_list(config["environment"]["SRCDIR"]) - check_script_prefixes() - - if not args.keepcache: - shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True) - - run_tests( - test_list=test_list, - src_dir=config["environment"]["SRCDIR"], - build_dir=config["environment"]["BUILDDIR"], - exeext=config["environment"]["EXEEXT"], - tmpdir=tmpdir, - use_term_control=args.ansi, - jobs=args.jobs, - enable_coverage=args.coverage, - args=passon_args, - combined_logs_len=args.combinedlogslen, - failfast=args.failfast - ) - - -def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, use_term_control, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False): + # Loop the running of tests + for i in range(0, args.loop): + print("Test Loop ", i+1, "of ", args.loop) + last_loop = False + if i+1 == args.loop: + last_loop = True + + # Build list of tests + test_list = [] + if tests: + # Individual tests have been specified. Run specified tests that exist + # in the ALL_SCRIPTS list. Accept names with or without a .py extension. + # Specified tests can contain wildcards, but in that case the supplied + # paths should be coherent, e.g. the same path as that provided to call + # test_runner.py. 
Examples: + # `test/functional/test_runner.py test/functional/wallet*` + # `test/functional/test_runner.py ./test/functional/wallet*` + # `test_runner.py wallet*` + # but not: + # `test/functional/test_runner.py wallet*` + # Multiple wildcards can be passed: + # `test_runner.py tool* mempool*` + for test in tests: + script = test.split("/")[-1] + script = script + ".py" if ".py" not in script else script + if script in ALL_SCRIPTS: + test_list.append(script) + else: + print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test)) + elif args.extended: + # Include extended tests + test_list += ALL_SCRIPTS + else: + # Run base tests only + test_list += BASE_SCRIPTS + + # Remove the test cases that the user has explicitly asked to exclude. + if args.exclude: + exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')] + for exclude_test in exclude_tests: + # Remove .py and .py --arg from the test list + exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test] + for exclude_item in exclude_list: + test_list.remove(exclude_item) + if not exclude_list: + print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test)) + + if args.filter: + test_list = list(filter(re.compile(args.filter).search, test_list)) + + if not test_list: + print("No valid test scripts specified. Check that your test is in one " + "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") + sys.exit(0) + + if args.help: + # Print help for test_runner.py, then print help of the first script (with args removed) and exit. 
+ parser.print_help() + subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h']) + sys.exit(0) + + if args.list: + print(ALL_SCRIPTS) + sys.exit(0) + + check_script_list(config["environment"]["SRCDIR"]) + check_script_prefixes() + + if not args.keepcache: + shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True) + + run_tests( + test_list=test_list, + src_dir=config["environment"]["SRCDIR"], + build_dir=config["environment"]["BUILDDIR"], + exeext=config["environment"]["EXEEXT"], + tmpdir=tmpdir, + use_term_control=args.ansi, + jobs=args.jobs, + enable_coverage=args.coverage, + args=pass_on_args, + combined_logs_len=args.combinedlogslen, + failfast=args.failfast, + last_loop=last_loop + ) + + +def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, use_term_control, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, last_loop=False): # Warn if ravend is already running (unix only) if args is None: args = [] @@ -446,7 +476,8 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, use_term_control, j # processes which need to be killed. job_queue.kill_and_join() - sys.exit(not all_passed) + if last_loop: + sys.exit(not all_passed) def print_results(test_results, max_len_name, runtime): @@ -472,6 +503,7 @@ def print_results(test_results, max_len_name, runtime): print(results) +# noinspection PyTypeChecker class TestHandler: """ Trigger the test scripts passed in via the list. 
@@ -503,10 +535,7 @@ def get_next(self): tmpdir_arg = ["--tmpdir={}".format(test_dir)] self.jobs.append((test, time.time(), - subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed_arg + tmpdir_arg, - universal_newlines=True, - stdout=log_stdout, - stderr=log_stderr), + subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), test_dir, log_stdout, log_stderr)) @@ -519,7 +548,7 @@ def get_next(self): time.sleep(.5) for job in self.jobs: (name, start_time, proc, test_dir, log_out, log_err) = job - if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > 20 * 60: + if int(time.time() - start_time) > 20 * 60: # Timeout individual tests after 20 minutes (to stop tests hanging and not # providing useful output. proc.send_signal(signal.SIGINT) @@ -538,7 +567,6 @@ def get_next(self): if self.use_term_control: clear_line = '\r' + (' ' * dot_count) + '\r' print(clear_line, end='', flush=True) - dot_count = 0 return TestResult(name, status, int(time.time() - start_time)), test_dir, stdout, stderr if self.use_term_control: @@ -617,19 +645,16 @@ def check_script_list(src_dir): not being run by pull-tester.py.""" script_dir = src_dir + '/test/functional/' python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"]) - missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS))) + missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS + SKIPPED_TESTS))) if len(missed_tests) != 0: - print("%sWARNING!%s The following scripts are not being run:\n%s. \nCheck the test lists in test_runner.py." 
% (BOLD[1], BOLD[0], "\n".join(missed_tests))) - if os.getenv('TRAVIS') == 'true': - # On travis this warning is an error to prevent merging incomplete commits into master - sys.exit(1) + print("%sWARNING!%s The following scripts are not being run:\n%s \nCheck the test lists in test_runner.py." % (BOLD[1], BOLD[0], "\n".join(missed_tests))) def get_cpu_count(): try: import multiprocessing return multiprocessing.cpu_count() - except: + except ImportError: return 4 diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py index a5afd49b57..dd2817e457 100755 --- a/test/functional/wallet_abandonconflict.py +++ b/test/functional/wallet_abandonconflict.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the abandontransaction RPC. + +""" +Test the abandontransaction RPC. The abandontransaction RPC marks a transaction and all its in-wallet descendants as abandoned which allows their inputs to be respent. It can be @@ -11,8 +13,9 @@ which are not included in a block and are not currently in the mempool. It has no effect on transactions which are already conflicted or abandoned. 
""" + from test_framework.test_framework import RavenTestFramework -from test_framework.util import (sync_blocks, Decimal, sync_mempools, disconnect_nodes, assert_equal, connect_nodes) +from test_framework.util import sync_blocks, Decimal, sync_mempools, disconnect_nodes, assert_equal, connect_nodes class AbandonConflictTest(RavenTestFramework): def set_test_params(self): @@ -30,9 +33,9 @@ def run_test(self): self.nodes[1].generate(1) sync_blocks(self.nodes) - newbalance = self.nodes[0].getbalance() - assert(balance - newbalance < Decimal("0.01")) #no more than fees lost - balance = newbalance + new_balance = self.nodes[0].getbalance() + assert(balance - new_balance < Decimal("0.01")) #no more than fees lost + balance = new_balance # Disconnect nodes so node0's transactions don't get into node1's mempool disconnect_nodes(self.nodes[0], 1) @@ -42,14 +45,10 @@ def run_test(self): nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) - inputs =[] + inputs = [{"txid": txA, "vout": nA}, {"txid": txB, "vout": nB}] # spend 10btc outputs from txA and txB - inputs.append({"txid":txA, "vout":nA}) - inputs.append({"txid":txB, "vout":nB}) - outputs = {} + outputs = {self.nodes[0].getnewaddress(): Decimal("14.99998"), self.nodes[1].getnewaddress(): Decimal("5")} - outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") - outputs[self.nodes[1].getnewaddress()] = Decimal("5") signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) @@ -57,18 +56,15 @@ def run_test(self): nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998")) #Create a child tx spending AB1 and C - inputs = [] - inputs.append({"txid":txAB1, "vout":nAB}) - 
inputs.append({"txid":txC, "vout":nC}) - outputs = {} - outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") + inputs = [{"txid": txAB1, "vout": nAB}, {"txid": txC, "vout": nC}] + outputs = {self.nodes[0].getnewaddress(): Decimal("24.9996")} signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs)) txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) # In mempool txs from self should increase balance from change - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996")) - balance = newbalance + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance - Decimal("30") + Decimal("24.9996")) + balance = new_balance # Restart the node with a higher min relay fee so the parent tx is no longer in mempool # TODO: redo with eviction @@ -81,57 +77,55 @@ def run_test(self): # Not in mempool txs from self should only reduce balance # inputs are still spent, but change not received - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("24.9996")) + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance - Decimal("24.9996")) # Unconfirmed received funds that are not in mempool, also shouldn't show # up in unconfirmed balance - unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance() - assert_equal(unconfbalance, newbalance) + unconf_balance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance() + assert_equal(unconf_balance, new_balance) # Also shouldn't show up in listunspent assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]) - balance = newbalance + balance = new_balance # Abandon original transaction and verify inputs are available again # including that the child tx was also abandoned self.nodes[0].abandontransaction(txAB1) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance + Decimal("30")) - balance = 
newbalance + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance + Decimal("30")) + balance = new_balance - # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned + # Verify that even with a low min relay fee, the tx is not re-accepted from wallet on startup once abandoned self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.00001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(self.nodes[0].getbalance(), balance) - # But if its received again then it is unabandoned + # But if it's received again then it is un-abandoned + # And since now in mempool, the change is available + # But its child tx remains abandoned self.nodes[0].sendrawtransaction(signed["hex"]) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998")) - balance = newbalance + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance - Decimal("20") + Decimal("14.99998")) + balance = new_balance - # Send child tx again so its unabandoned + # Send child tx again so it's un-abandoned self.nodes[0].sendrawtransaction(signed2["hex"]) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) - balance = newbalance + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) + balance = new_balance # Remove using high relay fee again self.stop_node(0) self.start_node(0, extra_args=["-minrelaytxfee=0.0001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance - Decimal("24.9996")) - balance = newbalance + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance - Decimal("24.9996")) + balance = new_balance # Create a double spend of AB1 by spending again from only A's 10 output # 
Mine double spend from node 1 - inputs =[] - inputs.append({"txid":txA, "vout":nA}) - outputs = {} - outputs[self.nodes[1].getnewaddress()] = Decimal("9.998") + inputs = [{"txid": txA, "vout": nA}] + outputs = {self.nodes[1].getnewaddress(): Decimal("9.998")} tx = self.nodes[0].createrawtransaction(inputs, outputs) signed = self.nodes[0].signrawtransaction(tx) self.nodes[1].sendrawtransaction(signed["hex"]) @@ -141,19 +135,19 @@ def run_test(self): sync_blocks(self.nodes) # Verify that B and C's 10 RVN outputs are available for spending again because AB1 is now conflicted - newbalance = self.nodes[0].getbalance() - assert_equal(newbalance, balance + Decimal("20")) - balance = newbalance + new_balance = self.nodes[0].getbalance() + assert_equal(new_balance, balance + Decimal("20")) + balance = new_balance # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 RVN output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) - newbalance = self.nodes[0].getbalance() - #assert_equal(newbalance, balance - Decimal("10")) + new_balance = self.nodes[0].getbalance() + #assert_equal(new_balance, balance - Decimal("10")) self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") self.log.info("conflicted has not resumed causing its inputs to be seen as spent. 
See Issue #7315") - self.log.info(str(balance) + " -> " + str(newbalance) + " ?") + self.log.info(str(balance) + " -> " + str(new_balance) + " ?") if __name__ == '__main__': AbandonConflictTest().main() diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 441f51a770..98eb8aacf9 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the wallet backup features. + +""" +Test the wallet backup features. Test case is: 4 nodes. 1 2 and 3 send transactions between each other, @@ -31,11 +33,11 @@ Shutdown again, restore using importwallet, and confirm again balances are correct. """ + from random import randint import shutil - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes, Decimal, sync_mempools, sync_blocks, os, assert_equal) +from test_framework.util import connect_nodes, Decimal, sync_mempools, sync_blocks, os, assert_equal class WalletBackupTest(RavenTestFramework): def set_test_params(self): @@ -53,7 +55,7 @@ def setup_network(self, split=False): self.sync_all() def one_send(self, from_node, to_address): - if (randint(1,2) == 1): + if randint(1, 2) == 1: amount = Decimal(randint(1,10)) / Decimal(10) self.nodes[from_node].sendtoaddress(to_address, amount) diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 71844ee318..ad2dbab5b6 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -3,18 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the wallet.""" + from test_framework.test_framework import RavenTestFramework -from test_framework.util import (connect_nodes_bi, - assert_fee_amount, - assert_equal, - assert_raises_rpc_error, - Decimal, - count_bytes, - sync_mempools, - sync_blocks, - time, - assert_array_result) +from test_framework.util import connect_nodes_bi, assert_fee_amount, assert_equal, assert_raises_rpc_error, Decimal, count_bytes, sync_mempools, sync_blocks, time, assert_array_result class WalletTest(RavenTestFramework): def set_test_params(self): @@ -48,9 +41,9 @@ def run_test(self): self.nodes[0].generate(1) - walletinfo = self.nodes[0].getwalletinfo() - assert_equal(walletinfo['immature_balance'], 5000) - assert_equal(walletinfo['balance'], 0) + wallet_info = self.nodes[0].getwalletinfo() + assert_equal(wallet_info['immature_balance'], 5000) + assert_equal(wallet_info['balance'], 0) self.sync_all([self.nodes[0:3]]) self.nodes[1].generate(101) @@ -101,8 +94,8 @@ def run_test(self): # but 10 will go to node2 and the rest will go to node0 balance = self.nodes[0].getbalance() assert_equal({txout1['value'], txout2['value']}, {10, balance}) - walletinfo = self.nodes[0].getwalletinfo() - assert_equal(walletinfo['immature_balance'], 0) + wallet_info = self.nodes[0].getwalletinfo() + assert_equal(wallet_info['immature_balance'], 0) # Have node0 mine a block, thus it will collect its own fee. 
self.nodes[0].generate(1) @@ -221,7 +214,7 @@ def run_test(self): raw_tx = self.nodes[1].createrawtransaction(inputs, outputs) raw_tx = raw_tx.replace("c04fbbde19", "0000000000") #replace 1111.11 with 0.0 (int32) - dec_raw_tx = self.nodes[1].decoderawtransaction(raw_tx) + self.nodes[1].decoderawtransaction(raw_tx) signed_raw_tx = self.nodes[1].signrawtransaction(raw_tx) dec_raw_tx = self.nodes[1].decoderawtransaction(signed_raw_tx['hex']) zero_value_txid = dec_raw_tx['txid'] @@ -280,7 +273,7 @@ def run_test(self): sync_blocks(self.nodes[0:3]) node_2_bal += 2 - #tx should be added to balance because after restarting the nodes tx should be broadcastet + #tx should be added to balance because after restarting the nodes tx should be broadcasted assert_equal(self.nodes[2].getbalance(), node_2_bal) #send a tx with value in a string (PR#6380 +) @@ -344,7 +337,7 @@ def run_test(self): blocks = self.nodes[0].generate(2) self.sync_all([self.nodes[0:3]]) balance_nodes = [self.nodes[i].getbalance() for i in range(3)] - block_count = self.nodes[0].getblockcount() + self.nodes[0].getblockcount() # Check modes: # - True: unicode escaped as \u.... 
@@ -381,11 +374,11 @@ def run_test(self): # Get all non-zero utxos together chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()] - singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True) + single_tx_id = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True) self.nodes[0].generate(1) node0_balance = self.nodes[0].getbalance() # Split into two chains - rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')}) + rawtx = self.nodes[0].createrawtransaction([{"txid":single_tx_id, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')}) signedtx = self.nodes[0].signrawtransaction(rawtx) self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py index cf9af01b6c..6cdaa3db16 100755 --- a/test/functional/wallet_bumpfee.py +++ b/test/functional/wallet_bumpfee.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the bumpfee RPC. + +""" +Test the bumpfee RPC. Verifies that the bumpfee RPC creates replacement transactions successfully when its preconditions are met, and returns appropriate errors in other cases. @@ -15,20 +17,12 @@ make assumptions about execution order. 
""" +import io from feature_segwit import send_to_witness from test_framework.test_framework import RavenTestFramework -from test_framework import blocktools +from test_framework.blocktools import create_block, create_coinbase from test_framework.mininode import CTransaction -from test_framework.util import (connect_nodes_bi, - assert_equal, - Decimal, - sync_mempools, - assert_raises_rpc_error, - assert_greater_than, - bytes_to_hex_str, - hex_str_to_bytes) - -import io +from test_framework.util import connect_nodes_bi, assert_equal, Decimal, sync_mempools, assert_raises_rpc_error, assert_greater_than, bytes_to_hex_str, hex_str_to_bytes # Sequence number that is BIP 125 opt-in and BIP 68-compliant BIP125_SEQUENCE_NUMBER = 0xfffffffd @@ -72,8 +66,8 @@ def run_test(self): dest_address = peer_node.getnewaddress() test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address) test_segwit_bumpfee_succeeds(rbf_node, dest_address) - test_nonrbf_bumpfee_fails(peer_node, dest_address) - test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address) + test_non_rbf_bumpfee_fails(peer_node, dest_address) + test_not_mine_bumpfee_fails(rbf_node, peer_node, dest_address) test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address) test_small_output_fails(rbf_node, dest_address) test_dust_to_fee(rbf_node, dest_address) @@ -87,25 +81,25 @@ def run_test(self): def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address): - rbfid = spend_one_input(rbf_node, dest_address) - rbftx = rbf_node.gettransaction(rbfid) + rbf_id = spend_one_input(rbf_node, dest_address) + rbf_tx = rbf_node.gettransaction(rbf_id) sync_mempools((rbf_node, peer_node)) - assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool() - bumped_tx = rbf_node.bumpfee(rbfid) + assert rbf_id in rbf_node.getrawmempool() and rbf_id in peer_node.getrawmempool() + bumped_tx = rbf_node.bumpfee(rbf_id) assert_equal(bumped_tx["errors"], []) - assert bumped_tx["fee"] - abs(rbftx["fee"]) 
> 0 + assert bumped_tx["fee"] - abs(rbf_tx["fee"]) > 0 # check that bumped_tx propagates, original tx was evicted and has a wallet conflict sync_mempools((rbf_node, peer_node)) assert bumped_tx["txid"] in rbf_node.getrawmempool() assert bumped_tx["txid"] in peer_node.getrawmempool() - assert rbfid not in rbf_node.getrawmempool() - assert rbfid not in peer_node.getrawmempool() - oldwtx = rbf_node.gettransaction(rbfid) - assert len(oldwtx["walletconflicts"]) > 0 + assert rbf_id not in rbf_node.getrawmempool() + assert rbf_id not in peer_node.getrawmempool() + old_w_tx = rbf_node.gettransaction(rbf_id) + assert len(old_w_tx["walletconflicts"]) > 0 # check wallet transaction replaces and replaced_by values - bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"]) - assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"]) - assert_equal(bumpedwtx["replaces_txid"], rbfid) + bumped_w_tx = rbf_node.gettransaction(bumped_tx["txid"]) + assert_equal(old_w_tx["replaced_by_txid"], bumped_tx["txid"]) + assert_equal(bumped_w_tx["replaces_txid"], rbf_id) def test_segwit_bumpfee_succeeds(rbf_node, dest_address): @@ -115,7 +109,7 @@ def test_segwit_bumpfee_succeeds(rbf_node, dest_address): segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001")) segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress()) rbf_node.addwitnessaddress(segwit_out["address"]) - segwitid = send_to_witness( + segwit_id = send_to_witness( use_p2wsh=False, node=rbf_node, utxo=segwit_in, @@ -124,30 +118,30 @@ def test_segwit_bumpfee_succeeds(rbf_node, dest_address): amount=Decimal("0.0009"), sign=True) - rbfraw = rbf_node.createrawtransaction([{ - 'txid': segwitid, + rbf_raw = rbf_node.createrawtransaction([{ + 'txid': segwit_id, 'vout': 0, "sequence": BIP125_SEQUENCE_NUMBER }], {dest_address: Decimal("0.0005"), rbf_node.getrawchangeaddress(): Decimal("0.0003")}) - rbfsigned = rbf_node.signrawtransaction(rbfraw) - rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"]) - 
assert rbfid in rbf_node.getrawmempool() + rbf_signed = rbf_node.signrawtransaction(rbf_raw) + rbf_id = rbf_node.sendrawtransaction(rbf_signed["hex"]) + assert rbf_id in rbf_node.getrawmempool() - bumped_tx = rbf_node.bumpfee(rbfid) + bumped_tx = rbf_node.bumpfee(rbf_id) assert bumped_tx["txid"] in rbf_node.getrawmempool() - assert rbfid not in rbf_node.getrawmempool() + assert rbf_id not in rbf_node.getrawmempool() -def test_nonrbf_bumpfee_fails(peer_node, dest_address): +def test_non_rbf_bumpfee_fails(peer_node, dest_address): # cannot replace a non RBF transaction (from node which did not enable RBF) - not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000")) - assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid) + not_rbf_id = peer_node.sendtoaddress(dest_address, Decimal("0.00090000")) + assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbf_id) -def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address): +def test_not_mine_bumpfee_fails(rbf_node, peer_node, dest_address): # cannot bump fee unless the tx has only inputs that we own. 
- # here, the rbftx has a peer_node coin and then adds a rbf_node input + # here, the rbf_tx has a peer_node coin and then adds a rbf_node input # Note that this test depends upon the RPC code checking input ownership prior to change outputs # (since it can't use fundrawtransaction, it lacks a proper change output) utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)] @@ -161,9 +155,9 @@ def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address): rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val}) signedtx = rbf_node.signrawtransaction(rawtx) signedtx = peer_node.signrawtransaction(signedtx["hex"]) - rbfid = rbf_node.sendrawtransaction(signedtx["hex"]) + rbf_id = rbf_node.sendrawtransaction(signedtx["hex"]) assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet", - rbf_node.bumpfee, rbfid) + rbf_node.bumpfee, rbf_id) def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address): @@ -178,31 +172,31 @@ def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address) def test_small_output_fails(rbf_node, dest_address): # cannot bump fee with a too-small output - rbfid = spend_one_input(rbf_node, dest_address) - rbf_node.bumpfee(rbfid, {"totalFee": 50000}) + rbf_id = spend_one_input(rbf_node, dest_address) + rbf_node.bumpfee(rbf_id, {"totalFee": 50000}) - rbfid = spend_one_input(rbf_node, dest_address) - assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001}) + rbf_id = spend_one_input(rbf_node, dest_address) + assert_raises_rpc_error(-4, "Change output is too small", rbf_node.bumpfee, rbf_id, {"totalFee": 50001}) def test_dust_to_fee(rbf_node, dest_address): # check that if output is reduced to dust, it will be converted to fee # the bumped tx sets fee=49,900, but it converts to 50,000 - rbfid = spend_one_input(rbf_node, dest_address) - fulltx = rbf_node.getrawtransaction(rbfid, 1) - bumped_tx = 
rbf_node.bumpfee(rbfid, {"totalFee": 49900}) + rbf_id = spend_one_input(rbf_node, dest_address) + full_tx = rbf_node.getrawtransaction(rbf_id, 1) + bumped_tx = rbf_node.bumpfee(rbf_id, {"totalFee": 49900}) full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1) assert_equal(bumped_tx["fee"], Decimal("0.00050000")) - assert_equal(len(fulltx["vout"]), 2) + assert_equal(len(full_tx["vout"]), 2) assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated def test_settxfee(rbf_node, dest_address): # check that bumpfee reacts correctly to the use of settxfee (paytxfee) - rbfid = spend_one_input(rbf_node, dest_address) + rbf_id = spend_one_input(rbf_node, dest_address) requested_feerate = Decimal("0.00025000") rbf_node.settxfee(requested_feerate) - bumped_tx = rbf_node.bumpfee(rbfid) + bumped_tx = rbf_node.bumpfee(rbf_id) actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"] # Assert that the difference between the requested feerate and the actual # feerate of the bumped transaction is small. 
@@ -212,69 +206,69 @@ def test_settxfee(rbf_node, dest_address): def test_rebumping(rbf_node, dest_address): # check that re-bumping the original tx fails, but bumping the bumper succeeds - rbfid = spend_one_input(rbf_node, dest_address) - bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000}) - assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000}) + rbf_id = spend_one_input(rbf_node, dest_address) + bumped = rbf_node.bumpfee(rbf_id, {"totalFee": 2000}) + assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbf_id, {"totalFee": 3000}) rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000}) def test_rebumping_not_replaceable(rbf_node, dest_address): # check that re-bumping a non-replaceable bump tx fails - rbfid = spend_one_input(rbf_node, dest_address) - bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False}) + rbf_id = spend_one_input(rbf_node, dest_address) + bumped = rbf_node.bumpfee(rbf_id, {"totalFee": 10000, "replaceable": False}) assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"], {"totalFee": 20000}) def test_unconfirmed_not_spendable(rbf_node, rbf_node_address): # check that unconfirmed outputs from bumped transactions are not spendable - rbfid = spend_one_input(rbf_node, rbf_node_address) - rbftx = rbf_node.gettransaction(rbfid)["hex"] - assert rbfid in rbf_node.getrawmempool() - bumpid = rbf_node.bumpfee(rbfid)["txid"] - assert bumpid in rbf_node.getrawmempool() - assert rbfid not in rbf_node.getrawmempool() + rbf_id = spend_one_input(rbf_node, rbf_node_address) + rbf_tx = rbf_node.gettransaction(rbf_id)["hex"] + assert rbf_id in rbf_node.getrawmempool() + bump_id = rbf_node.bumpfee(rbf_id)["txid"] + assert bump_id in rbf_node.getrawmempool() + assert rbf_id not in rbf_node.getrawmempool() # check that outputs from the bump transaction are not spendable # due to the replaces_txid check in CWallet::AvailableCoins - assert_equal([t for t in 
rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], []) + assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bump_id], []) # submit a block with the rbf tx to clear the bump tx out of the mempool, # then call abandon to make sure the wallet doesn't attempt to resubmit the # bump tx, then invalidate the block so the rbf tx will be put back in the # mempool. this makes it possible to check whether the rbf tx outputs are # spendable before the rbf tx is confirmed. - block = submit_block_with_tx(rbf_node, rbftx) - rbf_node.abandontransaction(bumpid) + block = submit_block_with_tx(rbf_node, rbf_tx) + rbf_node.abandontransaction(bump_id) rbf_node.invalidateblock(block.hash) - assert bumpid not in rbf_node.getrawmempool() - assert rbfid in rbf_node.getrawmempool() + assert bump_id not in rbf_node.getrawmempool() + assert rbf_id in rbf_node.getrawmempool() # check that outputs from the rbf tx are not spendable before the # transaction is confirmed, due to the replaced_by_txid check in # CWallet::AvailableCoins - assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], []) + assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbf_id], []) # check that the main output from the rbf tx is spendable after confirmed rbf_node.generate(1) assert_equal( sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False) - if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1) + if t["txid"] == rbf_id and t["address"] == rbf_node_address and t["spendable"]), 1) def test_bumpfee_metadata(rbf_node, dest_address): - rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value") - bumped_tx = rbf_node.bumpfee(rbfid) + rbf_id = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value") + bumped_tx = rbf_node.bumpfee(rbf_id) bumped_wtx = 
rbf_node.gettransaction(bumped_tx["txid"]) assert_equal(bumped_wtx["comment"], "comment value") assert_equal(bumped_wtx["to"], "to value") def test_locked_wallet_fails(rbf_node, dest_address): - rbfid = spend_one_input(rbf_node, dest_address) + rbf_id = spend_one_input(rbf_node, dest_address) rbf_node.walletlock() assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.", - rbf_node.bumpfee, rbfid) + rbf_node.bumpfee, rbf_id) def spend_one_input(node, dest_address): @@ -295,7 +289,7 @@ def submit_block_with_tx(node, tx): tip = node.getbestblockhash() height = node.getblockcount() + 1 block_time = node.getblockheader(tip)["mediantime"] + 1 - block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time) + block = create_block(int(tip, 16), create_coinbase(height), block_time) block.vtx.append(ctx) block.rehash() block.hashMerkleRoot = block.calc_merkle_root() diff --git a/test/functional/wallet_coinbase_category.py b/test/functional/wallet_coinbase_category.py new file mode 100755 index 0000000000..57ee9fef59 --- /dev/null +++ b/test/functional/wallet_coinbase_category.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" +Test coinbase transactions return the correct categories. +Tests listtransactions, listsinceblock, and gettransaction. 
+""" + +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_array_result + +class CoinbaseCategoryTest(RavenTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def assert_category(self, category, address, txid, skip): + assert_array_result(self.nodes[0].listtransactions(skip=skip), + {"address": address}, + {"category": category}) + assert_array_result(self.nodes[0].listsinceblock()["transactions"], + {"address": address}, + {"category": category}) + assert_array_result(self.nodes[0].gettransaction(txid)["details"], + {"address": address}, + {"category": category}) + + def run_test(self): + # Generate one block to an address + address = self.nodes[0].getnewaddress() + self.nodes[0].generatetoaddress(1, address) + hash_data = self.nodes[0].getbestblockhash() + txid = self.nodes[0].getblock(hash_data)["tx"][0] + + # Coinbase transaction is immature after 1 confirmation + self.assert_category("immature", address, txid, 0) + + # Mine another 99 blocks on top + self.nodes[0].generate(99) + # Coinbase transaction is still immature after 100 confirmations + self.assert_category("immature", address, txid, 99) + + # Mine one more block + self.nodes[0].generate(1) + # Coinbase transaction is now matured, so category is "generate" + self.assert_category("generate", address, txid, 100) + + # Orphan block that paid to address + self.nodes[0].invalidateblock(hash_data) + # Coinbase transaction is now orphaned + self.assert_category("orphan", address, txid, 100) + +if __name__ == '__main__': + CoinbaseCategoryTest().main() diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py new file mode 100755 index 0000000000..cf4a99da7a --- /dev/null +++ b/test/functional/wallet_create_tx.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018-2019 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the 
accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +import time +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error +from test_framework.blocktools import REGTEST_GENISIS_BLOCK_TIME + +class CreateTxWalletTest(RavenTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + + def run_test(self): + self.log.info('Create some old blocks') + self.nodes[0].setmocktime(REGTEST_GENISIS_BLOCK_TIME) + self.nodes[0].generate(200) + self.nodes[0].setmocktime(0) + + self.test_anti_fee_sniping() + self.test_tx_size_too_large() + + def test_anti_fee_sniping(self): + self.log.info('Check that we have some (old) blocks and that anti-fee-sniping is disabled') + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200) + tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))['hex']) + attempts = 0 + while tx['locktime'] < 200: + # for some reason sometimes we don't get the correct locktime on first attempt, retry... 
+ tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))['hex']) + if attempts == 20: + self.log.debug("Exceeded ~10 seconds waiting for tx locktime == 200.") + break + time.sleep(0.5) + attempts += 1 + assert_equal(tx['locktime'], 200) + + self.log.info('Check that anti-fee-sniping is enabled when we mine a recent block') + self.nodes[0].generate(1) + tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1))['hex']) + assert 0 < tx['locktime'] <= 201 + + def test_tx_size_too_large(self): + # More than 10kB of outputs, so that we hit -maxtxfee with a high feerate + outputs = {self.nodes[0].getnewaddress(): 0.000025 for _ in range(400)} + raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs) + + self.log.info('Check maxtxfee in combination with -minrelaytxfee=0.01, -maxtxfee=0.1') + self.restart_node(0, extra_args=['-minrelaytxfee=0.01', '-maxtxfee=0.1']) + assert_raises_rpc_error(-6, "Transaction too large for fee policy", lambda: self.nodes[0].sendmany(fromaccount="", amounts=outputs),) + assert_raises_rpc_error(-4, "Transaction too large for fee policy", lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),) + + self.log.info('Check maxtxfee in combination with -mintxfee=0.01, -maxtxfee=0.1') + self.restart_node(0, extra_args=['-mintxfee=0.01', '-maxtxfee=0.1']) + assert_raises_rpc_error(-6, "Transaction too large for fee policy", lambda: self.nodes[0].sendmany(fromaccount="", amounts=outputs),) + assert_raises_rpc_error(-4, "Transaction too large for fee policy", lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),) + + self.log.info('Check maxtxfee in combination with -paytxfee=0.01, -maxtxfee=0.1') + self.restart_node(0, extra_args=['-paytxfee=0.01', '-maxtxfee=0.1']) + assert_raises_rpc_error(-6, "Transaction too large for fee policy", lambda: 
self.nodes[0].sendmany(fromaccount="", amounts=outputs),) + assert_raises_rpc_error(-4, "Transaction too large for fee policy", lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),) + + + self.log.info('Check maxtxfee in combination with settxfee') + self.restart_node(0, extra_args=['-maxtxfee=0.1']) + self.nodes[0].settxfee(0.01) + assert_raises_rpc_error(-6, "Transaction too large for fee policy", lambda: self.nodes[0].sendmany(fromaccount="", amounts=outputs),) + assert_raises_rpc_error(-4, "Transaction too large for fee policy", lambda: self.nodes[0].fundrawtransaction(hexstring=raw_tx),) + self.nodes[0].settxfee(0) + + +if __name__ == '__main__': + CreateTxWalletTest().main() diff --git a/test/functional/wallet_disable.py b/test/functional/wallet_disable.py index 249259d101..a31b306f58 100755 --- a/test/functional/wallet_disable.py +++ b/test/functional/wallet_disable.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test a node with the -disablewallet option. + +""" +Test a node with the -disablewallet option. - Test that validateaddress RPC works when running with -disablewallet - Test that it is not possible to mine to an invalid address. diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index d44cb44910..b53f29a176 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -3,13 +3,12 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the dumpwallet RPC.""" import os - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error) - +from test_framework.util import assert_equal, assert_raises_rpc_error def read_dump(file_name, addrs, hd_master_addr_old): """ @@ -86,8 +85,7 @@ def run_test (self): result = self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump") assert_equal(result['filename'], os.path.abspath(tmpdir + "/node0/wallet.unencrypted.dump")) - found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \ - read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None) + found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None) assert_equal(found_addr, test_addr_count) # all keys must be in the dump assert_equal(found_addr_chg, 50) # 50 blocks where mined assert_equal(found_addr_rsv, 90*2) # 90 keys plus 100% internal keys @@ -100,8 +98,7 @@ def run_test (self): self.nodes[0].keypoolrefill() self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump") - found_addr, found_addr_chg, found_addr_rsv, _ = \ - read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc) + found_addr, found_addr_chg, found_addr_rsv, _ = read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc) assert_equal(found_addr, test_addr_count) assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now assert_equal(found_addr_rsv, 90*2) diff --git a/test/functional/wallet_encryption.py b/test/functional/wallet_encryption.py index 45fd631995..1b54a0eba1 100755 --- a/test/functional/wallet_encryption.py +++ b/test/functional/wallet_encryption.py @@ -3,12 +3,12 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test Wallet encryption""" import time - from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error) +from test_framework.util import assert_equal, assert_raises_rpc_error class WalletEncryptionTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py new file mode 100755 index 0000000000..399abb80c6 --- /dev/null +++ b/test/functional/wallet_groups.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +"""Test wallet group functionality.""" + +from test_framework.test_framework import RavenTestFramework +from test_framework.mininode import CTransaction, from_hex, to_hex +from test_framework.util import assert_approx, assert_equal + +class WalletGroupTest(RavenTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 3 + self.rpc_timeout = 120 + + + def run_test(self): + # Mine some coins + self.nodes[0].generate(110) + + # Get some addresses from the two nodes + addr1 = [self.nodes[1].getnewaddress() for _ in range(3)] + addr2 = [self.nodes[2].getnewaddress() for _ in range(3)] + addrs = addr1 + addr2 + + # Send 1 + 0.5 coin to each address + [self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs] + [self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs] + + self.nodes[0].generate(1) + self.sync_all() + + # For each node, send 0.2 coins back to 0; + # - node[1] should pick one 0.5 UTXO and leave the rest + # - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a + # given address, and leave the rest + txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) + tx1 = self.nodes[1].getrawtransaction(txid1, True) + # 
txid1 should have 1 input and 2 outputs + assert_equal(1, len(tx1["vin"])) + assert_equal(2, len(tx1["vout"])) + # one output should be 0.2, the other should be ~0.3 + v = [vout["value"] for vout in tx1["vout"]] + v.sort() + assert_approx(v[0], 0.2) + assert_approx(v[1], 0.3, 0.01) + + txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) + tx2 = self.nodes[2].getrawtransaction(txid2, True) + # txid2 should have 1 inputs and 2 outputs + assert_equal(1, len(tx2["vin"])) + assert_equal(2, len(tx2["vout"])) + # one output should be 0.2, the other should be ~0.3 + v = [vout["value"] for vout in tx2["vout"]] + v.sort() + assert_approx(v[0], 0.2) + assert_approx(v[1], 0.3, 0.1) + + # Empty out node2's wallet + self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True) + self.sync_all() + self.nodes[0].generate(1) + + # Fill node2's wallet with 10000 outputs corresponding to the same + # scriptPubKey + for i in range(5): + inputs = [{"txid":"0"*64, "vout":0}] + outputs = {addr2[0]: 0.05} + raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) + tx = from_hex(CTransaction(), raw_tx) + tx.vin = [] + tx.vout = [tx.vout[0]] * 2000 + funded_tx = self.nodes[0].fundrawtransaction(to_hex(tx)) + signed_tx = self.nodes[0].signrawtransaction(funded_tx['hex']) + self.nodes[0].sendrawtransaction(signed_tx['hex']) + self.nodes[0].generate(1) + + self.sync_all() + + # Check that we can create a transaction that only requires ~100 of our + # utxos, without pulling in all outputs and creating a transaction that + # is way too big. 
+ assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5) + +if __name__ == '__main__': + WalletGroupTest().main() diff --git a/test/functional/wallet_hd.py b/test/functional/wallet_hd.py index 9a6fc15e09..e362678fff 100755 --- a/test/functional/wallet_hd.py +++ b/test/functional/wallet_hd.py @@ -3,12 +3,13 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test Hierarchical Deterministic wallet function.""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, connect_nodes_bi) import shutil import os +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, connect_nodes_bi class WalletHDTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py index a67d8f077f..93e0c9eea0 100755 --- a/test/functional/wallet_import_rescan.py +++ b/test/functional/wallet_import_rescan.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test wallet import RPCs. + +""" +Test wallet import RPCs. Test rescan behavior of importaddress, importpubkey, importprivkey, and importmulti RPCs with different types of keys and rescan options. @@ -20,18 +22,21 @@ happened previously.
""" -from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times) - import collections import enum import itertools +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times +# noinspection PyArgumentList Call = enum.Enum("Call", "single multi") +# noinspection PyArgumentList Data = enum.Enum("Data", "address pub priv") +# noinspection PyArgumentList Rescan = enum.Enum("Rescan", "no yes late_timestamp") +# noinspection PyUnboundLocalVariable,PyUnresolvedReferences class Variant(collections.namedtuple("Variant", "call data rescan prune")): """Helper for importing one key and verifying scanned transactions.""" diff --git a/test/functional/wallet_import_with_label.py b/test/functional/wallet_import_with_label.py new file mode 100755 index 0000000000..29c3a21389 --- /dev/null +++ b/test/functional/wallet_import_with_label.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) 2018 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" +Test the behavior of RPC importprivkey on set and unset labels of +addresses. +It tests different cases in which an address is imported with importaddress +with or without a label and then its private key is imported with importprivkey +with and without a label. 
+""" + +from test_framework.test_framework import RavenTestFramework +from test_framework.wallet_util import test_address + +class ImportWithLabel(RavenTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + + def run_test(self): + """Main test logic""" + + self.log.info("Test importaddress with label and importprivkey without label.") + self.log.info("Import a watch-only address with a label.") + address = self.nodes[0].getnewaddress() + label = "Test Label" + self.nodes[1].importaddress(address, label) + test_address(self.nodes[1], address, iswatchonly=True, ismine=False, account=label) + + self.log.info("Import the watch-only address's private key without a label and...") + self.log.info("the address should keep its label.") + priv_key = self.nodes[0].dumpprivkey(address) + self.nodes[1].importprivkey(priv_key) + #test_address(self.nodes[1], address, account=label) + + self.log.info("Test importaddress without label and importprivkey with label.") + self.log.info("Import a watch-only address without a label.") + address2 = self.nodes[0].getnewaddress() + self.nodes[1].importaddress(address2) + test_address(self.nodes[1], address2, iswatchonly=True, ismine=False, account="") + + self.log.info( "Import the watch-only address's private key with a label and...") + self.log.info("the address should have its label updated.") + priv_key2 = self.nodes[0].dumpprivkey(address2) + label2 = "Test Label 2" + self.nodes[1].importprivkey(priv_key2, label2) + test_address(self.nodes[1], address2, account=label2) + + self.log.info("Test importaddress with label and importprivkey with label.") + self.log.info("Import a watch-only address with a label.") + address3 = self.nodes[0].getnewaddress() + label3_addr = "Test Label 3 for importaddress" + self.nodes[1].importaddress(address3, label3_addr) + test_address(self.nodes[1], address3, iswatchonly=True, ismine=False, account=label3_addr) + + self.log.info("Import the watch-only address's 
private key with a label and...") + self.log.info("the address should have its label updated.") + priv_key3 = self.nodes[0].dumpprivkey(address3) + label3_priv = "Test Label 3 for importprivkey" + self.nodes[1].importprivkey(priv_key3, label3_priv) + test_address(self.nodes[1], address3, account=label3_priv) + + self.log.info("Test importprivkey won't label new dests with the same label...") + self.log.info("as others labeled dests for the same key.") + self.log.info("Import a watch-only address with a label.") + address4 = self.nodes[0].getnewaddress("") + label4_addr = "Test Label 4 for importaddress" + self.nodes[1].importaddress(address4, label4_addr) + test_address(self.nodes[1], address4, iswatchonly=True, ismine=False, account=label4_addr) + + self.log.info("Import the watch-only address's private key without a label and...") + self.log.info("New destinations for the key should have an empty label.") + self.log.info("New destinations for the key should have an empty label.") + priv_key4 = self.nodes[0].dumpprivkey(address4) + self.nodes[1].importprivkey(priv_key4) + embedded_addr = self.nodes[1].validateaddress(address4)['address'] + test_address(self.nodes[1], embedded_addr, account="") + + self.stop_nodes() + + +if __name__ == "__main__": + ImportWithLabel().main() diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py index 1bdb6aeeb9..667e6a76c9 100755 --- a/test/functional/wallet_importmulti.py +++ b/test/functional/wallet_importmulti.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the importmulti RPC.""" + from test_framework.test_framework import RavenTestFramework from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error @@ -246,9 +248,9 @@ def run_test (self): assert_equal(address_assert['isscript'], True) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['timestamp'], timestamp) - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] - assert_equal(p2shunspent['spendable'], False) - assert_equal(p2shunspent['solvable'], False) + p2sh_unspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + assert_equal(p2sh_unspent['spendable'], False) + assert_equal(p2sh_unspent['solvable'], False) # P2SH + Redeem script @@ -257,7 +259,7 @@ def run_test (self): sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] @@ -273,9 +275,9 @@ def run_test (self): address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) assert_equal(address_assert['timestamp'], timestamp) - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] - assert_equal(p2shunspent['spendable'], False) - assert_equal(p2shunspent['solvable'], True) + p2sh_unspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + assert_equal(p2sh_unspent['spendable'], False) + assert_equal(p2sh_unspent['solvable'], True) # P2SH + Redeem script + Private Keys + !Watchonly @@ -284,7 +286,7 @@ def run_test (self): sig_address_3 = 
self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] @@ -301,9 +303,9 @@ def run_test (self): address_assert = self.nodes[1].validateaddress(multi_sig_script['address']) assert_equal(address_assert['timestamp'], timestamp) - p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] - assert_equal(p2shunspent['spendable'], False) - assert_equal(p2shunspent['solvable'], True) + p2sh_unspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0] + assert_equal(p2sh_unspent['spendable'], False) + assert_equal(p2sh_unspent['solvable'], True) # P2SH + Redeem script + Private Keys + Watchonly sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) @@ -311,7 +313,7 @@ def run_test (self): sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['address'], sig_address_2['address'], sig_address_3['pubkey']]) self.nodes[1].generate(100) - transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) + self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime'] diff --git a/test/functional/wallet_importprunedfunds.py b/test/functional/wallet_importprunedfunds.py index fc5cb23289..933395b7d6 100755 --- a/test/functional/wallet_importprunedfunds.py +++ b/test/functional/wallet_importprunedfunds.py @@ -3,9 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under 
the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the importprunedfunds and removeprunedfunds RPCs.""" + from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error, Decimal) +from test_framework.util import assert_equal, assert_raises_rpc_error, Decimal class ImportPrunedFundsTest(RavenTestFramework): def set_test_params(self): @@ -24,7 +26,8 @@ def run_test(self): address2 = self.nodes[0].getnewaddress() # privkey address3 = self.nodes[0].getnewaddress() - address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey + # Using privkey + address3_privkey = self.nodes[0].dumpprivkey(address3) #Check only one address address_info = self.nodes[0].validateaddress(address1) @@ -49,38 +52,38 @@ def run_test(self): assert_equal(address_info['ismine'], False) #Send funds to self - txnid1 = self.nodes[0].sendtoaddress(address1, 0.1) + txn_id1 = self.nodes[0].sendtoaddress(address1, 0.1) self.nodes[0].generate(1) - rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex'] - proof1 = self.nodes[0].gettxoutproof([txnid1]) + raw_txn1 = self.nodes[0].gettransaction(txn_id1)['hex'] + proof1 = self.nodes[0].gettxoutproof([txn_id1]) - txnid2 = self.nodes[0].sendtoaddress(address2, 0.05) + txn_id2 = self.nodes[0].sendtoaddress(address2, 0.05) self.nodes[0].generate(1) - rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex'] - proof2 = self.nodes[0].gettxoutproof([txnid2]) + raw_txn2 = self.nodes[0].gettransaction(txn_id2)['hex'] + proof2 = self.nodes[0].gettxoutproof([txn_id2]) - txnid3 = self.nodes[0].sendtoaddress(address3, 0.025) + txn_id3 = self.nodes[0].sendtoaddress(address3, 0.025) self.nodes[0].generate(1) - rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex'] - proof3 = self.nodes[0].gettxoutproof([txnid3]) + raw_txn3 = self.nodes[0].gettransaction(txn_id3)['hex'] + proof3 = self.nodes[0].gettxoutproof([txn_id3]) 
self.sync_all() #Import with no affiliated address - assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1) + assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, raw_txn1, proof1) balance1 = self.nodes[1].getbalance("", 0, True) assert_equal(balance1, Decimal(0)) #Import with affiliated address with no rescan self.nodes[1].importaddress(address2, "add2", False) - self.nodes[1].importprunedfunds(rawtxn2, proof2) + self.nodes[1].importprunedfunds(raw_txn2, proof2) balance2 = self.nodes[1].getbalance("add2", 0, True) assert_equal(balance2, Decimal('0.05')) #Import with private key with no rescan self.nodes[1].importprivkey(privkey=address3_privkey, label="add3", rescan=False) - self.nodes[1].importprunedfunds(rawtxn3, proof3) + self.nodes[1].importprunedfunds(raw_txn3, proof3) balance3 = self.nodes[1].getbalance("add3", 0, False) assert_equal(balance3, Decimal('0.025')) balance3 = self.nodes[1].getbalance("*", 0, True) @@ -98,16 +101,16 @@ def run_test(self): assert_equal(address_info['ismine'], True) #Remove transactions - assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1) + assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txn_id1) balance1 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance1, Decimal('0.075')) - self.nodes[1].removeprunedfunds(txnid2) + self.nodes[1].removeprunedfunds(txn_id2) balance2 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance2, Decimal('0.025')) - self.nodes[1].removeprunedfunds(txnid3) + self.nodes[1].removeprunedfunds(txn_id3) balance3 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance3, Decimal('0.0')) diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py index fb768c9eef..e7ed86f9fe 100755 --- a/test/functional/wallet_keypool.py +++ b/test/functional/wallet_keypool.py @@ -3,10 +3,11 @@ # Copyright (c) 
2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the wallet keypool and interaction with wallet encryption/locking.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error, time) +from test_framework.util import assert_equal, assert_raises_rpc_error, time class KeyPoolTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py index 36846d40c2..88c5fb0edb 100755 --- a/test/functional/wallet_keypool_topup.py +++ b/test/functional/wallet_keypool_topup.py @@ -3,7 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test HD Wallet keypool restore function. + +""" +Test HD Wallet keypool restore function. Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks. @@ -11,10 +13,10 @@ - Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110. - Stop node1, clear the datadir, move wallet file back into the datadir and restart node1. - connect node1 to node0. 
Verify that they sync and node1 receives its funds.""" -import shutil +import shutil from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, connect_nodes_bi, sync_blocks) +from test_framework.util import assert_equal, connect_nodes_bi, sync_blocks class KeypoolRestoreTest(RavenTestFramework): def set_test_params(self): @@ -36,6 +38,8 @@ def run_test(self): self.log.info("Generate keys for wallet") + addr_oldpool = [] + addr_extpool = [] for _ in range(90): addr_oldpool = self.nodes[1].getnewaddress() for _ in range(20): diff --git a/test/functional/wallet_accounts.py b/test/functional/wallet_labels.py similarity index 99% rename from test/functional/wallet_accounts.py rename to test/functional/wallet_labels.py index d5ce3ec926..64683b529c 100755 --- a/test/functional/wallet_accounts.py +++ b/test/functional/wallet_labels.py @@ -3,8 +3,9 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test account RPCs. +""" +Test account RPCs. RPCs tested are: - getaccountaddress - getaddressesbyaccount diff --git a/test/functional/wallet_listreceivedby.py b/test/functional/wallet_listreceivedby.py index 1e4f5342bd..2d16bc9f99 100755 --- a/test/functional/wallet_listreceivedby.py +++ b/test/functional/wallet_listreceivedby.py @@ -3,16 +3,19 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the listreceivedbyaddress RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_array_result, Decimal) +from test_framework.util import assert_array_result, Decimal def get_sub_array_from_array(object_array, to_match): - ''' - Finds and returns a sub array from an array of arrays. 
- to_match should be a unique idetifier of a sub array - ''' + """ + Finds and returns a sub array from an array of arrays. + :param object_array: Array to search + :param to_match: Unique identifier of a sub array + :return Array: array containing sub array + """ for item in object_array: all_match = True for key,value in to_match.items(): @@ -23,15 +26,17 @@ def get_sub_array_from_array(object_array, to_match): return item return [] + +# noinspection PyTypeChecker,PyTypeChecker,PyTypeChecker,PyTypeChecker class ReceivedByTest(RavenTestFramework): def set_test_params(self): self.num_nodes = 2 self.enable_mocktime() def run_test(self): - ''' + """ listreceivedbyaddress Test - ''' + """ # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) @@ -66,30 +71,30 @@ def run_test(self): ''' # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() - txid = self.nodes[0].sendtoaddress(addr, 0.1) + self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() - #Check balance is 0 because of 0 confirmations + # Check balance is 0 because of 0 confirmations balance = self.nodes[1].getreceivedbyaddress(addr) if balance != Decimal("0.0"): - raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance)) + raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f" % balance) - #Check balance is 0.1 + # Check balance is 0.1 balance = self.nodes[1].getreceivedbyaddress(addr,0) if balance != Decimal("0.1"): - raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance)) + raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f" % balance) - #Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress + # Bury Tx under 10 block so it will be returned by the default getreceivedbyaddress self.nodes[1].generate(10) self.sync_all() balance = self.nodes[1].getreceivedbyaddress(addr) if balance != Decimal("0.1"): - raise 
AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance)) + raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f" % balance) ''' listreceivedbyaccount + getreceivedbyaccount Test ''' - #set pre-state + # set pre-state addrArr = self.nodes[1].getnewaddress() account = self.nodes[1].getaccount(addrArr) received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account}) @@ -97,7 +102,7 @@ def run_test(self): raise AssertionError("No accounts found in node") balance_by_account = self.nodes[1].getreceivedbyaccount(account) - txid = self.nodes[0].sendtoaddress(addr, 0.1) + self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # listreceivedbyaccount should return received_by_account_json because of 0 confirmations @@ -108,34 +113,34 @@ def run_test(self): # getreceivedbyaddress should return same balance because of 0 confirmations balance = self.nodes[1].getreceivedbyaccount(account) if balance != balance_by_account: - raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance)) + raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f" % balance) self.nodes[1].generate(10) self.sync_all() # listreceivedbyaccount should return updated account balance assert_array_result(self.nodes[1].listreceivedbyaccount(), - {"account":account}, - {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))}) + {"account":account}, + {"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))}) # getreceivedbyaddress should return updates balance balance = self.nodes[1].getreceivedbyaccount(account) if balance != balance_by_account + Decimal("0.1"): - raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance)) + raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f" % balance) - #Create a new account 
named "mynewaccount" that has a 0 balance + # Create a new account named "mynewaccount" that has a 0 balance self.nodes[1].getaccountaddress("mynewaccount") received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"}) if len(received_by_account_json) == 0: raise AssertionError("No accounts found in node") - # Test includeempty of listreceivedbyaccount + # Test include empty of listreceivedbyaccount if received_by_account_json["amount"] != Decimal("0.0"): raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"])) # Test getreceivedbyaccount for 0 amount accounts balance = self.nodes[1].getreceivedbyaccount("mynewaccount") if balance != Decimal("0.0"): - raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance)) + raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f" % balance) if __name__ == '__main__': ReceivedByTest().main() diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py index 84f6634d25..d36eab030f 100755 --- a/test/functional/wallet_listsinceblock.py +++ b/test/functional/wallet_listsinceblock.py @@ -3,6 +3,7 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test the listsincelast RPC.""" from test_framework.test_framework import RavenTestFramework @@ -23,7 +24,7 @@ def run_test(self): self.test_double_send() def test_reorg(self): - ''' + """ `listsinceblock` did not behave correctly when handed a block that was no longer in the main chain: @@ -49,7 +50,7 @@ def test_reorg(self): range bb1-bb4. This test only checks that [tx0] is present. 
- ''' + """ # Split network into two self.split_network() @@ -60,7 +61,7 @@ def test_reorg(self): # generate on both sides lastblockhash = self.nodes[1].generate(6)[5] self.nodes[2].generate(7) - self.log.debug('lastblockhash=%s' % (lastblockhash)) + self.log.debug('lastblockhash=%s' % lastblockhash) self.sync_all([self.nodes[:2], self.nodes[2:]]) @@ -76,7 +77,7 @@ def test_reorg(self): assert found def test_double_spend(self): - ''' + """ This tests the case where the same UTXO is spent twice on two separate blocks as part of a reorg. @@ -103,7 +104,7 @@ def test_double_spend(self): asked for in listsinceblock, and to iterate back over existing blocks up until the fork point, and to include all transactions that relate to the node wallet. - ''' + """ self.sync_all() @@ -159,7 +160,7 @@ def test_double_spend(self): assert 'removed' not in lsbres2 def test_double_send(self): - ''' + """ This tests the case where the same transaction is submitted twice on two separate blocks as part of a reorg. The former will vanish and the latter will appear as the true transaction (with confirmations dropping @@ -182,7 +183,7 @@ def test_double_send(self): present in a different block. 3. It is listed with a confirmations count of 2 (bb3, bb4), not 3 (aa1, aa2, aa3). - ''' + """ self.sync_all() diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 9465e62927..0c701c79c9 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -3,14 +3,14 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the listtransactions API.""" +from io import BytesIO from decimal import Decimal from test_framework.test_framework import RavenTestFramework -from test_framework.util import (hex_str_to_bytes, assert_array_result, sync_mempools, assert_equal, bytes_to_hex_str) -from test_framework.mininode import (CTransaction, COIN) -from io import BytesIO - +from test_framework.util import hex_str_to_bytes, assert_array_result, sync_mempools, assert_equal, bytes_to_hex_str +from test_framework.mininode import CTransaction, COIN def from_hex(hexstring): @@ -151,7 +151,7 @@ def get_unconfirmed_utxo_entry(node, txid_to_match): inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}] outputs = {self.nodes[1].getnewaddress(): 0.998} tx3 = self.nodes[0].createrawtransaction(inputs, outputs) - tx3_modified = txFromHex(tx3) + tx3_modified = from_hex(tx3) tx3_modified.vin[0].nSequence = 0 tx3 = bytes_to_hex_str(tx3_modified.serialize()) tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex'] diff --git a/test/functional/wallet_reorgsrestore.py b/test/functional/wallet_reorgsrestore.py new file mode 100755 index 0000000000..b953a741c6 --- /dev/null +++ b/test/functional/wallet_reorgsrestore.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +# Copyright (c) 2019 The Bitcoin Core developers +# Copyright (c) 2017-2019 The Raven Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. + +""" +Test tx status in case of reorgs while wallet being shutdown. +Wallet txn status rely on block connection/disconnection for its +accuracy. In case of reorgs happening while wallet being shutdown +block updates are not going to be received. At wallet loading, we +check against chain if confirmed txn are still in chain and change +their status if block in which they have been included has been +disconnected. 
+""" + +from decimal import Decimal +import os +import shutil +from test_framework.test_framework import RavenTestFramework +from test_framework.util import assert_equal, connect_nodes, disconnect_nodes, sync_blocks + +class ReorgsRestoreTest(RavenTestFramework): + def set_test_params(self): + self.num_nodes = 3 + + def run_test(self): + # Send a tx from which to conflict outputs later + txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) + self.nodes[0].generate(1) + sync_blocks(self.nodes) + + # Disconnect node1 from others to reorg its chain later + disconnect_nodes(self.nodes[0], 1) + disconnect_nodes(self.nodes[1], 2) + connect_nodes(self.nodes[0], 2) + + # Send a tx to be unconfirmed later + txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) + tx = self.nodes[0].gettransaction(txid) + self.nodes[0].generate(4) + tx_before_reorg = self.nodes[0].gettransaction(txid) + assert_equal(tx_before_reorg["confirmations"], 4) + + # Disconnect node0 from node2 to broadcast a conflict on their respective chains + disconnect_nodes(self.nodes[0], 2) + n_a = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10")) + inputs = [{"txid": txid_conflict_from, "vout": n_a}] + outputs_1 = dict() + outputs_2 = dict() + + # Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. 
Both spend from txid_conflict_from + outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.98") + outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.98") + conflicted = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs_1)) + conflicting = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs_2)) + + conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"]) + self.nodes[0].generate(1) + conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"]) + self.nodes[2].generate(9) + + # Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted + connect_nodes(self.nodes[0], 2) + sync_blocks([self.nodes[0], self.nodes[2]]) + conflicted = self.nodes[0].gettransaction(conflicted_txid) + conflicting = self.nodes[0].gettransaction(conflicting_txid) + assert_equal(conflicted["confirmations"], -9) + assert_equal(conflicted["walletconflicts"][0], conflicting["txid"]) + + # Node0 wallet is shutdown + self.stop_node(0) + self.start_node(0) + + # The block chain re-orgs and the tx is included in a different block + self.nodes[1].generate(9) + self.nodes[1].sendrawtransaction(tx["hex"]) + self.nodes[1].generate(1) + self.nodes[1].sendrawtransaction(conflicted["hex"]) + self.nodes[1].generate(1) + + # Node0 wallet file is loaded on longest sync'ed node1 + self.stop_node(1) + self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak')) + shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallet.dat')) + self.start_node(1) + tx_after_reorg = self.nodes[1].gettransaction(txid) + # Check that normal confirmed tx is confirmed again but with different blockhash + assert_equal(tx_after_reorg["confirmations"], 2) + assert(tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"]) + conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid) + # Check that conflicted tx is confirmed again 
with blockhash different than previously conflicting tx + assert_equal(conflicted_after_reorg["confirmations"], 1) + assert(conflicting["blockhash"] != conflicted_after_reorg["blockhash"]) + +if __name__ == '__main__': + ReorgsRestoreTest().main() diff --git a/test/functional/wallet_resendtransactions.py b/test/functional/wallet_resendtransactions.py index 849e0854c4..e5ba78688b 100755 --- a/test/functional/wallet_resendtransactions.py +++ b/test/functional/wallet_resendtransactions.py @@ -3,10 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. + """Test resendwallettransactions RPC.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error) +from test_framework.util import assert_equal, assert_raises_rpc_error class ResendWalletTransactionsTest(RavenTestFramework): def set_test_params(self): diff --git a/test/functional/wallet_txn_clone.py b/test/functional/wallet_txn_clone.py index 8b2d6c138b..cb00db3cd9 100755 --- a/test/functional/wallet_txn_clone.py +++ b/test/functional/wallet_txn_clone.py @@ -3,10 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (disconnect_nodes, assert_equal, sync_blocks, connect_nodes) +from test_framework.util import disconnect_nodes, assert_equal, sync_blocks, connect_nodes class TxnMallTest(RavenTestFramework): def set_test_params(self): @@ -78,7 +79,7 @@ def run_test(self): assert_equal(tx1_clone["complete"], True) # Have node0 mine a block, if requested: - if (self.options.mine_block): + if self.options.mine_block: self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) @@ -132,7 +133,7 @@ def run_test(self): # Check node0's total balance; should be same as before the clone, + 100 RVN for 2 matured, # less possible orphaned matured subsidy expected += 10000 - if (self.options.mine_block): + if self.options.mine_block: expected -= 5000 assert_equal(self.nodes[0].getbalance(), expected) assert_equal(self.nodes[0].getbalance("*", 0), expected) diff --git a/test/functional/wallet_txn_doublespend.py b/test/functional/wallet_txn_doublespend.py index e55045538d..3020df720e 100755 --- a/test/functional/wallet_txn_doublespend.py +++ b/test/functional/wallet_txn_doublespend.py @@ -3,10 +3,11 @@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ """Test the wallet accounts properly when there is a double-spend conflict.""" from test_framework.test_framework import RavenTestFramework -from test_framework.util import (disconnect_nodes, assert_equal, Decimal, sync_blocks, find_output, connect_nodes) +from test_framework.util import disconnect_nodes, assert_equal, Decimal, sync_blocks, find_output, connect_nodes class TxnMallTest(RavenTestFramework): def set_test_params(self): @@ -48,17 +49,11 @@ def run_test(self): # First: use raw transaction API to send 1240 RVN to node1_address, # but don't broadcast: doublespend_fee = Decimal('-.02') - rawtx_input_0 = {} - rawtx_input_0["txid"] = fund_foo_txid - rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 121900) - rawtx_input_1 = {} - rawtx_input_1["txid"] = fund_bar_txid - rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 2900) + rawtx_input_0 = {"txid": fund_foo_txid, "vout": find_output(self.nodes[0], fund_foo_txid, 121900)} + rawtx_input_1 = {"txid": fund_bar_txid, "vout": find_output(self.nodes[0], fund_bar_txid, 2900)} inputs = [rawtx_input_0, rawtx_input_1] change_address = self.nodes[0].getnewaddress() - outputs = {} - outputs[node1_address] = 124000 - outputs[change_address] = 124800 - 124000 + doublespend_fee + outputs = {node1_address: 124000, change_address: 124800 - 124000 + doublespend_fee} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) doublespend = self.nodes[0].signrawtransaction(rawtx) assert_equal(doublespend["complete"], True) @@ -68,7 +63,7 @@ def run_test(self): txid2 = self.nodes[0].sendfrom("bar", node1_address, 2000, 0) # Have node0 mine a block: - if (self.options.mine_block): + if self.options.mine_block: self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) diff --git a/test/functional/wallet_zapwallettxes.py b/test/functional/wallet_zapwallettxes.py index 1ea61b270c..3be55c1df5 100755 --- a/test/functional/wallet_zapwallettxes.py +++ b/test/functional/wallet_zapwallettxes.py @@ -3,7 +3,9 
@@ # Copyright (c) 2017-2019 The Raven Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test the zapwallettxes functionality. + +""" +Test the zapwallettxes functionality. - start two ravend nodes - create two transactions on node 0 - one is confirmed and one is unconfirmed. @@ -11,12 +13,13 @@ transactions are still available. - restart node 0 with zapwallettxes and persistmempool, and verify that both the confirmed and the unconfirmed transactions are still available. -- restart node 0 with just zapwallettxed and verify that the confirmed +- restart node 0 with just zapwallettxes and verify that the confirmed transactions are still available, but that the unconfirmed transaction has been zapped. """ + from test_framework.test_framework import RavenTestFramework -from test_framework.util import (assert_equal, assert_raises_rpc_error, wait_until) +from test_framework.util import assert_equal, assert_raises_rpc_error, wait_until class ZapWalletTXesTest (RavenTestFramework): def set_test_params(self):