diff --git a/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py b/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py index 3658677801..e8bcf2b782 100644 --- a/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py +++ b/plenum/test/batching_3pc/catch-up/test_3pc_paused_during_catch_up.py @@ -3,6 +3,7 @@ from plenum.test.test_node import getNonPrimaryReplicas from plenum.test.helper import sdk_send_random_requests + def test_sdk_no_ordering_during_syncup(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): non_primary_replica = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0] diff --git a/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py b/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py index 7a490ab93b..a25370a1bc 100644 --- a/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py +++ b/plenum/test/batching_3pc/catch-up/test_state_reverted_before_catchup.py @@ -2,7 +2,7 @@ from plenum.test.delayers import cDelay from plenum.test.test_node import getNonPrimaryReplicas from plenum.test.batching_3pc.helper import checkNodesHaveSameRoots -from plenum.test.helper import sdk_signed_random_requests, sdk_send_and_check,\ +from plenum.test.helper import sdk_signed_random_requests, sdk_send_and_check, \ sdk_send_random_requests, sdk_get_replies @@ -46,7 +46,6 @@ def test_unordered_state_reverted_before_catchup( reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, tconf.Max3PCBatchSize) sdk_get_replies(looper, reqs, timeout=40) - committed_ledger_during_3pc = non_primary_node.getLedger( ledger_id).tree.root_hash uncommitted_ledger_during_3pc = non_primary_node.getLedger( diff --git a/plenum/test/batching_3pc/conftest.py b/plenum/test/batching_3pc/conftest.py index fdafc6bcb8..4f35a7364a 100644 --- a/plenum/test/batching_3pc/conftest.py +++ b/plenum/test/batching_3pc/conftest.py @@ -1,8 +1,6 @@ import pytest from plenum.test.conftest import getValueFromModule -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected @pytest.fixture(scope="module") @@ -15,9 +13,3 @@ def reset(): request.addfinalizer(reset) return tconf - - -@pytest.fixture(scope="module") -def client(tconf, looper, txnPoolNodeSet, client1, - client1Connected): - return client1Connected diff --git a/plenum/test/batching_3pc/test_basic_batching.py b/plenum/test/batching_3pc/test_basic_batching.py index 1006cc3648..4471e71e7d 100644 --- a/plenum/test/batching_3pc/test_basic_batching.py +++ b/plenum/test/batching_3pc/test_basic_batching.py @@ -22,7 +22,7 @@ def testRequestStaticValidation(tconf, looper, txnPoolNodeSet, node.doStaticValidation(req) -def test3PCOverBatchWithThresholdReqs(tconf, looper, txnPoolNodeSet, client, +def test3PCOverBatchWithThresholdReqs(tconf, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): """ Check that 3 phase commit happens when threshold number of requests are diff --git a/plenum/test/batching_3pc/test_batching_scenarios.py b/plenum/test/batching_3pc/test_batching_scenarios.py index 513f8211dd..3a7c11482d 100644 --- a/plenum/test/batching_3pc/test_batching_scenarios.py +++ b/plenum/test/batching_3pc/test_batching_scenarios.py @@ -22,7 +22,7 @@ def testPrePrepareProcessedInOrder(perf_chk_patched, looper, txnPoolNodeSet, """ tconf = perf_chk_patched pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \ - getNonPrimaryReplicas(txnPoolNodeSet, instId=0) + 
getNonPrimaryReplicas(txnPoolNodeSet, instId=0) otherNodes = [r.node for r in otherR] ppsToDelay = 2 delayeds = 0 diff --git a/plenum/test/blacklist/test_blacklist_client.py b/plenum/test/blacklist/test_blacklist_client.py index f98ab7a18d..b35d3ce784 100644 --- a/plenum/test/blacklist/test_blacklist_client.py +++ b/plenum/test/blacklist/test_blacklist_client.py @@ -12,15 +12,15 @@ def setup(client1): # noinspection PyIncorrectDocstring,PyUnusedLocal,PyShadowingNames -def testDoNotBlacklistClient(setup, looper, nodeSet, up, client1, sent1): +def testDoNotBlacklistClient(setup, looper, txnPoolNodeSet, client1, sent1): """ Client should be not be blacklisted by node on sending an unsigned request """ # No node should blacklist the client def chk(): - for node in nodeSet: + for node in txnPoolNodeSet: assert not node.isClientBlacklisted(client1.name) - timeout = waits.expectedClientToPoolConnectionTimeout(len(nodeSet)) + timeout = waits.expectedClientToPoolConnectionTimeout(len(txnPoolNodeSet)) looper.run(eventually(chk, retryWait=1, timeout=timeout)) diff --git a/plenum/test/blacklist/test_blacklist_node_on_multiple_nominations.py b/plenum/test/blacklist/test_blacklist_node_on_multiple_nominations.py index e442282cf5..575885285e 100644 --- a/plenum/test/blacklist/test_blacklist_node_on_multiple_nominations.py +++ b/plenum/test/blacklist/test_blacklist_node_on_multiple_nominations.py @@ -10,12 +10,11 @@ # noinspection PyIncorrectDocstring,PyUnusedLocal,PyShadowingNames @pytest.mark.skip(reason="SOV-540. Implementation changed.") -def testBlacklistNodeOnMultipleNominations(looper, keySharedNodes, ready): +def testBlacklistNodeOnMultipleNominations(looper, txnPoolNodeSet, ready): """ A node that sends multiple nominations must be blacklisted by other nodes """ - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet # B sends more than 2 nominations for i in range(3): @@ -26,5 +25,5 @@ def chk(): for node in A, C, D: assert node.isNodeBlacklisted(B.name) - timeout = waits.expectedPoolNominationTimeout(len(nodeSet.nodes)) + timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet)) looper.run(eventually(chk, retryWait=1, timeout=timeout)) diff --git a/plenum/test/blacklist/test_blacklist_node_on_multiple_primary_declarations.py b/plenum/test/blacklist/test_blacklist_node_on_multiple_primary_declarations.py index db2ae44244..1aabcb1887 100644 --- a/plenum/test/blacklist/test_blacklist_node_on_multiple_primary_declarations.py +++ b/plenum/test/blacklist/test_blacklist_node_on_multiple_primary_declarations.py @@ -12,14 +12,12 @@ # noinspection PyIncorrectDocstring @pytest.mark.skip(reason="SOV-541. 
Implementation changed.") def testBlacklistNodeOnMultiplePrimaryDeclarations(looper, - keySharedNodes, - ready): + txnPoolNodeSet): """ A node that sends multiple primary declarations must be blacklisted by other nodes """ - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet # B sends more than 2 primary declarations for i in range(3): @@ -30,5 +28,5 @@ def chk(): for node in A, C, D: assert node.isNodeBlacklisted(B.name) - timeout = waits.expectedPoolNominationTimeout(len(nodeSet.nodes)) + timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet)) looper.run(eventually(chk, retryWait=1, timeout=timeout)) diff --git a/plenum/test/bls/conftest.py b/plenum/test/bls/conftest.py index a5d496e4d7..5c96c4ac2c 100644 --- a/plenum/test/bls/conftest.py +++ b/plenum/test/bls/conftest.py @@ -4,7 +4,6 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.util import get_utc_epoch from plenum.test.bls.helper import generate_state_root -from plenum.test.pool_transactions.conftest import looper participants = ["Node1", "Node2", "Node3"] signature = "somefakesignaturesomefakesignaturesomefakesignature" diff --git a/plenum/test/bls/helper.py b/plenum/test/bls/helper.py index a50a13a3d5..b7f20a9dbf 100644 --- a/plenum/test/bls/helper.py +++ b/plenum/test/bls/helper.py @@ -65,6 +65,45 @@ def sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, assert multi_sigs.count(multi_sigs[0]) == len(multi_sigs) +def sdk_check_bls_multi_sig_after_send(looper, txnPoolNodeSet, + sdk_pool_handle, sdk_wallet_handle, + saved_multi_sigs_count): + # at least two because first request could have no + # signature since state can be clear + number_of_requests = 3 + + # 1. send requests + # Using loop to avoid 3pc batching + state_roots = [] + for i in range(number_of_requests): + sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, + sdk_wallet_handle, 1) + waitNodeDataEquality(looper, txnPoolNodeSet[0], *txnPoolNodeSet[:-1]) + state_roots.append( + state_roots_serializer.serialize( + bytes(txnPoolNodeSet[0].getState(DOMAIN_LEDGER_ID).committedHeadHash))) + + # 2. get all saved multi-sigs + multi_sigs_for_batch = [] + for state_root in state_roots: + multi_sigs = [] + for node in txnPoolNodeSet: + multi_sig = node.bls_bft.bls_store.get(state_root) + if multi_sig: + multi_sigs.append(multi_sig) + multi_sigs_for_batch.append(multi_sigs) + + # 3. check how many multi-sigs are saved + for multi_sigs in multi_sigs_for_batch: + assert len(multi_sigs) == saved_multi_sigs_count, \ "{} != {}".format(len(multi_sigs), saved_multi_sigs_count) + + # 4. check that bls multi-sig is the same for all nodes we get PrePrepare for (that is, for all except the last one) + for multi_sigs in multi_sigs_for_batch[:-1]: + if multi_sigs: + assert multi_sigs.count(multi_sigs[0]) == len(multi_sigs) + + def process_commits_for_key(key, pre_prepare, bls_bfts): for sender_bls_bft in bls_bfts: commit = create_commit_bls_sig( diff --git a/plenum/test/bls/test_make_proof.py b/plenum/test/bls/test_make_proof.py index 6cd9b30890..9961cd9724 100644 --- a/plenum/test/bls/test_make_proof.py +++ b/plenum/test/bls/test_make_proof.py @@ -1,5 +1,4 @@ from base58 import b58encode -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_random_and_check from plenum.common.types import f from plenum.common.constants import ROOT_HASH diff --git a/plenum/test/checkpoints/conftest.py b/plenum/test/checkpoints/conftest.py index 7ff4584dd8..f0545bc5a2 100644 --- a/plenum/test/checkpoints/conftest.py +++ b/plenum/test/checkpoints/conftest.py @@ -1,9 +1,6 @@ import pytest from plenum.test.conftest import getValueFromModule -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper, nodeThetaAdded, \ - stewardAndWallet1, steward1, stewardWallet from plenum.test.batching_3pc.conftest import tconf diff --git a/plenum/test/checkpoints/helper.py b/plenum/test/checkpoints/helper.py index 771949b501..3acf349aad 100644 --- a/plenum/test/checkpoints/helper.py +++ b/plenum/test/checkpoints/helper.py @@ -1,10 +1,10 @@ from plenum.test.helper import assertEquality -def chkChkpoints(nodes, total: int, stableIndex: int=None): +def chkChkpoints(nodes, total: int, stableIndex: int = None): for node in nodes: for r in node.replicas: - assert len(r.checkpoints) == total, '{} checkpoints {}, whereas total {}'.\ + assert len(r.checkpoints) == total, '{} checkpoints {}, whereas total {}'.
\ format(r, len(r.checkpoints), total) if stableIndex is not None: assert r.checkpoints.values()[stableIndex].isStable, r.name diff --git a/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py b/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py index ec4a5237c6..87c4bde305 100644 --- a/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py +++ b/plenum/test/checkpoints/test_checkpoint_bounds_after_catchup.py @@ -8,9 +8,8 @@ def test_upper_bound_of_checkpoint_after_catchup_is_divisible_by_chk_freq( chkFreqPatched, looper, txnPoolNodeSet, steward1, stewardWallet, - client1, wallet1, client1Connected, tdir, client_tdir, tconf, + client1, wallet1, tdir, client_tdir, tconf, allPluginsPath): - sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 4) _, _, epsilon = addNewStewardAndNode(looper, steward1, stewardWallet, diff --git a/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py b/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py index 5209eb52b5..ce154503bd 100644 --- a/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py +++ b/plenum/test/checkpoints/test_checkpoint_stabilization_after_catchup.py @@ -10,9 +10,8 @@ def test_second_checkpoint_after_catchup_can_be_stabilized( chkFreqPatched, looper, txnPoolNodeSet, steward1, stewardWallet, - client1, wallet1, client1Connected, tdir, client_tdir, tconf, + client1, wallet1, tdir, client_tdir, tconf, allPluginsPath): - _, _, epsilon = addNewStewardAndNode(looper, steward1, stewardWallet, 'EpsilonSteward', 'Epsilon', tdir, client_tdir, tconf, diff --git a/plenum/test/checkpoints/test_message_outside_watermark.py b/plenum/test/checkpoints/test_message_outside_watermark.py index 8011b38964..f1e7201409 100644 --- a/plenum/test/checkpoints/test_message_outside_watermark.py +++ b/plenum/test/checkpoints/test_message_outside_watermark.py @@ -9,6 +9,7 @@ CHK_FREQ = 5 LOG_SIZE = 3 * CHK_FREQ + def test_non_primary_recvs_3phase_message_outside_watermarks(chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_logsize): """ diff --git a/plenum/test/checkpoints/test_message_outside_watermark1.py b/plenum/test/checkpoints/test_message_outside_watermark1.py index a0b871ec7b..75db9acd0b 100644 --- a/plenum/test/checkpoints/test_message_outside_watermark1.py +++ b/plenum/test/checkpoints/test_message_outside_watermark1.py @@ -9,7 +9,6 @@ from plenum.test.view_change.conftest import perf_chk_patched from plenum.test.helper import sdk_send_random_and_check - TestRunningTimeLimitSec = 300 PerfCheckFreq = 30 diff --git a/plenum/test/checkpoints/test_ordering_after_catchup.py b/plenum/test/checkpoints/test_ordering_after_catchup.py index 21f0b62a85..09ad59d2dc 100644 --- a/plenum/test/checkpoints/test_ordering_after_catchup.py +++ b/plenum/test/checkpoints/test_ordering_after_catchup.py @@ -14,7 +14,6 @@ def add_new_node(looper, pool_nodes, steward, steward_wallet, tdir, client_tdir, tconf, all_plugins_path): - name = randomString(6) node_name = "Node-" + name new_steward_name = "Steward-" + name diff --git a/plenum/test/checkpoints/test_view_change_after_checkpoint.py b/plenum/test/checkpoints/test_view_change_after_checkpoint.py index 1b76cdb9e2..d0d57e8936 100644 --- a/plenum/test/checkpoints/test_view_change_after_checkpoint.py +++ b/plenum/test/checkpoints/test_view_change_after_checkpoint.py @@ -7,7 +7,6 @@ from stp_core.loop.eventually import eventually from plenum.test.helper import sdk_send_batches_of_random_and_check - CHK_FREQ = 
5 diff --git a/plenum/test/cli/conftest.py b/plenum/test/cli/conftest.py index af0ad4303a..525351bf33 100644 --- a/plenum/test/cli/conftest.py +++ b/plenum/test/cli/conftest.py @@ -63,6 +63,7 @@ def createAllNodes(request, cli): def stopNodes(): for node in cli.nodes.values(): node.stop() + request.addfinalizer(stopNodes) @@ -85,8 +86,10 @@ def be(ctx): Fixture that is a 'be' function that closes over the test context. 'be' allows to change the current cli in the context. """ + def _(cli): ctx['current_cli'] = cli + return _ diff --git a/plenum/test/cli/helper.py b/plenum/test/cli/helper.py index 7c4da66266..c5b7140b77 100644 --- a/plenum/test/cli/helper.py +++ b/plenum/test/cli/helper.py @@ -7,7 +7,6 @@ import time - import plenum.cli.cli as cli from plenum.client.wallet import Wallet from plenum.common.constants import PRIMARY_SELECTION_PREFIX, CURRENT_PROTOCOL_VERSION @@ -97,7 +96,7 @@ def lastCmdOutput(self): self.printeds[: (len(self.printeds) - self.lastPrintIndex)])] printedTokens = [token[1] for tokens in reversed(self.printedTokens[:( - len(self.printedTokens) - self.lastPrintedTokenIndex)]) + len(self.printedTokens) - self.lastPrintedTokenIndex)]) for token in tokens.get('tokens', []) if len(token) > 1] pt = ''.join(printedTokens) return '\n'.join(printeds + [pt]).strip() @@ -169,12 +168,12 @@ def chk(): print("checking for {}".format(nodeName)) print(msgs) assert "{} added replica {}:0 to instance 0 (master)" \ - .format(nodeName, nodeName) in msgs + .format(nodeName, nodeName) in msgs assert "{} added replica {}:1 to instance 1 (backup)" \ - .format(nodeName, nodeName) in msgs + .format(nodeName, nodeName) in msgs assert "{}{} listening for other nodes at {}:{}" \ - .format(CONNECTION_PREFIX, nodeName, - *cli.nodes[nodeName].nodestack.ha) \ + .format(CONNECTION_PREFIX, nodeName, + *cli.nodes[nodeName].nodestack.ha) \ in msgs startUpTimeout = waits.expectedNodeStartUpTimeout() @@ -192,7 +191,7 @@ def checkAllNodesUp(cli): msgs = {stmt['msg'] for stmt in cli.printeds} expected = PRIMARY_SELECTION_PREFIX + \ - "{nm}:{inst} selected primary {pri}" " for instance {inst} (view 0)" + "{nm}:{inst} selected primary {pri}" " for instance {inst} (view 0)" assert len(cli.nodes) > 0 for nm, node in cli.nodes.items(): assert node @@ -304,12 +303,12 @@ def newCLI(looper, basedir, ledger_base_dir, nodeClass=TestNode, clientClass=TestClient, config=None, - partition: str=None, + partition: str = None, unique_name=None, logFileName=None, name=None, agentCreator=None, - nodes_chroot: str=None): + nodes_chroot: str = None): if partition: recorder = Recorder(partition) else: @@ -348,7 +347,7 @@ def checkCmdValid(cli, cmd): assert 'Invalid command' not in cli.lastCmdOutput -def newKeyPair(cli: TestCli, alias: str=None): +def newKeyPair(cli: TestCli, alias: str = None): cmd = "new key {}".format(alias) if alias else "new key" idrs = set() if cli.activeWallet: @@ -471,7 +470,7 @@ def checkBalance(balance, data): def waitForReply(cli, nodeCount, replyChecker, customTimeout=None): timeout = customTimeout or \ - waits.expectedTransactionExecutionTime(nodeCount) + waits.expectedTransactionExecutionTime(nodeCount) cli.looper.run(eventually(checkReply, cli, nodeCount, replyChecker, timeout=timeout)) @@ -500,8 +499,8 @@ def assertCliTokens(matchedVars, tokens): if expectedValue is not None: assert matchedValue is not None, \ - "Key '{}' not found in machedVars (matchedValue={})".\ - format(key, matchedValue) + "Key '{}' not found in machedVars (matchedValue={})". 
\ + format(key, matchedValue) expectedValueLen = len(expectedValue) if expectedValue else 0 matchedValueLen = len(matchedValue) if matchedValue else 0 @@ -509,9 +508,9 @@ def assertCliTokens(matchedVars, tokens): assert matchedValue == expectedValue, \ "Value not matched for key '{}', " \ "\nexpectedValue (length: {}): {}, " \ - "\nactualValue (length: {}): {}".\ - format(key, expectedValueLen, expectedValue, - matchedValueLen, matchedValue) + "\nactualValue (length: {}): {}". \ + format(key, expectedValueLen, expectedValue, + matchedValueLen, matchedValue) def doByCtx(ctx): @@ -594,12 +593,15 @@ def chk(obj, parity=True): raise AttributeError("only str, callable, or " "collections of str and callable " "are allowed") + chk(expect) chk(not_expect, False) + if within: cli.looper.run(eventually(check, timeout=within)) else: check() + return _ @@ -613,12 +615,11 @@ def checkPermissions(path, mode): def checkWalletRestored(cli, expectedWalletKeyName, expectedIdentifiers): - cli.lastCmdOutput == "Saved wallet {} restored".format( expectedWalletKeyName) assert cli._activeWallet.name == expectedWalletKeyName assert len(cli._activeWallet.identifiers) == \ - expectedIdentifiers + expectedIdentifiers def getOldIdentifiersForActiveWallet(cli): @@ -646,7 +647,7 @@ def createAndAssertNewKeyringCreation(do, name, expectedMsgs=None): def useAndAssertKeyring(do, name, expectedName=None, expectedMsgs=None): keyringName = expectedName or name finalExpectedMsgs = expectedMsgs or \ - ['Active wallet set to "{}"'.format(keyringName)] + ['Active wallet set to "{}"'.format(keyringName)] do('use wallet {}'.format(name), expect=finalExpectedMsgs ) @@ -655,7 +656,7 @@ def useAndAssertKeyring(do, name, expectedName=None, expectedMsgs=None): def saveAndAssertKeyring(do, name, expectedName=None, expectedMsgs=None): keyringName = expectedName or name finalExpectedMsgs = expectedMsgs or \ - ['Active wallet "{}" saved'.format(keyringName)] + ['Active wallet "{}" saved'.format(keyringName)] do('save wallet'.format(name), expect=finalExpectedMsgs ) diff --git a/plenum/test/cli/test_basic_node_commands.py b/plenum/test/cli/test_basic_node_commands.py index 24322227e4..eb03f49f75 100644 --- a/plenum/test/cli/test_basic_node_commands.py +++ b/plenum/test/cli/test_basic_node_commands.py @@ -23,13 +23,13 @@ def testNodeNames(be, do, cli, validNodeNames): # Create a node with a name of an already created node be(cli) do("new node {}".format(lastNodeName), expect=[ - "Node {} already exists.".format(lastNodeName)]) + "Node {} already exists.".format(lastNodeName)]) assert len(cli.nodes) == 4 # Create a node with invalid name randName = randomString(10) do("new node {}".format(randName), expect=[ - "Invalid node name '{}'. ".format(randName)]) + "Invalid node name '{}'. 
".format(randName)]) args = cli.printedTokens[-1] token, _ = args['tokens'][0] # An error token should be printed @@ -44,15 +44,15 @@ def testCreateNodeWhenClientExistsWithoutKey(be, do, cli, validNodeNames): clientName = "testc1" be(cli) do("new client {}".format(clientName), expect=[ - "Active client set to {}".format(clientName)]) + "Active client set to {}".format(clientName)]) do("new node {}".format(validNodeNames[0]), expect=[ - "No key present in wallet"], within=2) + "No key present in wallet"], within=2) def testCreateNodeWhenClientExistsWithKey(be, do, cli, validNodeNames): clientName = "testc2" be(cli) do("new client {}".format(clientName), expect=[ - "Active client set to {}".format(clientName)]) + "Active client set to {}".format(clientName)]) do("new key", expect=["Current DID set to "]) addNodes(be, do, cli, validNodeNames) diff --git a/plenum/test/cli/test_cli_startup.py b/plenum/test/cli/test_cli_startup.py index 0c9595e106..ae7e9722cc 100644 --- a/plenum/test/cli/test_cli_startup.py +++ b/plenum/test/cli/test_cli_startup.py @@ -11,8 +11,8 @@ def assertPrintsDefaultClientAndIdentifier(cli): assert cli.printeds[1]['msg'] == "Current wallet set to {walletName}". \ format(walletName=dc.name) assert cli.printeds[0]['msg'] == \ - "Current identifier set to {alias} ({cryptonym})". \ - format(alias=dc.name, cryptonym=verstr) + "Current identifier set to {alias} ({cryptonym})". \ + format(alias=dc.name, cryptonym=verstr) def printedMessages(cli): diff --git a/plenum/test/cli/test_keyring.py b/plenum/test/cli/test_keyring.py index d709d3069e..467886976a 100644 --- a/plenum/test/cli/test_keyring.py +++ b/plenum/test/cli/test_keyring.py @@ -4,7 +4,7 @@ def createNewKeyring(name, cli): assert 'Active wallet set to "{}"'.format(name) in cli.lastCmdOutput assert 'New wallet {} created'.format(name) in cli.lastCmdOutput assert not oldKeyring or ( - oldKeyring and oldKeyring.name != cli._activeWallet.name) + oldKeyring and oldKeyring.name != cli._activeWallet.name) assert cli.activeWallet.name == name assert len(cli._activeWallet.identifiers) == 0 diff --git a/plenum/test/cli/test_prompt.py b/plenum/test/cli/test_prompt.py index 969fe7bbcb..dd4622e21b 100644 --- a/plenum/test/cli/test_prompt.py +++ b/plenum/test/cli/test_prompt.py @@ -1,5 +1,3 @@ - - def checkPrompt(cli, checkWith: str): promptTokens = cli.cli.application.layout.children[1].children[ 0].content.content.get_tokens("") diff --git a/plenum/test/client/test_client.py b/plenum/test/client/test_client.py index ba319f948d..fa293d6e97 100644 --- a/plenum/test/client/test_client.py +++ b/plenum/test/client/test_client.py @@ -12,7 +12,7 @@ from plenum.test import waits from plenum.test.helper import checkResponseCorrectnessFromNodes, \ randomOperation, checkLastClientReqForNode, getRepliesFromClientInbox, \ - sendRandomRequest, waitForSufficientRepliesForRequests, assertLength, \ + sendRandomRequest, waitForSufficientRepliesForRequests, assertLength, \ sendReqsToNodesAndVerifySuffReplies from plenum.test.test_client import genTestClient @@ -29,7 +29,6 @@ 'verification key from disk', 'got error while verifying message'] # warnings - logger = getlogger() @@ -60,6 +59,7 @@ def testClientShouldNotBeAbleToConnectToNodesNodeStack(pool): """ Client should not be able to connect to nodes in the node's nodestack """ + async def go(ctx): nodestacksVersion = {k: v.ha for k, v in ctx.nodeset.nodeReg.items()} client1, _ = genTestClient( @@ -195,7 +195,7 @@ def testReplyWhenRepliesFromExactlyFPlusOneNodesAreSame(looper, # noinspection 
PyIncorrectDocstring -def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1): +def testReplyWhenRequestAlreadyExecuted(looper, txnPoolNodeSet, client1, sent1): """ When a request has already been executed the previously executed reply will be sent again to the client. An acknowledgement will not be sent @@ -211,7 +211,7 @@ def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1): for part in message_parts: client1.nodestack._enqueueIntoAllRemotes(part, None) - + def chk(): assertLength([response for response in client1.inBox if (response[0].get(f.RESULT.nm) and @@ -222,84 +222,3 @@ def chk(): responseTimeout = waits.expectedTransactionExecutionTime(nodeCount) looper.run(eventually(chk, retryWait=1, timeout=responseTimeout)) - - -# noinspection PyIncorrectDocstring -def testReplyMatchesRequest(looper, nodeSet, client_tdir, up): - ''' - This tests does check following things: - - wallet works correctly when used by multiple clients - - clients do receive responses for exactly the same request they sent - ''' - - def makeClient(id): - client, wallet = genTestClient(nodeSet, - tmpdir=client_tdir, - name="client-{}".format(id)) - looper.add(client) - looper.run(client.ensureConnectedToNodes()) - return client, wallet - - # creating clients - numOfClients = 3 - numOfRequests = 1 - - clients = set() - sharedWallet = None - for i in range(numOfClients): - client, wallet = makeClient(i) - if sharedWallet is None: - sharedWallet = wallet - clients.add(client) - - for i in range(1, numOfRequests + 1): - # sending requests - requests = {} - for client in clients: - op = randomOperation() - req = sharedWallet.signOp(op) - - request = client.submitReqs(req)[0][0] - requests[client] = (request.reqId, request.operation['amount']) - - # checking results - responseTimeout = waits.expectedTransactionExecutionTime(nodeCount) - for client, (reqId, sentAmount) in requests.items(): - looper.run(eventually(checkResponseRecvdFromNodes, - client, - nodeCount, - reqId, - retryWait=1, - timeout=responseTimeout)) - - print("Expected amount for request {} is {}". - format(reqId, sentAmount)) - - # This looks like it fails on some python versions - # replies = [r[0]['result']['amount'] - # for r in client.inBox - # if r[0]['op'] == 'REPLY' - # and r[0]['result']['reqId'] == reqId] - - replies = [] - for r in client.inBox: - if r[0]['op'] == 'REPLY' and r[0]['result']['reqId'] == reqId: - if 'amount' not in r[0]['result']: - logger.debug('{} cannot find amount in {}'. 
- format(client, r[0]['result'])) - replies.append(r[0]['result']['amount']) - - assert all(replies[0] == r for r in replies) - assert replies[0] == sentAmount - - -def testReplyReceivedOnlyByClientWhoSentRequest(looper, nodeSet, client_tdir, - client1, wallet1): - newClient, _ = genTestClient(nodeSet, tmpdir=client_tdir) - looper.add(newClient) - looper.run(newClient.ensureConnectedToNodes()) - client1InboxSize = len(client1.inBox) - newClientInboxSize = len(newClient.inBox) - sendReqsToNodesAndVerifySuffReplies(looper, wallet1, newClient, 1) - assert len(client1.inBox) == client1InboxSize - assert len(newClient.inBox) > newClientInboxSize diff --git a/plenum/test/client/test_client_authn.py b/plenum/test/client/test_client_authn.py index ea94178abd..1bb6bc882f 100644 --- a/plenum/test/client/test_client_authn.py +++ b/plenum/test/client/test_client_authn.py @@ -5,7 +5,6 @@ from plenum.common.signer_simple import SimpleSigner from plenum.server.client_authn import SimpleAuthNr - idr = '5G72199XZB7wREviUbQma7' msg_str = "42 (forty-two) is the natural number that succeeds 41 and precedes 43." diff --git a/plenum/test/client/test_client_can_send.py b/plenum/test/client/test_client_can_send.py index 68a8404174..e7f2bdb6ff 100644 --- a/plenum/test/client/test_client_can_send.py +++ b/plenum/test/client/test_client_can_send.py @@ -6,8 +6,6 @@ from plenum.test.helper import random_requests from plenum.test.pool_transactions.helper import buildPoolClientAndWallet from stp_core.loop.eventually import eventually -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected, steward1, stewardWallet, stewardAndWallet1 def new_client(poolTxnClientData, tdirWithPoolTxns): diff --git a/plenum/test/client/test_client_observer.py b/plenum/test/client/test_client_observer.py index a5f2073284..08cab0549e 100644 --- a/plenum/test/client/test_client_observer.py +++ b/plenum/test/client/test_client_observer.py @@ -3,7 +3,7 @@ from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies -def test_observer_registration(looper, nodeSet, up, client1): +def test_observer_registration(looper, txnPoolNodeSet, client1): def callable1(*args, **kwargs): print(1) print(args) @@ -48,7 +48,7 @@ def callable2(*args, **kwargs): assert len(client1._observers) == 1 -def test_observer_execution(looper, nodeSet, up, client1, wallet1): +def test_observer_execution(looper, txnPoolNodeSet, client1, wallet1): resp1 = [] resp2 = [] diff --git a/plenum/test/client/test_client_request_nack.py b/plenum/test/client/test_client_request_nack.py index 53643bd92e..5e1b97edad 100644 --- a/plenum/test/client/test_client_request_nack.py +++ b/plenum/test/client/test_client_request_nack.py @@ -17,8 +17,8 @@ def verify(operation): @pytest.fixture(scope="module") -def restrictiveVerifier(nodeSet): - for n in nodeSet: +def restrictiveVerifier(txnPoolNodeSet): + for n in txnPoolNodeSet: n.opVerifiers = [TestVerifier()] @@ -35,13 +35,12 @@ def testRequestFullRoundTrip(restrictiveVerifier, client1, sent1, looper, - nodeSet): - + txnPoolNodeSet): update = {'reason': 'client request invalid: InvalidClientRequest() ' '[caused by amount too high\nassert 999 <= 100]'} coros2 = [partial(checkReqNack, client1, node, sent1.identifier, sent1.reqId, update) - for node in nodeSet] + for node in txnPoolNodeSet] timeout = waits.expectedReqAckQuorumTime() looper.run(eventuallyAll(*coros2, totalTimeout=timeout)) diff --git a/plenum/test/client/test_client_resends_not_confirmed_request.py 
b/plenum/test/client/test_client_resends_not_confirmed_request.py index 0a08874bb7..b5c09ac2f0 100644 --- a/plenum/test/client/test_client_resends_not_confirmed_request.py +++ b/plenum/test/client/test_client_resends_not_confirmed_request.py @@ -18,7 +18,7 @@ def test_client_resends_not_confirmed_request(looper, client1, wallet1, - nodeSet): + txnPoolNodeSet): """ Check that client resends request to all nodes if it was previously sent to one node but reply cannot be verified diff --git a/plenum/test/client/test_client_retry.py b/plenum/test/client/test_client_retry.py index f38fc85de6..128fb12900 100644 --- a/plenum/test/client/test_client_retry.py +++ b/plenum/test/client/test_client_retry.py @@ -13,14 +13,14 @@ whitelist = ['AlphaC unable to send message', ] -def testClientRetryRequestWhenAckNotReceived(looper, nodeSet, client1, wallet1): +def testClientRetryRequestWhenAckNotReceived(looper, txnPoolNodeSet, client1, wallet1): """ The client gets disconnected from node say Alpha but does not know it. It sends request to all nodes including Alpha, expects ACK and REPLY from Alpha too, does not get it, so reconnects to Alpha and sends request again and gets REPLY """ - alpha = nodeSet.Alpha + alpha = txnPoolNodeSet[0] skipped = False origPr = alpha.processRequest @@ -37,7 +37,7 @@ def skipReqOnce(msg, remoteName): req = sendRandomRequest(wallet1, client1) def chkAcks(): - for node in nodeSet: + for node in txnPoolNodeSet: if node != alpha: checkReqAck(client1, node, *req.key) else: @@ -50,14 +50,14 @@ def chkAcks(): wait_for_replies(looper, client1, idr, reqId, 4) -def testClientRetryRequestWhenReplyNotReceived(looper, nodeSet, client1, +def testClientRetryRequestWhenReplyNotReceived(looper, txnPoolNodeSet, client1, wallet1, tconf): """ A node say Alpha sends ACK but doesn't send REPLY. The client resends the request and gets REPLY """ - alpha = nodeSet.Alpha + alpha = txnPoolNodeSet[0] skipped = False origTrans = alpha.transmitToClient @@ -70,7 +70,7 @@ def skipReplyOnce(msg, remoteName): alpha.transmitToClient = skipReplyOnce req = sendRandomRequest(wallet1, client1) - coros = [partial(checkReqAck, client1, node, *req.key) for node in nodeSet] + coros = [partial(checkReqAck, client1, node, *req.key) for node in txnPoolNodeSet] timeout = waits.expectedReqAckQuorumTime() start = time.perf_counter() looper.run(eventuallyAll(*coros, retryWait=.5, totalTimeout=timeout)) @@ -86,14 +86,14 @@ def skipReplyOnce(msg, remoteName): wait_for_replies(looper, client1, idr, reqId, 4) -def testClientNotRetryRequestWhenReqnackReceived(looper, nodeSet, client1, wallet1): +def testClientNotRetryRequestWhenReqnackReceived(looper, txnPoolNodeSet, client1, wallet1): """ A node sends REQNACK. The client does not resend Request. 
""" - numOfNodes = len(nodeSet) + numOfNodes = len(txnPoolNodeSet) - alpha = nodeSet.Alpha + alpha = txnPoolNodeSet[0] origProcReq = alpha.processRequest origTrans = alpha.transmitToClient @@ -148,7 +148,7 @@ def reset(): def testClientNotRetryingRequestAfterMaxTriesDone(looper, - nodeSet, + txnPoolNodeSet, client1, wallet1, withFewerRetryReq): @@ -158,7 +158,7 @@ def testClientNotRetryingRequestAfterMaxTriesDone(looper, configuration and no more """ - alpha = nodeSet.Alpha + alpha = txnPoolNodeSet[0] origTrans = alpha.transmitToClient def dontTransmitReply(msg, remoteName): @@ -175,8 +175,8 @@ def dontTransmitReply(msg, remoteName): # +1 because we have to wait one more retry timeout to make sure what # client cleaned his buffers (expectingAcksFor, expectingRepliesFor) retryTime = withFewerRetryReq.CLIENT_REPLY_TIMEOUT * \ - (withFewerRetryReq.CLIENT_MAX_RETRY_REPLY + 1) - timeout = waits.expectedTransactionExecutionTime(len(nodeSet)) + retryTime + (withFewerRetryReq.CLIENT_MAX_RETRY_REPLY + 1) + timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) + retryTime looper.runFor(timeout) @@ -184,7 +184,7 @@ def dontTransmitReply(msg, remoteName): wait_for_replies(looper, client1, idr, reqId, 3) assert client1.spylog.count(client1.resendRequests.__name__) == \ - (totalResends + withFewerRetryReq.CLIENT_MAX_RETRY_REPLY) + (totalResends + withFewerRetryReq.CLIENT_MAX_RETRY_REPLY) assert req.key not in client1.expectingAcksFor assert req.key not in client1.expectingRepliesFor alpha.transmitToClient = origTrans diff --git a/plenum/test/client/test_client_sends_get_request_to_one_node.py b/plenum/test/client/test_client_sends_get_request_to_one_node.py index 57db7c093c..0ea6a1d61e 100644 --- a/plenum/test/client/test_client_sends_get_request_to_one_node.py +++ b/plenum/test/client/test_client_sends_get_request_to_one_node.py @@ -4,7 +4,6 @@ from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually from plenum.test.client.conftest import passThroughReqAcked1 -from plenum.test.helper import stopNodes, send_signed_requests from plenum.test.helper import stopNodes, send_signed_requests from plenum.test.malicious_behaviors_client import \ @@ -20,7 +19,7 @@ def test_client_sends_get_request_to_one_node(looper, client1, wallet1, - nodeSet): + txnPoolNodeSet): """ Check that client sends read only request to one node only """ @@ -52,7 +51,7 @@ def sign_and_send(op): def test_client_can_send_get_request_to_one_node(looper, client1, wallet1, - nodeSet): + txnPoolNodeSet): """ Check that read only request can be sent without having connection to all nodes @@ -61,7 +60,7 @@ def test_client_can_send_get_request_to_one_node(looper, wallet = wallet1 logger.info("Stopping nodes") - nodes_to_stop = list(nodeSet)[1:] + nodes_to_stop = list(txnPoolNodeSet)[1:] stopNodes(nodes_to_stop, looper) def sign_and_send(op): diff --git a/plenum/test/client/test_client_sends_to_f_plus_one_nodes.py b/plenum/test/client/test_client_sends_to_f_plus_one_nodes.py index a6d7ca3656..b94b227189 100644 --- a/plenum/test/client/test_client_sends_to_f_plus_one_nodes.py +++ b/plenum/test/client/test_client_sends_to_f_plus_one_nodes.py @@ -5,24 +5,23 @@ genDoesntSendRequestToSomeNodes from plenum.test.node_catchup.helper import waitNodeDataEquality - nodeCount = 4 clientFault = genDoesntSendRequestToSomeNodes("AlphaC") reqAcked1 = passThroughReqAcked1 -def testReplyWhenRequestSentToMoreThanFPlusOneNodes(looper, nodeSet, +def testReplyWhenRequestSentToMoreThanFPlusOneNodes(looper, 
txnPoolNodeSet, fClient, replied1, wallet1): """ Alpha would not be sent request but other nodes will be, so Alpha will just rely on propagates from other nodes """ - alpha = nodeSet.Alpha - other_nodes = [n for n in nodeSet if n != alpha] + alpha = txnPoolNodeSet[0] + other_nodes = [n for n in txnPoolNodeSet if n != alpha] def chk(req_count=1): - for node in nodeSet: + for node in txnPoolNodeSet: prc_req = node.processRequest.__name__ prc_ppg = node.processPropagate.__name__ if node != alpha: diff --git a/plenum/test/client/test_client_sends_to_less_nodes.py b/plenum/test/client/test_client_sends_to_less_nodes.py index 74d149edb5..5facd46649 100644 --- a/plenum/test/client/test_client_sends_to_less_nodes.py +++ b/plenum/test/client/test_client_sends_to_less_nodes.py @@ -10,7 +10,7 @@ # noinspection PyIncorrectDocstring -def testReplyWhenRequestSentToLessThanFPlusOneNodes(looper, nodeSet, +def testReplyWhenRequestSentToLessThanFPlusOneNodes(looper, txnPoolNodeSet, fClient: Client, replied1): """ In a system with no faulty nodes, even if the client sends the request to diff --git a/plenum/test/client/test_core_authn.py b/plenum/test/client/test_core_authn.py index 1cdb645150..7875348fd5 100644 --- a/plenum/test/client/test_core_authn.py +++ b/plenum/test/client/test_core_authn.py @@ -7,7 +7,6 @@ from plenum.common.types import f from plenum.server.client_authn import CoreAuthNr - idr = '5G72199XZB7wREviUbQma7' msg_str = "42 (forty-two) is the natural number that succeeds 41 and precedes 43." diff --git a/plenum/test/client/test_faulty_client_with_faulty_node.py b/plenum/test/client/test_faulty_client_with_faulty_node.py index 47f3138d9e..c0cc8e766c 100644 --- a/plenum/test/client/test_faulty_client_with_faulty_node.py +++ b/plenum/test/client/test_faulty_client_with_faulty_node.py @@ -17,18 +17,18 @@ @pytest.fixture(scope="module") -def nodeChangesRequest(nodeSet): - alpha = nodeSet.Alpha +def nodeChangesRequest(txnPoolNodeSet): + alpha = txnPoolNodeSet[0] # Alpha should not be blacklisted for Invalid Signature by all other nodes whitelistNode(alpha.name, - [node for node in nodeSet if node != alpha], + [node for node in txnPoolNodeSet if node != alpha], InvalidSignature.code) - makeNodeFaulty(alpha, changesRequest,) + makeNodeFaulty(alpha, changesRequest, ) # noinspection PyIncorrectDocstring,PyUnusedLocal,PyShadowingNames -def testReplyUnaffectedByFaultyNode(looper, nodeSet, nodeChangesRequest, +def testReplyUnaffectedByFaultyNode(looper, txnPoolNodeSet, nodeChangesRequest, fClient, replied1): """ Client is malicious - sends requests to Alpha and Beta only diff --git a/plenum/test/client/test_protocol_version.py b/plenum/test/client/test_protocol_version.py index ad37a07c60..da3b89426e 100644 --- a/plenum/test/client/test_protocol_version.py +++ b/plenum/test/client/test_protocol_version.py @@ -3,10 +3,7 @@ from plenum.common.request import Request from plenum.test.helper import waitForSufficientRepliesForRequests, \ send_signed_requests, checkReqNackWithReason, random_request_objects, \ - sign_request_objects, signed_random_requests, random_requests -# noinspection PyUnresolvedReferences -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected + sign_request_objects, random_requests from stp_core.loop.eventually import eventually @@ -14,8 +11,9 @@ def request_num(request): return int(request.param) + def test_request_no_protocol_version(looper, txnPoolNodeSet, - client1, client1Connected, + client1, wallet1, request_num): reqs = 
random_request_objects(request_num, protocol_version=None) @@ -28,9 +26,9 @@ def test_request_no_protocol_version(looper, txnPoolNodeSet, def test_version_not_set_by_default(looper, txnPoolNodeSet, - client1, client1Connected, - wallet1, - request_num): + client1, + wallet1, + request_num): req_dicts = random_requests(request_num) reqs = [Request(operation=op) for op in req_dicts] for req in reqs: @@ -44,7 +42,7 @@ def test_version_not_set_by_default(looper, txnPoolNodeSet, def test_request_with_correct_version(looper, - txnPoolNodeSet, client1, client1Connected, + txnPoolNodeSet, client1, wallet1, request_num): reqs = random_request_objects(request_num, protocol_version=CURRENT_PROTOCOL_VERSION) @@ -57,7 +55,7 @@ def test_request_with_correct_version(looper, def test_request_with_invalid_version(looper, txnPoolNodeSet, - client1, client1Connected, + client1, wallet1, request_num): reqs = random_request_objects(request_num, protocol_version=-1) diff --git a/plenum/test/client/test_state_proof_verified.py b/plenum/test/client/test_state_proof_verified.py index adfd93ca43..3f3568bb4a 100644 --- a/plenum/test/client/test_state_proof_verified.py +++ b/plenum/test/client/test_state_proof_verified.py @@ -3,10 +3,6 @@ from plenum.test.helper import sendRandomRequest, \ checkResponseCorrectnessFromNodes from stp_core.loop.eventually import eventually -# noinspection PyUnresolvedReferences -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected - nodeCount = 4 nodes_wth_bls = 4 @@ -21,7 +17,7 @@ def check_proved_reply_received(client, identifier, request_id): def test_state_proof_checked_in_client_request(looper, txnPoolNodeSet, - client1, client1Connected, wallet1): + client1, wallet1): """ Checks that client cat use state proofs instead of quorum for replies. diff --git a/plenum/test/common/test_config_util.py b/plenum/test/common/test_config_util.py index 5bbd58a161..fb0ceb33a9 100644 --- a/plenum/test/common/test_config_util.py +++ b/plenum/test/common/test_config_util.py @@ -4,7 +4,6 @@ from plenum.common.config_util import extend_with_external_config, \ extend_with_default_external_config - TEST_NETWORK_NAME = 'test_network' GENERAL_CONFIG_FILE_NAME = 'test_config.py' USER_CONFIG_FILE_NAME = 'user_config.py' diff --git a/plenum/test/common/test_hook_mananger.py b/plenum/test/common/test_hook_mananger.py index 9443fc8e91..2fd1da65fa 100644 --- a/plenum/test/common/test_hook_mananger.py +++ b/plenum/test/common/test_hook_mananger.py @@ -12,7 +12,7 @@ def test_hook_registration(): manager.register_hook(9, lambda x, y: print(x, y)) for i in hook_ids: assert len(manager.hooks[i]) == 0 - manager.register_hook(i, lambda x, y: print(+y+i)) + manager.register_hook(i, lambda x, y: print(+y + i)) assert len(manager.hooks[i]) == 1 diff --git a/plenum/test/common/test_parse_ledger.py b/plenum/test/common/test_parse_ledger.py index 0f35fc3ca5..d451a4a779 100644 --- a/plenum/test/common/test_parse_ledger.py +++ b/plenum/test/common/test_parse_ledger.py @@ -3,7 +3,8 @@ from ledger.compact_merkle_tree import CompactMerkleTree from ledger.ledger import Ledger -from plenum.common.constants import TXN_TYPE, TARGET_NYM, DATA, NAME, ALIAS, SERVICES, VALIDATOR, IDENTIFIER, NODE_PORT, CLIENT_PORT, NODE_IP +from plenum.common.constants import TXN_TYPE, TARGET_NYM, DATA, NAME, ALIAS, SERVICES, VALIDATOR, IDENTIFIER, NODE_PORT, \ + CLIENT_PORT, NODE_IP from plenum.common.stack_manager import TxnStackManager errMsg1 = 'Invalid verkey. 
Rebuild pool transactions.' diff --git a/plenum/test/common/test_pool_file_raises_descriptive_error.py b/plenum/test/common/test_pool_file_raises_descriptive_error.py index 6aa9425eac..cbc601a69b 100644 --- a/plenum/test/common/test_pool_file_raises_descriptive_error.py +++ b/plenum/test/common/test_pool_file_raises_descriptive_error.py @@ -5,13 +5,12 @@ from plenum.common.stack_manager import TxnStackManager from json.decoder import JSONDecodeError - errMsg = 'Pool transaction file corrupted. Rebuild pool transactions.' whitelist = [errMsg] class DummyLedger(Ledger): - def getAllTxn(self, frm: int=None, to: int=None): + def getAllTxn(self, frm: int = None, to: int = None): raise JSONDecodeError('', '', 0) diff --git a/plenum/test/common/test_prepare_batch.py b/plenum/test/common/test_prepare_batch.py index db0f226a52..cf2be2860e 100644 --- a/plenum/test/common/test_prepare_batch.py +++ b/plenum/test/common/test_prepare_batch.py @@ -1,6 +1,5 @@ from plenum.common.prepare_batch import split_messages_on_batches, SPLIT_STEPS_LIMIT - LEN_LIMIT_BYTES = 100 SERIALIZATION_OTHER_HEAD_BYTES = 10 MAX_ONE_MSG_LEN = LEN_LIMIT_BYTES - SERIALIZATION_OTHER_HEAD_BYTES @@ -55,5 +54,5 @@ def test_one_msg_almost_excesses_limit_split_fails(): def test_excesses_limit_of_split_steps_split_fails(): - msgs = [b'1' * MAX_ONE_MSG_LEN] * 2**(SPLIT_STEPS_LIMIT + 1) + msgs = [b'1' * MAX_ONE_MSG_LEN] * 2 ** (SPLIT_STEPS_LIMIT + 1) assert split_ut(msgs) is None diff --git a/plenum/test/conftest.py b/plenum/test/conftest.py index 6deb24a707..d8821244a1 100644 --- a/plenum/test/conftest.py +++ b/plenum/test/conftest.py @@ -229,18 +229,6 @@ def allPluginsPath(): return [getPluginPath('stats_consumer')] -@pytest.fixture(scope="module") -def keySharedNodes(startedNodes): - return startedNodes - - -@pytest.fixture(scope="module") -def startedNodes(nodeSet, looper): - for n in nodeSet: - n.start(looper.loop) - return nodeSet - - @pytest.fixture(scope="module") def whitelist(request): return getValueFromModule(request, "whitelist", []) @@ -326,15 +314,6 @@ def node_config_helper_class(): return PNodeConfigHelper -@pytest.yield_fixture(scope="module") -def nodeSet(request, tdir, tconf, nodeReg, allPluginsPath, patchPluginManager): - primaryDecider = getValueFromModule(request, "PrimaryDecider", None) - with TestNodeSet(tconf, nodeReg=nodeReg, tmpdir=tdir, - primaryDecider=primaryDecider, - pluginPaths=allPluginsPath) as ns: - yield ns - - def _tdir(tdir_fact): return tdir_fact.mktemp('').strpath @@ -419,16 +398,8 @@ def nodeReg(request) -> Dict[str, HA]: @pytest.yield_fixture(scope="module") -def unstartedLooper(nodeSet): - with Looper(nodeSet, autoStart=False) as l: - yield l - - -@pytest.fixture(scope="module") -def looper(unstartedLooper): - unstartedLooper.autoStart = True - unstartedLooper.startall() - return unstartedLooper +def looper(txnPoolNodesLooper): + yield txnPoolNodesLooper @pytest.fixture(scope="function") @@ -436,44 +407,57 @@ def pool(tdir_for_func, tconf_for_func): return Pool(tmpdir=tdir_for_func, config=tconf_for_func) -@pytest.fixture(scope="module") -def ready(looper, keySharedNodes): - looper.run(checkNodesConnected(keySharedNodes)) - return keySharedNodes - - -@pytest.fixture(scope="module") -def up(looper, ready): - ensureElectionsDone(looper=looper, nodes=ready) - - # noinspection PyIncorrectDocstring @pytest.fixture(scope="module") -def ensureView(nodeSet, looper, up): +def ensureView(txnPoolNodeSet, looper): """ - Ensure that all the nodes in the nodeSet are in the same view. 
+ Ensure that all the nodes in the txnPoolNodeSet are in the same view. """ - return waitForViewChange(looper, nodeSet) + return waitForViewChange(looper, txnPoolNodeSet) @pytest.fixture("module") -def delayed_perf_chk(nodeSet): +def delayed_perf_chk(txnPoolNodeSet): d = 20 - for node in nodeSet: + for node in txnPoolNodeSet: node.delayCheckPerformance(d) return d @pytest.fixture(scope="module") -def clientAndWallet1(looper, nodeSet, client_tdir, up): - client, wallet = genTestClient(nodeSet, tmpdir=client_tdir) +def stewardWallet(stewardAndWallet1): + return stewardAndWallet1[1] + + +@pytest.fixture(scope="module") +def clientAndWallet1(txnPoolNodeSet, poolTxnClientData, tdirWithClientPoolTxns, client_tdir): + client, wallet = buildPoolClientAndWallet(poolTxnClientData, + client_tdir) yield client, wallet client.stop() @pytest.fixture(scope="module") -def client1(clientAndWallet1, looper): - client, _ = clientAndWallet1 +def stewardAndWallet1(looper, txnPoolNodeSet, poolTxnStewardData, + tdirWithClientPoolTxns, client_tdir): + client, wallet = buildPoolClientAndWallet(poolTxnStewardData, + client_tdir) + yield client, wallet + client.stop() + + +@pytest.fixture(scope="module") +def steward1(looper, txnPoolNodeSet, stewardAndWallet1): + steward, wallet = stewardAndWallet1 + looper.add(steward) + ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward, + *txnPoolNodeSet) + return steward + + +@pytest.fixture(scope="module") +def client1(looper, clientAndWallet1): + client = clientAndWallet1[0] looper.add(client) looper.run(client.ensureConnectedToNodes()) return client @@ -481,8 +465,7 @@ def client1(clientAndWallet1, looper): @pytest.fixture(scope="module") def wallet1(clientAndWallet1): - _, wallet = clientAndWallet1 - return wallet + return clientAndWallet1[1] @pytest.fixture(scope="module") @@ -498,13 +481,13 @@ def sent1(client1, request1): @pytest.fixture(scope="module") -def reqAcked1(looper, nodeSet, client1, sent1, faultyNodes): - numerOfNodes = len(nodeSet) +def reqAcked1(looper, txnPoolNodeSet, client1, sent1, faultyNodes): + numerOfNodes = len(txnPoolNodeSet) # Wait until request received by all nodes propTimeout = waits.expectedClientToPoolRequestDeliveryTime(numerOfNodes) coros = [partial(checkLastClientReqForNode, node, sent1) - for node in nodeSet] + for node in txnPoolNodeSet] # looper.run(eventuallyAll(*coros, # totalTimeout=propTimeout, # acceptableFails=faultyNodes)) @@ -518,7 +501,7 @@ def reqAcked1(looper, nodeSet, client1, sent1, faultyNodes): client1, node, sent1.identifier, - sent1.reqId) for node in nodeSet] + sent1.reqId) for node in txnPoolNodeSet] ackTimeout = waits.expectedReqAckQuorumTime() # looper.run(eventuallyAll(*coros2, # totalTimeout=ackTimeout, @@ -550,47 +533,46 @@ def faultyNodes(request): @pytest.fixture(scope="module") def propagated1(looper, - nodeSet, - up, + txnPoolNodeSet, reqAcked1, faultyNodes): - checkPropagated(looper, nodeSet, reqAcked1, faultyNodes) + checkPropagated(looper, txnPoolNodeSet, reqAcked1, faultyNodes) return reqAcked1 @pytest.fixture(scope="module") -def preprepared1(looper, nodeSet, propagated1, faultyNodes): +def preprepared1(looper, txnPoolNodeSet, propagated1, faultyNodes): checkPrePrepared(looper, - nodeSet, + txnPoolNodeSet, propagated1, - range(getNoInstances(len(nodeSet))), + range(getNoInstances(len(txnPoolNodeSet))), faultyNodes) return propagated1 @pytest.fixture(scope="module") -def prepared1(looper, nodeSet, client1, preprepared1, faultyNodes): +def prepared1(looper, txnPoolNodeSet, client1, preprepared1, 
faultyNodes): checkPrepared(looper, - nodeSet, + txnPoolNodeSet, preprepared1, - range(getNoInstances(len(nodeSet))), + range(getNoInstances(len(txnPoolNodeSet))), faultyNodes) return preprepared1 @pytest.fixture(scope="module") -def committed1(looper, nodeSet, client1, prepared1, faultyNodes): +def committed1(looper, txnPoolNodeSet, client1, prepared1, faultyNodes): checkCommitted(looper, - nodeSet, + txnPoolNodeSet, prepared1, - range(getNoInstances(len(nodeSet))), + range(getNoInstances(len(txnPoolNodeSet))), faultyNodes) return prepared1 @pytest.fixture(scope="module") -def replied1(looper, nodeSet, client1, committed1, wallet1, faultyNodes): - numOfNodes = len(nodeSet) +def replied1(looper, txnPoolNodeSet, client1, committed1, wallet1, faultyNodes): + numOfNodes = len(txnPoolNodeSet) numOfInstances = getNoInstances(numOfNodes) quorum = numOfInstances * (numOfNodes - faultyNodes) @@ -599,7 +581,7 @@ def checkOrderedCount(): wallet1.defaultId, committed1.reqId, instId) - for node in nodeSet for instId in range(numOfInstances)] + for node in txnPoolNodeSet for instId in range(numOfInstances)] assert resp.count(True) >= quorum orderingTimeout = waits.expectedOrderingTime(numOfInstances) diff --git a/plenum/test/delayers.py b/plenum/test/delayers.py index f1da56fa9d..911b909471 100644 --- a/plenum/test/delayers.py +++ b/plenum/test/delayers.py @@ -15,6 +15,7 @@ DEFAULT_DELAY = 600 + def delayer(seconds, op, senderFilter=None, instFilter: int = None): def inner(rx): msg, frm = rx @@ -75,42 +76,42 @@ def inner(action_pair): return inner -def nom_delay(delay: float = DEFAULT_DELAY, inst_id=None, sender_filter: str=None): +def nom_delay(delay: float = DEFAULT_DELAY, inst_id=None, sender_filter: str = None): # Delayer of NOMINATE requests return delayerMsgTuple( delay, Nomination, instFilter=inst_id, senderFilter=sender_filter) -def prim_delay(delay: float = DEFAULT_DELAY, inst_id=None, sender_filter: str=None): +def prim_delay(delay: float = DEFAULT_DELAY, inst_id=None, sender_filter: str = None): # Delayer of PRIMARY requests return delayerMsgTuple( delay, Primary, instFilter=inst_id, senderFilter=sender_filter) -def rel_delay(delay: float = DEFAULT_DELAY, inst_id=None, sender_filter: str=None): +def rel_delay(delay: float = DEFAULT_DELAY, inst_id=None, sender_filter: str = None): # Delayer of REELECTION requests return delayerMsgTuple( delay, Reelection, instFilter=inst_id, senderFilter=sender_filter) -def ppgDelay(delay: float = DEFAULT_DELAY, sender_filter: str=None): +def ppgDelay(delay: float = DEFAULT_DELAY, sender_filter: str = None): # Delayer of PROPAGATE requests return delayerMsgTuple(delay, Propagate, senderFilter=sender_filter) -def ppDelay(delay: float = DEFAULT_DELAY, instId: int=None, sender_filter: str=None): +def ppDelay(delay: float = DEFAULT_DELAY, instId: int = None, sender_filter: str = None): # Delayer of PRE-PREPARE requests from a particular instance return delayerMsgTuple(delay, PrePrepare, instFilter=instId, senderFilter=sender_filter) -def pDelay(delay: float = DEFAULT_DELAY, instId: int=None, sender_filter: str=None): +def pDelay(delay: float = DEFAULT_DELAY, instId: int = None, sender_filter: str = None): # Delayer of PREPARE requests from a particular instance return delayerMsgTuple( delay, Prepare, instFilter=instId, senderFilter=sender_filter) -def cDelay(delay: float = DEFAULT_DELAY, instId: int=None, sender_filter: str=None): +def cDelay(delay: float = DEFAULT_DELAY, instId: int = None, sender_filter: str = None): # Delayer of COMMIT requests from a 
particular instance return delayerMsgTuple( delay, Commit, instFilter=instId, senderFilter=sender_filter) @@ -151,7 +152,7 @@ def req_delay(delay: float = DEFAULT_DELAY): return delayerMsgTuple(delay, Request) -def msg_req_delay(delay: float = DEFAULT_DELAY, types_to_delay: List=None): +def msg_req_delay(delay: float = DEFAULT_DELAY, types_to_delay: List = None): # Delayer of MessageReq messages def specific_msgs(msg): if isinstance( @@ -163,7 +164,7 @@ def specific_msgs(msg): return specific_msgs -def msg_rep_delay(delay: float = DEFAULT_DELAY, types_to_delay: List=None): +def msg_rep_delay(delay: float = DEFAULT_DELAY, types_to_delay: List = None): # Delayer of MessageRep messages def specific_msgs(msg): if isinstance( @@ -199,9 +200,9 @@ def delay(what, frm, to, howlong): "to type {} for {} not supported".format(type(t), t)) -def delayNonPrimaries(nodeSet, instId, delay): +def delayNonPrimaries(txnPoolNodeSet, instId, delay): from plenum.test.test_node import getNonPrimaryReplicas - nonPrimReps = getNonPrimaryReplicas(nodeSet, instId) + nonPrimReps = getNonPrimaryReplicas(txnPoolNodeSet, instId) for r in nonPrimReps: r.node.nodeIbStasher.delay(ppDelay(delay, instId)) return nonPrimReps @@ -216,7 +217,7 @@ def delay_messages(typ, nodes, inst_id, delay=None, else: RuntimeError('Unknown type') assert delay is not None or ( - min_delay is not None and max_delay is not None) + min_delay is not None and max_delay is not None) for node in nodes: if delay: d = delay diff --git a/plenum/test/exceptions.py b/plenum/test/exceptions.py index 133a432035..263a925454 100644 --- a/plenum/test/exceptions.py +++ b/plenum/test/exceptions.py @@ -1,5 +1,6 @@ class NotFullyConnected(Exception): pass + class TestException(Exception): pass diff --git a/plenum/test/grouped_load_scheduling.py b/plenum/test/grouped_load_scheduling.py index 382473817f..7664267cb9 100644 --- a/plenum/test/grouped_load_scheduling.py +++ b/plenum/test/grouped_load_scheduling.py @@ -54,4 +54,5 @@ def batch_generator(self): def grouper(p): name = self.collection[p] return '::'.join(name.split('::')[:-1]) + return itertools.groupby(self.pending, key=grouper) diff --git a/plenum/test/helper.py b/plenum/test/helper.py index bf7ebf6aea..0b58068065 100644 --- a/plenum/test/helper.py +++ b/plenum/test/helper.py @@ -8,14 +8,14 @@ from shutil import copyfile from sys import executable from time import sleep -from typing import Tuple, Iterable, Dict, Optional, NamedTuple, List, Any, Sequence, Union +from typing import Tuple, Iterable, Dict, Optional, List, Any, Sequence, Union import pytest from psutil import Popen import json import asyncio -from indy.ledger import sign_and_submit_request, sign_request, submit_request, build_nym_request +from indy.ledger import sign_and_submit_request, sign_request, submit_request from indy.error import ErrorCode, IndyError from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_file @@ -28,11 +28,13 @@ from plenum.common.messages.node_messages import Reply, PrePrepare, Prepare, Commit from plenum.common.types import f from plenum.common.util import getNoInstances, get_utc_epoch +from plenum.common.config_helper import PNodeConfigHelper from plenum.common.request import Request from plenum.server.node import Node from plenum.test import waits from plenum.test.msgs import randomMsg -from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals +from plenum.test.spy_helpers import getLastClientReqReceivedForNode, getAllArgs, getAllReturnVals, \ + 
+    getAllMsgReceivedForNode
 from plenum.test.test_client import TestClient, genTestClient
 from plenum.test.test_node import TestNode, TestReplica, TestNodeSet, \
     checkNodesConnected, ensureElectionsDone, NodeRef, getPrimaryReplica
@@ -331,83 +333,78 @@ def buildCompletedTxnFromReply(request, reply: Reply) -> Dict:
     return txn


-async def msgAll(nodes: TestNodeSet):
+async def msgAll(nodes):
     # test sending messages from every node to every other node
     # TODO split send and check so that the messages can be sent concurrently
-    for p in permutations(nodes.nodeNames, 2):
-        await sendMessageAndCheckDelivery(nodes, p[0], p[1])
+    for p in permutations(nodes, 2):
+        await sendMessageAndCheckDelivery(p[0], p[1])


-def sendMessage(nodes: TestNodeSet,
-                frm: NodeRef,
-                to: NodeRef,
+def sendMessage(sender: Node,
+                reciever: Node,
                 msg: Optional[Tuple] = None):
     """
     Sends message from one node to another

     :param nodes:
-    :param frm: sender
-    :param to: recepient
+    :param sender: sender
+    :param reciever: recipient
     :param msg: optional message - by default random one generated
     :return:
     """
-    logger.debug("Sending msg from {} to {}".format(frm, to))
+    logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
     msg = msg if msg else randomMsg()
-    sender = nodes.getNode(frm)
-    rid = sender.nodestack.getRemote(nodes.getNodeName(to)).uid
+    rid = sender.nodestack.getRemote(reciever.name).uid
     sender.nodestack.send(msg, rid)


-async def sendMessageAndCheckDelivery(nodes: TestNodeSet,
-                                      frm: NodeRef,
-                                      to: NodeRef,
+async def sendMessageAndCheckDelivery(sender: Node,
+                                      reciever: Node,
                                       msg: Optional[Tuple] = None,
                                       method=None,
                                       customTimeout=None):
     """
     Sends message from one node to another and checks that it was delivered

-    :param nodes:
-    :param frm: sender
-    :param to: recepient
+    :param sender: sender
+    :param reciever: recipient
     :param msg: optional message - by default random one generated
     :param customTimeout:
     :return:
     """
-    logger.debug("Sending msg from {} to {}".format(frm, to))
+    logger.debug("Sending msg from {} to {}".format(sender.name, reciever.name))
     msg = msg if msg else randomMsg()
-    sender = nodes.getNode(frm)
-    rid = sender.nodestack.getRemote(nodes.getNodeName(to)).uid
+    rid = sender.nodestack.getRemote(reciever.name).uid
     sender.nodestack.send(msg, rid)

     timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
-    await eventually(checkMessageReceived, msg, nodes, to, method,
+    await eventually(checkMessageReceived, msg, reciever, method,
                      retryWait=.1, timeout=timeout, ratchetSteps=10)


-def sendMessageToAll(nodes: TestNodeSet,
-                     frm: NodeRef,
+def sendMessageToAll(nodes,
+                     sender: Node,
                      msg: Optional[Tuple] = None):
     """
     Sends message from one node to all others

     :param nodes:
-    :param frm: sender
+    :param sender: sender
     :param msg: optional message - by default random one generated
     :return:
     """
     for node in nodes:
-        if node != frm:
-            sendMessage(nodes, frm, node, msg)
+        if node != sender:
+            sendMessage(sender, node, msg)


-async def sendMessageAndCheckDeliveryToAll(nodes: TestNodeSet,
-                                           frm: NodeRef,
+async def sendMessageAndCheckDeliveryToAll(nodes,
+                                           sender: Node,
                                            msg: Optional[Tuple] = None,
                                            method=None,
                                            customTimeout=None):
@@ -415,7 +412,7 @@ async def sendMessageAndCheckDeliveryToAll(nodes: TestNodeSet,
     Sends message from one node to all other and checks that it was delivered

     :param nodes:
-    :param frm: sender
+    :param sender: sender
     :param msg: optional message - by default random one generated
     :param customTimeout:
     :return:
@@ -423,22 +420,33 @@ async def sendMessageAndCheckDeliveryToAll(nodes: TestNodeSet,
     customTimeout = customTimeout or waits.expectedNodeToAllNodesMessageDeliveryTime(
         len(nodes))
     for node in nodes:
-        if node != frm:
-            await sendMessageAndCheckDelivery(nodes, frm, node, msg, method, customTimeout)
+        if node != sender:
+            await sendMessageAndCheckDelivery(sender, node, msg, method, customTimeout)
             break


-def checkMessageReceived(msg, nodes, to, method: str = None):
-    allMsgs = nodes.getAllMsgReceived(to, method)
+def checkMessageReceived(msg, receiver, method: str = None):
+    allMsgs = getAllMsgReceivedForNode(receiver, method)
     assert msg in allMsgs


-def addNodeBack(nodeSet: TestNodeSet,
+def addNodeBack(node_set,
                 looper: Looper,
-                nodeName: str) -> TestNode:
-    node = nodeSet.addNode(nodeName)
-    looper.add(node)
-    return node
+                node: Node,
+                tconf,
+                tdir) -> TestNode:
+    config_helper = PNodeConfigHelper(node.name, tconf, chroot=tdir)
+    restartedNode = TestNode(node.name,
+                             config_helper=config_helper,
+                             config=tconf,
+                             ha=node.nodestack.ha,
+                             cliha=node.clientstack.ha)
+    for node in node_set:
+        if node.name != restartedNode.name:
+            node.nodestack.reconnectRemoteWithName(restartedNode.name)
+    node_set.append(restartedNode)
+    looper.add(restartedNode)
+    return restartedNode


 def checkPropagateReqCountOfNode(node: TestNode, identifier: str, reqId: int):
@@ -468,9 +476,9 @@ def checkRequestNotReturnedToNode(node: TestNode, identifier: str, reqId: int,
     assert not requestReturnedToNode(node, identifier, reqId, instId)


-def check_request_is_not_returned_to_nodes(nodeSet, request):
-    instances = range(getNoInstances(len(nodeSet)))
-    for node, inst_id in itertools.product(nodeSet, instances):
+def check_request_is_not_returned_to_nodes(txnPoolNodeSet, request):
+    instances = range(getNoInstances(len(txnPoolNodeSet)))
+    for node, inst_id in itertools.product(txnPoolNodeSet, instances):
         checkRequestNotReturnedToNode(node,
                                       request.identifier,
                                       request.reqId,
@@ -645,16 +653,16 @@ def checkViewNoForNodes(nodes: Iterable[TestNode], expectedViewNo: int = None):
     return vNo


-def waitForViewChange(looper, nodeSet, expectedViewNo=None,
+def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
                       customTimeout=None):
     """
     Waits for nodes to come to same view.
Raises exception when time is out """ - timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodeSet)) + timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) return looper.run(eventually(checkViewNoForNodes, - nodeSet, + txnPoolNodeSet, expectedViewNo, timeout=timeout)) diff --git a/plenum/test/input_validation/conftest.py b/plenum/test/input_validation/conftest.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/plenum/test/input_validation/fields_validation/test_base58_field.py b/plenum/test/input_validation/fields_validation/test_base58_field.py index cf169d87ba..1985d34dec 100644 --- a/plenum/test/input_validation/fields_validation/test_base58_field.py +++ b/plenum/test/input_validation/fields_validation/test_base58_field.py @@ -48,4 +48,4 @@ def test_invalid_symbols_truncated_output(): b58_by_len(20)[slice(0, -len(INVALID_CHARS))] + INVALID_CHARS) assert res assert (res == 'should not contain the following chars ' - '{} (truncated)'.format(sorted(set(INVALID_CHARS))[:10])) + '{} (truncated)'.format(sorted(set(INVALID_CHARS))[:10])) diff --git a/plenum/test/input_validation/fields_validation/test_ledger_id_field.py b/plenum/test/input_validation/fields_validation/test_ledger_id_field.py index 105001633b..478512d930 100644 --- a/plenum/test/input_validation/fields_validation/test_ledger_id_field.py +++ b/plenum/test/input_validation/fields_validation/test_ledger_id_field.py @@ -2,7 +2,6 @@ from plenum.common.constants import POOL_LEDGER_ID, DOMAIN_LEDGER_ID from plenum import PLUGIN_LEDGER_IDS - validator = LedgerIdField() diff --git a/plenum/test/input_validation/fields_validation/test_time_among_field.py b/plenum/test/input_validation/fields_validation/test_time_among_field.py index 587d75dd0c..9f7320ec42 100644 --- a/plenum/test/input_validation/fields_validation/test_time_among_field.py +++ b/plenum/test/input_validation/fields_validation/test_time_among_field.py @@ -18,4 +18,4 @@ def test_empty_node_id(): def test_long_id(): - assert validator.validate(("NNooddee11::00", 1)) \ No newline at end of file + assert validator.validate(("NNooddee11::00", 1)) diff --git a/plenum/test/input_validation/fields_validation/test_version_field.py b/plenum/test/input_validation/fields_validation/test_version_field.py index 9b1a9e0e16..0b65c22860 100644 --- a/plenum/test/input_validation/fields_validation/test_version_field.py +++ b/plenum/test/input_validation/fields_validation/test_version_field.py @@ -3,7 +3,6 @@ from plenum.common.messages.fields import VersionField from plenum.config import VERSION_FIELD_LIMIT - validator = VersionField(components_number=(2, 3,), max_length=VERSION_FIELD_LIMIT) diff --git a/plenum/test/input_validation/helper.py b/plenum/test/input_validation/helper.py index c8f253adea..b256309694 100644 --- a/plenum/test/input_validation/helper.py +++ b/plenum/test/input_validation/helper.py @@ -56,10 +56,10 @@ class NonEmptyStringField(TestFieldBase): class HexString64Field(TestFieldBase): # TODO implement negative_test_cases = ( - #'', - #'fba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # 63 characters - #'77fba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # 65 characters - #'xfba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # first char is 'x' + # '', + # 'fba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # 63 characters + # '77fba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # 65 characters + # 
'xfba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # first char is 'x' ) positive_test_cases = ( '7fba333c13994f63edd900cdc625b88d0dcee6dda7df2c6e9b5bcd5c1072c04f', # lower case @@ -138,16 +138,16 @@ class RequestIdrField(TestFieldBase): def negative_test_cases(self): return [ [[self.idr_field.positive_test_cases[0], - self.ts_field.negative_test_cases[0]]], + self.ts_field.negative_test_cases[0]]], [[self.idr_field.negative_test_cases[0], - self.ts_field.positive_test_cases[0]]], + self.ts_field.positive_test_cases[0]]], ] @property def positive_test_cases(self): return [ [[self.idr_field.positive_test_cases[0], - self.ts_field.positive_test_cases[0]]], + self.ts_field.positive_test_cases[0]]], ] @@ -160,16 +160,16 @@ class TieAmongField(TestFieldBase): def negative_test_cases(self): return [ [self.name_field.positive_test_cases[0], - self.ts_field.negative_test_cases[0]], + self.ts_field.negative_test_cases[0]], [self.name_field.negative_test_cases[0], - self.ts_field.positive_test_cases[0]], + self.ts_field.positive_test_cases[0]], ] @property def positive_test_cases(self): return [ [self.name_field.positive_test_cases[0], - self.ts_field.positive_test_cases[0]], + self.ts_field.positive_test_cases[0]], ] @@ -247,7 +247,7 @@ def positive_test_cases(self): class MessageDescriptor(TestFieldBase): - field_types = (dict, ) + field_types = (dict,) def __init__(self, klass, fields, optional_fields=None, name=None): self.klass = klass diff --git a/plenum/test/input_validation/message_validation/test_catchuprep_message.py b/plenum/test/input_validation/message_validation/test_catchuprep_message.py index 51229e92ad..96b8560ed7 100644 --- a/plenum/test/input_validation/message_validation/test_catchuprep_message.py +++ b/plenum/test/input_validation/message_validation/test_catchuprep_message.py @@ -3,7 +3,6 @@ from plenum.common.messages.fields import \ IterableField, LedgerIdField, MapField - EXPECTED_ORDERED_FIELDS = OrderedDict([ ("ledgerId", LedgerIdField), ("txns", AnyValueField), diff --git a/plenum/test/input_validation/message_validation/test_catchupreq_message.py b/plenum/test/input_validation/message_validation/test_catchupreq_message.py index c49482a5f7..f69c7b8529 100644 --- a/plenum/test/input_validation/message_validation/test_catchupreq_message.py +++ b/plenum/test/input_validation/message_validation/test_catchupreq_message.py @@ -4,7 +4,6 @@ from plenum.common.messages.fields import \ NonNegativeNumberField, LedgerIdField - EXPECTED_ORDERED_FIELDS = OrderedDict([ ("ledgerId", LedgerIdField), ("seqNoStart", NonNegativeNumberField), diff --git a/plenum/test/input_validation/message_validation/test_checkpoint_message.py b/plenum/test/input_validation/message_validation/test_checkpoint_message.py index 18e2d6a499..f886fc31a9 100644 --- a/plenum/test/input_validation/message_validation/test_checkpoint_message.py +++ b/plenum/test/input_validation/message_validation/test_checkpoint_message.py @@ -4,7 +4,6 @@ from plenum.common.messages.fields import \ NonNegativeNumberField, LimitedLengthStringField - EXPECTED_ORDERED_FIELDS = OrderedDict([ ("instId", NonNegativeNumberField), ("viewNo", NonNegativeNumberField), diff --git a/plenum/test/input_validation/message_validation/test_threepcstate_message.py b/plenum/test/input_validation/message_validation/test_threepcstate_message.py index ba24fe1178..f26669fc2f 100644 --- a/plenum/test/input_validation/message_validation/test_threepcstate_message.py +++ 
b/plenum/test/input_validation/message_validation/test_threepcstate_message.py @@ -3,7 +3,6 @@ from plenum.common.messages.fields import \ NonNegativeNumberField, IterableField - EXPECTED_ORDERED_FIELDS = OrderedDict([ ("instId", NonNegativeNumberField), ("messages", IterableField), diff --git a/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py b/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py index 65a57edfee..c521c924c0 100644 --- a/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py +++ b/plenum/test/instances/test_instance_cannot_become_active_with_less_than_four_servers.py @@ -1,20 +1,24 @@ from typing import Iterable import pytest + +from plenum.test.node_request.helper import get_node_by_name from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger -from stp_core.loop.looper import Looper from plenum.common.startable import Status from plenum.test.greek import genNodeNames from plenum.test.helper import addNodeBack, ordinal -from plenum.test.test_node import TestNodeSet, checkNodesConnected, \ +from plenum.test.test_node import checkNodesConnected, \ checkNodeRemotes from plenum.test.test_stack import CONNECTED, JOINED_NOT_ALLOWED from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper logger = getlogger() +nodeCount = 13 +f = 4 +minimumNodesToBeUp = nodeCount - f + @pytest.fixture(scope="function", autouse=True) def limitTestRunningTime(): @@ -23,65 +27,64 @@ def limitTestRunningTime(): # noinspection PyIncorrectDocstring def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers( - tconf_for_func, tdir_for_func): + txnPoolNodeSet, looper, tconf, tdir): """ A protocol instance must have at least 4 nodes to come up. The status of the nodes will change from starting to started only after the addition of the fourth node to the system. """ - nodeCount = 13 - f = 4 - minimumNodesToBeUp = nodeCount - f nodeNames = genNodeNames(nodeCount) - with TestNodeSet(tconf_for_func, names=nodeNames, tmpdir=tdir_for_func) as nodeSet: - with Looper(nodeSet) as looper: - - # helpers - - def genExpectedStates(connecteds: Iterable[str]): - return { - nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED - for nn in nodeNames} - - def checkNodeStatusRemotesAndF(expectedStatus: Status, - nodeIdx: int): - for node in nodeSet.nodes.values(): - checkNodeRemotes(node, - genExpectedStates(nodeNames[:nodeIdx + 1])) - assert node.status == expectedStatus - - def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status): - logger.info("Add back the {} node and see status of {}". 
- format(ordinal(nodeIdx + 1), expectedStatus)) - addNodeBack(nodeSet, looper, nodeNames[nodeIdx]) - - timeout = waits.expectedNodeStartUpTimeout() + \ - waits.expectedPoolInterconnectionTime(len(nodeSet)) - # TODO: Probably it's better to modify waits.* functions - timeout *= 1.5 - looper.run(eventually(checkNodeStatusRemotesAndF, - expectedStatus, - nodeIdx, - retryWait=1, timeout=timeout)) - - logger.debug("Sharing keys") - looper.run(checkNodesConnected(nodeSet)) - - logger.debug("Remove all the nodes") - for n in nodeNames: - looper.removeProdable(nodeSet.nodes[n]) - nodeSet.removeNode(n) - - looper.runFor(10) - - logger.debug("Add nodes back one at a time") - for i in range(nodeCount): - nodes = i + 1 - if nodes < minimumNodesToBeUp: - expectedStatus = Status.starting - elif nodes < nodeCount: - expectedStatus = Status.started_hungry - else: - expectedStatus = Status.started - addNodeBackAndCheck(i, expectedStatus) + current_node_set = list(txnPoolNodeSet) + + def genExpectedStates(connecteds: Iterable[str]): + return { + nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED + for nn in nodeNames} + + def checkNodeStatusRemotesAndF(expectedStatus: Status, + nodeIdx: int): + for node in current_node_set: + checkNodeRemotes(node, + genExpectedStates(nodeNames[:nodeIdx + 1])) + assert node.status == expectedStatus + + def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status): + logger.info("Add back the {} node and see status of {}". + format(ordinal(nodeIdx + 1), expectedStatus)) + addNodeBack( + current_node_set, looper, + get_node_by_name(txnPoolNodeSet, nodeNames[nodeIdx]), + tconf, tdir) + looper.run(checkNodesConnected(current_node_set)) + timeout = waits.expectedNodeStartUpTimeout() + \ + waits.expectedPoolInterconnectionTime(len(current_node_set)) + # TODO: Probably it's better to modify waits.* functions + timeout *= 1.5 + looper.run(eventually(checkNodeStatusRemotesAndF, + expectedStatus, + nodeIdx, + retryWait=1, timeout=timeout)) + + logger.debug("Sharing keys") + looper.run(checkNodesConnected(current_node_set)) + + logger.debug("Remove all the nodes") + for n in nodeNames: + node_n = get_node_by_name(current_node_set, n) + looper.removeProdable(node_n) + node_n.stop() + current_node_set.remove(node_n) + + looper.runFor(10) + + logger.debug("Add nodes back one at a time") + for i in range(nodeCount): + nodes = i + 1 + if nodes < minimumNodesToBeUp: + expectedStatus = Status.starting + elif nodes < nodeCount: + expectedStatus = Status.started_hungry + else: + expectedStatus = Status.started + addNodeBackAndCheck(i, expectedStatus) diff --git a/plenum/test/instances/test_msgs_from_slow_instances.py b/plenum/test/instances/test_msgs_from_slow_instances.py index 533a90bf12..d390c3d2e7 100644 --- a/plenum/test/instances/test_msgs_from_slow_instances.py +++ b/plenum/test/instances/test_msgs_from_slow_instances.py @@ -6,7 +6,6 @@ from plenum.test.delayers import delayerMsgTuple from plenum.test.test_node import TestNode from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.conftest import committed1, \ prepared1, preprepared1, propagated1, reqAcked1, \ sent1, noRetryReq, faultyNodes diff --git a/plenum/test/instances/test_multiple_commit.py b/plenum/test/instances/test_multiple_commit.py index d36c476701..feac6ea9e2 100644 --- a/plenum/test/instances/test_multiple_commit.py +++ b/plenum/test/instances/test_multiple_commit.py @@ -12,7 +12,6 @@ sendDuplicate3PhaseMsg from plenum.test.test_node import 
getNonPrimaryReplicas, getPrimaryReplica from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.conftest import committed1, \ prepared1, preprepared1, propagated1, reqAcked1, \ sent1, noRetryReq, faultyNodes diff --git a/plenum/test/instances/test_multiple_instance_change_msgs.py b/plenum/test/instances/test_multiple_instance_change_msgs.py index 1e825dee7d..1a2a9ee966 100644 --- a/plenum/test/instances/test_multiple_instance_change_msgs.py +++ b/plenum/test/instances/test_multiple_instance_change_msgs.py @@ -13,20 +13,20 @@ @pytest.mark.skip(reason="INDY-80. Not yet implemented") -def testMultipleInstanceChangeMsgsMarkNodeAsSuspicious(looper, nodeSet, up): - maliciousNode = nodeSet.Alpha +def testMultipleInstanceChangeMsgsMarkNodeAsSuspicious(looper, txnPoolNodeSet): + maliciousNode = txnPoolNodeSet[0] for i in range(0, 5): maliciousNode.send(maliciousNode.view_changer._create_instance_change_msg(i, 0)) def chk(instId): - for node in nodeSet: + for node in txnPoolNodeSet: if node.name != maliciousNode.name: args = getAllArgs(node, ViewChanger.process_instance_change_msg) assert len(args) == 5 for arg in args: assert arg['frm'] == maliciousNode.name - numOfNodes = len(nodeSet) + numOfNodes = len(txnPoolNodeSet) instanceChangeTimeout = waits.expectedPoolViewChangeStartedTimeout( numOfNodes) @@ -35,7 +35,7 @@ def chk(instId): timeout=instanceChangeTimeout)) def g(): - for node in nodeSet: + for node in txnPoolNodeSet: if node.name != maliciousNode.name: frm, reason, code = getAllArgs(node, Node.reportSuspiciousNode) assert frm == maliciousNode.name diff --git a/plenum/test/instances/test_multiple_pre_prepare.py b/plenum/test/instances/test_multiple_pre_prepare.py index e3e80b7c43..b326c054ce 100644 --- a/plenum/test/instances/test_multiple_pre_prepare.py +++ b/plenum/test/instances/test_multiple_pre_prepare.py @@ -12,7 +12,6 @@ sendDuplicate3PhaseMsg from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.conftest import committed1, \ prepared1, preprepared1, propagated1, reqAcked1, \ sent1, noRetryReq, faultyNodes diff --git a/plenum/test/instances/test_multiple_prepare.py b/plenum/test/instances/test_multiple_prepare.py index 2ef335035e..ae34618a77 100644 --- a/plenum/test/instances/test_multiple_prepare.py +++ b/plenum/test/instances/test_multiple_prepare.py @@ -11,7 +11,6 @@ sendDuplicate3PhaseMsg from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.conftest import committed1, \ prepared1, preprepared1, propagated1, reqAcked1, \ sent1, noRetryReq, faultyNodes diff --git a/plenum/test/instances/test_pre_prepare_digest.py b/plenum/test/instances/test_pre_prepare_digest.py index 6c698d0bab..cc528504b7 100644 --- a/plenum/test/instances/test_pre_prepare_digest.py +++ b/plenum/test/instances/test_pre_prepare_digest.py @@ -12,7 +12,6 @@ send3PhaseMsgWithIncorrectDigest from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.conftest import committed1, \ prepared1, preprepared1, propagated1, reqAcked1, \ sent1, noRetryReq, faultyNodes diff --git a/plenum/test/instances/test_prepare_digest.py 
b/plenum/test/instances/test_prepare_digest.py index a726c91bfe..b9692a8629 100644 --- a/plenum/test/instances/test_prepare_digest.py +++ b/plenum/test/instances/test_prepare_digest.py @@ -11,7 +11,6 @@ send3PhaseMsgWithIncorrectDigest from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica from plenum.test import waits -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.conftest import committed1, \ prepared1, preprepared1, propagated1, reqAcked1, \ sent1, noRetryReq, faultyNodes diff --git a/plenum/test/logging/conftest.py b/plenum/test/logging/conftest.py index 534a8a73d5..24c7b5f642 100644 --- a/plenum/test/logging/conftest.py +++ b/plenum/test/logging/conftest.py @@ -39,14 +39,14 @@ def wrapper(levels=None, files=None, funcs=None, msgs=None): class TestingFilter(logging.Filter): def filter(self, record): return ( - (levels is None or - record.levelname in levels) and - (files is None or - record.filename in files) and - (funcs is None or - record.funcName in funcs) and - (reMsgs is None or - reMsgs.search(record.getMessage()) is not None) + (levels is None or + record.levelname in levels) and + (files is None or + record.filename in files) and + (funcs is None or + record.funcName in funcs) and + (reMsgs is None or + reMsgs.search(record.getMessage()) is not None) ) def tester(record): diff --git a/plenum/test/logging/test_logging_txn_state.py b/plenum/test/logging/test_logging_txn_state.py index b3ee33dd23..c86cc7a8ea 100644 --- a/plenum/test/logging/test_logging_txn_state.py +++ b/plenum/test/logging/test_logging_txn_state.py @@ -9,13 +9,11 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, STEWARD_STRING -from plenum.test.pool_transactions.conftest import looper from plenum.test.pool_transactions.helper import prepare_nym_request, \ sdk_sign_and_send_prepared_request from plenum.test import waits from plenum.test.helper import sdk_send_random_and_check, sdk_get_and_check_replies - ERORR_MSG = "something went wrong" diff --git a/plenum/test/malicious_behaviors_client.py b/plenum/test/malicious_behaviors_client.py index 6e9395342d..a926458b8b 100644 --- a/plenum/test/malicious_behaviors_client.py +++ b/plenum/test/malicious_behaviors_client.py @@ -82,5 +82,6 @@ def sendsUnsignedRequest(client) -> Client: def evilSign(self, msg, signer) -> Mapping: logger.debug("EVIL: client doesn't sign any of the requests") return msg + client.nodestack.sign = types.MethodType(evilSign, client) return client diff --git a/plenum/test/malicious_behaviors_node.py b/plenum/test/malicious_behaviors_node.py index a5ca4e0888..d9fcd2c3c9 100644 --- a/plenum/test/malicious_behaviors_node.py +++ b/plenum/test/malicious_behaviors_node.py @@ -44,11 +44,11 @@ def evilCreatePropagate(self, return node -def delaysPrePrepareProcessing(node, delay: float=30, instId: int=None): +def delaysPrePrepareProcessing(node, delay: float = 30, instId: int = None): node.nodeIbStasher.delay(ppDelay(delay=delay, instId=instId)) -def delaysCommitProcessing(node, delay: float=30, instId: int=None): +def delaysCommitProcessing(node, delay: float = 30, instId: int = None): node.nodeIbStasher.delay(cDelay(delay=delay, instId=instId)) @@ -57,7 +57,7 @@ def delaysCommitProcessing(node, delay: float=30, instId: int=None): def sendDuplicate3PhaseMsg( node: TestNode, msgType: ThreePhaseMsg, - count: int=2, + count: int = 2, instId=None): def evilSendPrePrepareRequest(self, ppReq: PrePrepare): logger.debug("EVIL: Sending duplicate pre-prepare message: {}". 
@@ -118,7 +118,7 @@ def malign3PhaseSendingMethod(replica: TestReplica, msgType: ThreePhaseMsg, common.error.error("Not a 3 phase message") -def malignInstancesOfNode(node: TestNode, malignMethod, instId: int=None): +def malignInstancesOfNode(node: TestNode, malignMethod, instId: int = None): if instId is not None: malignMethod(replica=node.replicas[instId]) else: @@ -129,7 +129,7 @@ def malignInstancesOfNode(node: TestNode, malignMethod, instId: int=None): def send3PhaseMsgWithIncorrectDigest(node: TestNode, msgType: ThreePhaseMsg, - instId: int=None): + instId: int = None): def evilSendPrePrepareRequest(self, ppReq: PrePrepare): logger.debug("EVIL: Creating pre-prepare message for request : {}". format(ppReq)) @@ -183,6 +183,7 @@ def newGenerateReply(self, viewNo: int, req: Request) -> Reply: reply.result[f.SIG.nm] = "incorrect signature" reply.result["declaration"] = "All your base are belong to us." return reply + node.generateReply = types.MethodType(newGenerateReply, node) diff --git a/plenum/test/monitoring/test_avg_latency.py b/plenum/test/monitoring/test_avg_latency.py index 045cd12eeb..2a8177cb51 100644 --- a/plenum/test/monitoring/test_avg_latency.py +++ b/plenum/test/monitoring/test_avg_latency.py @@ -1,7 +1,5 @@ from stp_core.common.log import getlogger from plenum.test.helper import sdk_send_random_and_check -from plenum.test.pool_transactions.conftest import looper - nodeCount = 4 logger = getlogger() diff --git a/plenum/test/monitoring/test_instance_change_with_Delta.py b/plenum/test/monitoring/test_instance_change_with_Delta.py index 4e4b1b52b6..46b9523f40 100644 --- a/plenum/test/monitoring/test_instance_change_with_Delta.py +++ b/plenum/test/monitoring/test_instance_change_with_Delta.py @@ -12,7 +12,6 @@ from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually - nodeCount = 7 whitelist = ["discarding message"] @@ -24,6 +23,7 @@ verify a view change happens """ + @pytest.fixture def logger(): logger = getlogger() @@ -32,9 +32,11 @@ def logger(): yield logger logger.root.setLevel(old_value) + # autouse and inject before others in all tests pytestmark = pytest.mark.usefixtures("logger") + def latestPerfChecks(nodes): """ Returns spylog entry for most recent checkPerformance executions for a set @@ -64,8 +66,8 @@ def ensureAnotherPerfCheck(): @pytest.fixture(scope="module") -def step1(looper, nodeSet, up, wallet1, client1): - startedNodes = nodeSet +def step1(looper, txnPoolNodeSet, wallet1, client1): + startedNodes = txnPoolNodeSet """ stand up a pool of nodes and send 5 requests to client """ diff --git a/plenum/test/monitoring/test_instance_change_with_req_Lambda.py b/plenum/test/monitoring/test_instance_change_with_req_Lambda.py index b51a7a58c5..dcbd3edd9b 100644 --- a/plenum/test/monitoring/test_instance_change_with_req_Lambda.py +++ b/plenum/test/monitoring/test_instance_change_with_req_Lambda.py @@ -6,7 +6,6 @@ from plenum.test.test_node import getPrimaryReplica from plenum.test.spy_helpers import getAllReturnVals from plenum.test.helper import sdk_send_random_and_check -from plenum.test.pool_transactions.conftest import looper nodeCount = 7 whitelist = ["discarding message"] @@ -26,9 +25,8 @@ @pytest.fixture() def setup(looper, tconf, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): - sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 5) + sdk_pool_handle, sdk_wallet_client, 5) P = getPrimaryReplica(txnPoolNodeSet) # set LAMBDA smaller than the production config to make the test faster @@ 
-55,8 +53,8 @@ def specificPrePrepare(msg): P.outBoxTestStasher.delay(specificPrePrepare) # TODO select or create a timeout for this case in 'waits' sdk_send_random_and_check(looper, txnPoolNodeSet, - sdk_pool_handle, sdk_wallet_client, 5, - customTimeoutPerReq=tconf.TestRunningTimeLimitSec) + sdk_pool_handle, sdk_wallet_client, 5, + customTimeoutPerReq=tconf.TestRunningTimeLimitSec) return adict(nodes=txnPoolNodeSet) diff --git a/plenum/test/monitoring/test_monitor_attributes.py b/plenum/test/monitoring/test_monitor_attributes.py index cd1afd65d4..73dc047b9e 100644 --- a/plenum/test/monitoring/test_monitor_attributes.py +++ b/plenum/test/monitoring/test_monitor_attributes.py @@ -1,5 +1,5 @@ -def testHasMasterPrimary(nodeSet, up): +def testHasMasterPrimary(txnPoolNodeSet): masterPrimaryCount = 0 - for node in nodeSet: + for node in txnPoolNodeSet: masterPrimaryCount += int(node.monitor.hasMasterPrimary) assert masterPrimaryCount == 1 diff --git a/plenum/test/monitoring/test_monitoring_params_with_zfn.py b/plenum/test/monitoring/test_monitoring_params_with_zfn.py index 678b8c5b9c..4c3c5fd128 100644 --- a/plenum/test/monitoring/test_monitoring_params_with_zfn.py +++ b/plenum/test/monitoring/test_monitoring_params_with_zfn.py @@ -1,7 +1,5 @@ import pytest -from plenum.test.pool_transactions.conftest import looper - nodeCount = 7 diff --git a/plenum/test/monitoring/test_no_check_if_no_new_requests.py b/plenum/test/monitoring/test_no_check_if_no_new_requests.py index 0ef44b7f3a..9cf7e2f188 100644 --- a/plenum/test/monitoring/test_no_check_if_no_new_requests.py +++ b/plenum/test/monitoring/test_no_check_if_no_new_requests.py @@ -1,4 +1,3 @@ -from plenum.test.pool_transactions.conftest import looper from plenum.test.view_change.conftest import perf_chk_patched from plenum.test.helper import sdk_send_random_and_check @@ -15,7 +14,7 @@ def test_not_check_if_no_new_requests(perf_chk_patched, looper, txnPoolNodeSet, Checks that node does not do performance check if there were no new requests since previous check """ - + # Ensure that nodes participating, because otherwise they do not do check for node in txnPoolNodeSet: assert node.isParticipating diff --git a/plenum/test/monitoring/test_post_monitoring_stats.py b/plenum/test/monitoring/test_post_monitoring_stats.py index c5776fc364..d6fc96d167 100644 --- a/plenum/test/monitoring/test_post_monitoring_stats.py +++ b/plenum/test/monitoring/test_post_monitoring_stats.py @@ -1,11 +1,8 @@ -from plenum.common.config_util import getConfig from stp_core.loop.eventually import eventually from plenum.server.monitor import Monitor -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_random_and_check - def testPostingThroughput(postingStatsEnabled, decreasedMonitoringTimeouts, looper, @@ -32,10 +29,10 @@ def testPostingThroughput(postingStatsEnabled, assert node.monitor.totalRequests == 0 sdk_send_random_and_check(looper, - txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - reqCount) + txnPoolNodeSet, + sdk_pool_handle, + sdk_wallet_client, + reqCount) for node in txnPoolNodeSet: assert len(node.monitor.orderedRequestsInLast) == reqCount @@ -88,10 +85,10 @@ def testPostingLatency(postingStatsEnabled, assert node.monitor.avgBackupLatency == 0 sdk_send_random_and_check(looper, - txnPoolNodeSet, - sdk_pool_handle, - sdk_wallet_client, - reqCount) + txnPoolNodeSet, + sdk_pool_handle, + sdk_wallet_client, + reqCount) for node in txnPoolNodeSet: assert node.monitor.masterLatency > 0 diff --git 
a/plenum/test/monitoring/test_stats_publisher.py b/plenum/test/monitoring/test_stats_publisher.py index 792d8559e9..8d2aeff8fe 100644 --- a/plenum/test/monitoring/test_stats_publisher.py +++ b/plenum/test/monitoring/test_stats_publisher.py @@ -128,7 +128,7 @@ def testSendManyNoExceptionsIfDestPortFromSourceRange(): statsPublisher.send(message="testMessage{}".format(i)) assert N == len(statsPublisher.refused) + \ - len(statsPublisher.unexpected) + len(statsPublisher.sent) + len(statsPublisher.unexpected) + len(statsPublisher.sent) class TestStatsPublisher(StatsPublisher): diff --git a/plenum/test/monitoring/test_system_stats.py b/plenum/test/monitoring/test_system_stats.py index be3ec66332..996c1af71b 100644 --- a/plenum/test/monitoring/test_system_stats.py +++ b/plenum/test/monitoring/test_system_stats.py @@ -50,4 +50,4 @@ def test_traffic(): assert data2['cpu']['value'] == cpu assert data2['ram']['value'] == ram assert data2['traffic']['value'] == bytes / \ - 1024 - data1['traffic']['value'] + 1024 - data1['traffic']['value'] diff --git a/plenum/test/monitoring/test_throughput.py b/plenum/test/monitoring/test_throughput.py index 463fec02d6..f89bc5e12f 100644 --- a/plenum/test/monitoring/test_throughput.py +++ b/plenum/test/monitoring/test_throughput.py @@ -1,9 +1,7 @@ from typing import Iterable from stp_core.common.log import getlogger -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_random_and_check - nodeCount = 4 logger = getlogger() diff --git a/plenum/test/monitoring/test_warn_unordered_log_msg.py b/plenum/test/monitoring/test_warn_unordered_log_msg.py index 54513e3e11..a1c3b3dcb1 100644 --- a/plenum/test/monitoring/test_warn_unordered_log_msg.py +++ b/plenum/test/monitoring/test_warn_unordered_log_msg.py @@ -6,40 +6,39 @@ from plenum.test.helper import sendRandomRequest, \ waitForSufficientRepliesForRequests - nodeCount = 4 logger = getlogger() # noinspection PyIncorrectDocstring -def test_working_has_no_warn_log_msg(looper, nodeSet, +def test_working_has_no_warn_log_msg(looper, txnPoolNodeSet, wallet1, client1, patch_monitors): - monitor = nodeSet[0].monitor - assert no_any_warn(*nodeSet) + monitor = txnPoolNodeSet[0].monitor + assert no_any_warn(*txnPoolNodeSet) for i in range(monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM): req = sendRandomRequest(wallet1, client1) waitForSufficientRepliesForRequests(looper, client1, requests=[req]) looper.runFor(monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC) - assert no_any_warn(*nodeSet) + assert no_any_warn(*txnPoolNodeSet) # noinspection PyIncorrectDocstring def test_slow_node_has_warn_unordered_log_msg(looper, - nodeSet, + txnPoolNodeSet, wallet1, client1, patch_monitors): - npr = getNonPrimaryReplicas(nodeSet, 0)[0] + npr = getNonPrimaryReplicas(txnPoolNodeSet, 0)[0] slow_node = npr.node - monitor = nodeSet[0].monitor + monitor = txnPoolNodeSet[0].monitor delay = monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC * \ - monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM + 10 + monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM + 10 delaysCommitProcessing(slow_node, delay=delay) - assert no_any_warn(*nodeSet), \ + assert no_any_warn(*txnPoolNodeSet), \ 'all nodes do not have warnings before test' for i in range(monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM): @@ -47,7 +46,7 @@ def test_slow_node_has_warn_unordered_log_msg(looper, waitForSufficientRepliesForRequests(looper, client1, requests=[req]) looper.runFor(monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC) - others = [node for node in nodeSet if node.name != 
slow_node.name] + others = [node for node in txnPoolNodeSet if node.name != slow_node.name] assert no_any_warn(*others), \ 'others do not have warning after test' assert has_some_warn(slow_node), \ @@ -91,12 +90,12 @@ def no_last_warn(*nodes): @pytest.fixture(scope="function") -def patch_monitors(nodeSet): +def patch_monitors(txnPoolNodeSet): backup = {} req_num = 3 diff_sec = 1 window_mins = 0.25 - for node in nodeSet: + for node in txnPoolNodeSet: backup[node.name] = ( node.monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM, node.monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC, @@ -106,7 +105,7 @@ def patch_monitors(nodeSet): node.monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC = diff_sec node.monitor.WARN_NOT_PARTICIPATING_WINDOW_MINS = window_mins yield req_num, diff_sec, window_mins - for node in nodeSet: + for node in txnPoolNodeSet: node.monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM = backup[node.name][0] node.monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC = backup[node.name][1] node.monitor.WARN_NOT_PARTICIPATING_WINDOW_MINS = backup[node.name][2] diff --git a/plenum/test/node_catchup/conftest.py b/plenum/test/node_catchup/conftest.py index 2ebeb03ede..643e66e24a 100644 --- a/plenum/test/node_catchup/conftest.py +++ b/plenum/test/node_catchup/conftest.py @@ -9,10 +9,6 @@ check_last_3pc_master from plenum.test.pool_transactions.helper import \ addNewStewardAndNode, buildPoolClientAndWallet -# noinspection PyUnresolvedReferences -from plenum.test.pool_transactions.conftest import stewardAndWallet1, \ - steward1, stewardWallet, clientAndWallet1, client1, wallet1, \ - client1Connected from plenum.test.test_client import TestClient from plenum.test.test_node import checkNodesConnected @@ -51,7 +47,7 @@ def nodeCreatedAfterSomeTxns(looper, testNodeClass, do_post_node_creation, allPluginsPath=allPluginsPath, autoStart=True, do_post_node_creation=do_post_node_creation) yield looper, newNode, client, wallet, newStewardClient, \ - newStewardWallet + newStewardWallet @pytest.fixture("module") diff --git a/plenum/test/node_catchup/helper.py b/plenum/test/node_catchup/helper.py index 59b19e5ea9..1ed9b2cf13 100644 --- a/plenum/test/node_catchup/helper.py +++ b/plenum/test/node_catchup/helper.py @@ -16,7 +16,6 @@ from plenum.test import waits import pytest - logger = getlogger() @@ -25,7 +24,6 @@ def checkNodeDataForEquality(node: TestNode, *otherNodes: TestNode, exclude_from_check=None): - def chk_ledger_and_state(first_node, second_node, ledger_id): checkLedgerEquality(first_node.getLedger(ledger_id), second_node.getLedger(ledger_id)) diff --git a/plenum/test/node_catchup/test_catchup_demoted.py b/plenum/test/node_catchup/test_catchup_demoted.py index 1fcc1d2e2b..9f6734421a 100644 --- a/plenum/test/node_catchup/test_catchup_demoted.py +++ b/plenum/test/node_catchup/test_catchup_demoted.py @@ -17,7 +17,7 @@ def test_catch_up_after_demoted( "1. add a new node after sending some txns and check that catch-up " "is done (the new node is up to date)") looper, newNode, client, wallet, newStewardClient, \ - newStewardWallet = nodeSetWithNodeAddedAfterSomeTxns + newStewardWallet = nodeSetWithNodeAddedAfterSomeTxns waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:4]) logger.info("2. 
turn the new node off (demote)") diff --git a/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py b/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py index 2f3565f7ac..b64478dd7c 100644 --- a/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py +++ b/plenum/test/node_catchup/test_catchup_inlcuding_3PC.py @@ -9,7 +9,6 @@ from plenum.test.test_client import TestClient from stp_core.loop.eventually import eventually - TestRunningTimeLimitSec = 125 diff --git a/plenum/test/node_catchup/test_catchup_scenarios.py b/plenum/test/node_catchup/test_catchup_scenarios.py index f0b1b4f700..e6409e3497 100644 --- a/plenum/test/node_catchup/test_catchup_scenarios.py +++ b/plenum/test/node_catchup/test_catchup_scenarios.py @@ -10,7 +10,6 @@ from plenum.test.test_node import checkNodesConnected from plenum.test import waits - logger = getlogger() txnCount = 10 diff --git a/plenum/test/node_catchup/test_config_ledger.py b/plenum/test/node_catchup/test_config_ledger.py index 07b03f0841..aab21bb1fc 100644 --- a/plenum/test/node_catchup/test_config_ledger.py +++ b/plenum/test/node_catchup/test_config_ledger.py @@ -39,13 +39,13 @@ def read(key, looper, sdk_pool_handle, sdk_wallet): for op in [read_conf_op(key)]] reqs = sdk_sign_request_objects(looper, sdk_wallet, reqs_obj) sent_reqs = sdk_send_signed_requests(sdk_pool_handle, reqs) - (req, resp), = sdk_get_replies(looper, sent_reqs, timeout=10) + (req, resp), = sdk_get_replies(looper, sent_reqs, timeout=10) return json.loads(resp['result'][DATA])[key] def send_some_config_txns(looper, sdk_pool_handle, sdk_wallet_client, keys): for i in range(5): - key, val = 'key_{}'.format(i+1), randomString() + key, val = 'key_{}'.format(i + 1), randomString() write(key, val, looper, sdk_pool_handle, sdk_wallet_client) keys[key] = val return keys @@ -119,7 +119,7 @@ def test_new_node_catchup_config_ledger(looper, some_config_txns_done, A new node catches up the config ledger too """ assert len(newNodeCaughtUp.getLedger(CONFIG_LEDGER_ID)) >= \ - len(some_config_txns_done) + len(some_config_txns_done) def test_disconnected_node_catchup_config_ledger_txns(looper, diff --git a/plenum/test/node_catchup/test_discard_view_no.py b/plenum/test/node_catchup/test_discard_view_no.py index 2549da57c1..658ac45c99 100644 --- a/plenum/test/node_catchup/test_discard_view_no.py +++ b/plenum/test/node_catchup/test_discard_view_no.py @@ -11,7 +11,6 @@ checkProtocolInstanceSetup, getPrimaryReplica from plenum.test import waits - whitelist = ['found legacy entry'] # warnings diff --git a/plenum/test/node_catchup/test_large_catchup.py b/plenum/test/node_catchup/test_large_catchup.py index 6665547c8f..fa022ccf87 100644 --- a/plenum/test/node_catchup/test_large_catchup.py +++ b/plenum/test/node_catchup/test_large_catchup.py @@ -9,7 +9,6 @@ from stp_core.validators.message_length_validator import MessageLenValidator - TestRunningTimeLimitSec = 125 @@ -47,7 +46,6 @@ def test_large_catchup(tdir, tconf, txnPoolNodeSet, wallet1, client1, - client1Connected, allPluginsPath): """ Checks that node can catchup large ledgers diff --git a/plenum/test/node_catchup/test_new_node_catchup.py b/plenum/test/node_catchup/test_new_node_catchup.py index 331a408597..d10480ea50 100644 --- a/plenum/test/node_catchup/test_new_node_catchup.py +++ b/plenum/test/node_catchup/test_new_node_catchup.py @@ -43,7 +43,7 @@ def testPoolLegerCatchupBeforeDomainLedgerCatchup(txnPoolNodeSet, for comp in completes: completionTimes[comp.params.get('ledgerId')] = comp.endtime assert startTimes[0] < completionTimes[0] < \ - 
startTimes[1] < completionTimes[1] + startTimes[1] < completionTimes[1] @pytest.mark.skip(reason="SOV-554. " diff --git a/plenum/test/node_catchup/test_new_node_catchup2.py b/plenum/test/node_catchup/test_new_node_catchup2.py index e6a0b5bfb0..47770cb6b6 100644 --- a/plenum/test/node_catchup/test_new_node_catchup2.py +++ b/plenum/test/node_catchup/test_new_node_catchup2.py @@ -46,8 +46,8 @@ def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet, node.reset_delays_and_process_delayeds() timeout = waits.expectedPoolCatchupTime(len(txnPoolNodeSet)) + \ - catchup_delay + \ - waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + catchup_delay + \ + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout) waitNodeDataEquality(looper, new_node, *old_nodes) diff --git a/plenum/test/node_catchup/test_no_catchup_if_got_from_3pc.py b/plenum/test/node_catchup/test_no_catchup_if_got_from_3pc.py index 1e319f2bda..9e14786c74 100644 --- a/plenum/test/node_catchup/test_no_catchup_if_got_from_3pc.py +++ b/plenum/test/node_catchup/test_no_catchup_if_got_from_3pc.py @@ -1,8 +1,6 @@ from plenum.common.constants import DOMAIN_LEDGER_ID from plenum.common.messages.node_messages import Commit, ConsistencyProof from plenum.test.delayers import cpDelay, cDelay -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper from plenum.test.helper import send_reqs_batches_and_get_suff_replies from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data, \ @@ -14,8 +12,7 @@ from plenum.test.view_change.helper import ensure_view_change -def test_no_catchup_if_got_from_3pc(looper, txnPoolNodeSet, wallet1, client1, - client1Connected): +def test_no_catchup_if_got_from_3pc(looper, txnPoolNodeSet, wallet1, client1): """ A node is slow to receive COMMIT messages so after a view change it starts catchup. 
But before it can start requesting txns, the COMMITs messages diff --git a/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py b/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py index 55688551ca..900ca02475 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py +++ b/plenum/test/node_catchup/test_node_catchup_after_checkpoints.py @@ -10,7 +10,6 @@ from plenum.test.node_catchup.helper import waitNodeDataInequality, waitNodeDataEquality from plenum.test.test_node import getNonPrimaryReplicas - logger = getLogger() TestRunningTimeLimitSec = 200 @@ -22,7 +21,6 @@ def test_node_catchup_after_checkpoints( txnPoolNodeSet, wallet1, client1, - client1Connected, broken_node_and_others): """ For some reason a node misses 3pc messages but eventually the node stashes diff --git a/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py b/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py index ab8e414f21..0f56311ea5 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py +++ b/plenum/test/node_catchup/test_node_catchup_after_restart_after_txns.py @@ -118,7 +118,7 @@ def send_and_chk(ledger_state): # Not accurate timeout but a conservative one timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) + \ - 2 * delay_catchup_reply + 2 * delay_catchup_reply waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1], customTimeout=timeout) assert new_node_ledger.num_txns_caught_up == more_requests diff --git a/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py b/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py index 2f41225732..805024236c 100644 --- a/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py +++ b/plenum/test/node_catchup/test_node_catchup_after_restart_no_txns.py @@ -1,4 +1,3 @@ - import pytest from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState diff --git a/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py b/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py index ce50d98176..8d60e0052e 100644 --- a/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py +++ b/plenum/test/node_catchup/test_node_catchup_causes_no_desync.py @@ -11,9 +11,6 @@ disconnect_node_and_ensure_disconnected, \ reconnect_node_and_ensure_connected -# noinspection PyUnresolvedReferences -from plenum.test.pool_transactions.conftest import \ - clientAndWallet1, client1, wallet1, client1Connected, looper from stp_core.loop.eventually import eventually logger = getlogger() @@ -21,7 +18,6 @@ def make_master_replica_lag(node): - node.nodeIbStasher.delay(ppDelay(1200, 0)) node.nodeIbStasher.delay(pDelay(1200, 0)) node.nodeIbStasher.delay(cDelay(1200, 0)) @@ -46,7 +42,7 @@ def replicas_synced(node): def test_node_catchup_causes_no_desync(looper, txnPoolNodeSet, client1, - wallet1, client1Connected, monkeypatch): + wallet1, monkeypatch): """ Checks that transactions received by catchup do not break performance monitoring diff --git a/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py b/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py index 1774bae58d..679154091c 100644 --- a/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py +++ b/plenum/test/node_catchup/test_node_catchup_when_3_not_primary_node_restarted.py @@ -5,9 +5,6 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState, POOL_LEDGER_ID from plenum.test.helper 
import sendReqsToNodesAndVerifySuffReplies -from plenum.test.pool_transactions.conftest import looper, \ - steward1, stewardWallet, stewardAndWallet1 - from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually from plenum.test.node_catchup.helper import check_ledger_state @@ -16,7 +13,6 @@ from plenum.common.startable import Mode from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected - logger = getlogger() @@ -26,7 +22,7 @@ def catchuped(node): def test_node_catchup_when_3_not_primary_node_restarted( looper, txnPoolNodeSet, tdir, tconf, - allPluginsPath, steward1, stewardWallet): + allPluginsPath, steward1, stewardWallet): """ Test case: 1. Create pool of 4 nodes diff --git a/plenum/test/node_catchup/test_node_ledger_manager.py b/plenum/test/node_catchup/test_node_ledger_manager.py index 8b2358c105..9e764ba668 100644 --- a/plenum/test/node_catchup/test_node_ledger_manager.py +++ b/plenum/test/node_catchup/test_node_ledger_manager.py @@ -1,14 +1,9 @@ -# noinspection PyUnresolvedReferences -from plenum.test.pool_transactions.conftest import \ - clientAndWallet1, client1, wallet1, client1Connected, looper - - def test_ledger_sync_order(looper, txnPoolNodeSet): for node in txnPoolNodeSet: ledger_ids = node.ledger_ids for idx, lid in enumerate(ledger_ids): next_ledger_id = node.ledgerManager.ledger_to_sync_after(lid) if idx != (len(ledger_ids) - 1): - assert next_ledger_id == ledger_ids[idx+1] + assert next_ledger_id == ledger_ids[idx + 1] else: assert next_ledger_id is None diff --git a/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py b/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py index 02c698ae64..9488298023 100644 --- a/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py +++ b/plenum/test/node_catchup/test_node_reject_invalid_txn_during_catchup.py @@ -10,13 +10,11 @@ from plenum.test.test_node import checkNodesConnected, getNonPrimaryReplicas from plenum.test import waits - # Do not remove the next import from plenum.test.node_catchup.conftest import whitelist logger = getlogger() - txnCount = 10 @@ -81,7 +79,7 @@ def _sendIncorrectTxns(self, req, frm): " for catchup request {} from {}". 
format(self, req, frm)) start, end = getattr(req, f.SEQ_NO_START.nm), \ - getattr(req, f.SEQ_NO_END.nm) + getattr(req, f.SEQ_NO_END.nm) ledger = self.getLedgerForMsg(req) txns = {} for seqNo, txn in ledger.getAllTxn(start, end): diff --git a/plenum/test/node_catchup/test_node_request_consistency_proof.py b/plenum/test/node_catchup/test_node_request_consistency_proof.py index 3a08a636b2..8f22946715 100644 --- a/plenum/test/node_catchup/test_node_request_consistency_proof.py +++ b/plenum/test/node_catchup/test_node_request_consistency_proof.py @@ -14,7 +14,6 @@ from plenum.test.node_catchup.conftest import whitelist from plenum.test.batching_3pc.conftest import tconf - logger = getlogger() # So that `three_phase_key_for_txn_seq_no` always works, it makes the test # easy as the requesting node selects a random size for the ledger diff --git a/plenum/test/node_catchup/test_node_request_missing_transactions.py b/plenum/test/node_catchup/test_node_request_missing_transactions.py index 70d194af65..8190d3ac55 100644 --- a/plenum/test/node_catchup/test_node_request_missing_transactions.py +++ b/plenum/test/node_catchup/test_node_request_missing_transactions.py @@ -13,7 +13,6 @@ # Do not remove the next import from plenum.test.node_catchup.conftest import whitelist - logger = getlogger() TestRunningTimeLimitSec = 180 @@ -68,7 +67,7 @@ def ignoreCatchupReq(self, req, frm): # Since one of the nodes does not reply, this new node will experience a # timeout and retry catchup requests, hence a long test timeout. timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) + \ - reduced_catchup_timeout_conf.CatchupTransactionsTimeout + reduced_catchup_timeout_conf.CatchupTransactionsTimeout waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1], customTimeout=timeout) new_size = len(new_node_ledger.ledger) @@ -76,7 +75,7 @@ def ignoreCatchupReq(self, req, frm): # The new node ledger might catchup some transactions from the batch of # `more_request` transactions assert old_size_others - \ - old_size <= new_node_ledger.num_txns_caught_up <= new_size - old_size + old_size <= new_node_ledger.num_txns_caught_up <= new_size - old_size sendRandomRequests(wallet, client, 2) waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1], customTimeout=timeout) diff --git a/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py b/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py index 95589ef066..dc543d9195 100644 --- a/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py +++ b/plenum/test/node_catchup/test_remove_request_keys_post_catchup.py @@ -12,8 +12,7 @@ @pytest.fixture(scope='module', params=['some', 'all']) -def setup(request, looper, txnPoolNodeSet, client1, wallet1, - client1Connected): +def setup(request, looper, txnPoolNodeSet, client1, wallet1): slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[1].node fast_nodes = [n for n in txnPoolNodeSet if n != slow_node] # Delay catchup reply so that the test gets time to make the check, @@ -38,12 +37,12 @@ def test_nodes_removes_request_keys_for_ordered(setup, looper, txnPoolNodeSet, looper, wallet1, client1, 10, 5) ensure_all_nodes_have_same_data(looper, fast_nodes) assert slow_node.master_replica.last_ordered_3pc != \ - fast_nodes[0].master_replica.last_ordered_3pc + fast_nodes[0].master_replica.last_ordered_3pc def chk(key, nodes, present): for node in nodes: assert ( - key in node.master_replica.requestQueues[DOMAIN_LEDGER_ID]) == present + key in node.master_replica.requestQueues[DOMAIN_LEDGER_ID]) == present 
for req in reqs: chk(req.key, fast_nodes, False) diff --git a/plenum/test/node_catchup/test_req_id_key_error.py b/plenum/test/node_catchup/test_req_id_key_error.py index ee09f952c8..b1651ae759 100644 --- a/plenum/test/node_catchup/test_req_id_key_error.py +++ b/plenum/test/node_catchup/test_req_id_key_error.py @@ -7,11 +7,11 @@ def test_req_id_key_error(testNode, wallet1): - #create random transactions + # create random transactions count_of_txn = 3 reqs = signed_random_requests(wallet1, count_of_txn) txns = [] - #prepare transactions and remove reqId from + # prepare transactions and remove reqId from for i, req in enumerate(reqs): txnreq = reqToTxn(req) txnreq[f.SEQ_NO.nm] = i diff --git a/plenum/test/node_catchup/test_revert_during_catchup.py b/plenum/test/node_catchup/test_revert_during_catchup.py index 7814491adb..67e97a317d 100644 --- a/plenum/test/node_catchup/test_revert_during_catchup.py +++ b/plenum/test/node_catchup/test_revert_during_catchup.py @@ -17,14 +17,13 @@ TestRunningTimeLimitSec = 125 # Do not remove the next imports -from plenum.test.batching_3pc.conftest import tconf # noqa +from plenum.test.batching_3pc.conftest import tconf # noqa def test_slow_node_reverts_unordered_state_during_catchup(looper, txnPoolNodeSet, client1, - wallet1, - client1Connected): + wallet1): """ Delay COMMITs to a node such that when it needs to catchup, it needs to revert some unordered state. Also till this time the node should have @@ -62,6 +61,7 @@ def test_slow_node_reverts_unordered_state_during_catchup(looper, def is_catchup_needed_count(): return len(getAllReturnVals(slow_node, slow_node.is_catchup_needed, compare_val_to=True)) + old_lcu_count = slow_node.spylog.count(slow_node.allLedgersCaughtUp) old_cn_count = is_catchup_needed_count() diff --git a/plenum/test/node_request/conftest.py b/plenum/test/node_request/conftest.py index 16811c539d..333adc5220 100644 --- a/plenum/test/node_request/conftest.py +++ b/plenum/test/node_request/conftest.py @@ -10,7 +10,6 @@ from plenum.test.node_request.node_request_helper import checkPrePrepared, \ checkPropagated, checkPrepared from plenum.test.node_request.node_request_helper import checkCommitted -from plenum.test.pool_transactions.conftest import looper from plenum.common.util import getNoInstances diff --git a/plenum/test/node_request/message_request/conftest.py b/plenum/test/node_request/message_request/conftest.py index 72e6240ce6..7d793b407c 100644 --- a/plenum/test/node_request/message_request/conftest.py +++ b/plenum/test/node_request/message_request/conftest.py @@ -7,7 +7,6 @@ @pytest.fixture(scope="module") def teardown(request, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): - def tear(): # Repair any broken network for node in txnPoolNodeSet: diff --git a/plenum/test/node_request/message_request/test_node_request_missing_three_phase_messages.py b/plenum/test/node_request/message_request/test_node_request_missing_three_phase_messages.py index ff8399ff6f..26fcde92f1 100644 --- a/plenum/test/node_request/message_request/test_node_request_missing_three_phase_messages.py +++ b/plenum/test/node_request/message_request/test_node_request_missing_three_phase_messages.py @@ -4,10 +4,8 @@ from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected, reconnect_node_and_ensure_connected -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check - logger = 
getlogger() diff --git a/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py b/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py index 5d41f2c021..7a8c9c5917 100644 --- a/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py +++ b/plenum/test/node_request/message_request/test_node_requests_missing_preprepare.py @@ -12,8 +12,6 @@ from plenum.test.node_request.message_request.helper import split_nodes from plenum.test.spy_helpers import get_count from plenum.test.test_node import getNonPrimaryReplicas, get_master_primary_node -from plenum.test.pool_transactions.conftest import looper - whitelist = ['does not have expected state'] @@ -87,7 +85,7 @@ def do_not_send(self, msg, frm): bad_node.nodeMsgRouter.routes[MessageReq] = types.MethodType( do_not_send, bad_node) return primary_node, bad_node, good_non_primary_node, slow_node, \ - other_nodes, do_not_send, orig_method + other_nodes, do_not_send, orig_method if request.param == 'send_bad': orig_method = bad_node.nodeMsgRouter.routes[MessageReq] @@ -108,7 +106,7 @@ def send_bad(self, msg, frm): bad_node.nodeMsgRouter.routes[MessageReq] = types.MethodType(send_bad, bad_node) return primary_node, bad_node, good_non_primary_node, slow_node, \ - other_nodes, send_bad, orig_method + other_nodes, send_bad, orig_method def test_node_requests_missing_preprepare_malicious(looper, txnPoolNodeSet, @@ -127,20 +125,20 @@ def test_node_requests_missing_preprepare_malicious(looper, txnPoolNodeSet, # good_non_primary_node = [n for n in other_nodes if n != slow_node # and n != bad_node and n != primary_node][0] primary_node, bad_node, good_non_primary_node, slow_node, other_nodes, \ - bad_method, orig_method = malicious_setup + bad_method, orig_method = malicious_setup slow_node.nodeIbStasher.delay(ppDelay(300, 0)) def get_reply_count_frm(node): return sum([1 for entry in slow_node.spylog.getAll( slow_node.process_message_rep) - if entry.params['msg'].msg_type == PREPREPARE and - entry.params['frm'] == node.name]) + if entry.params['msg'].msg_type == PREPREPARE and + entry.params['frm'] == node.name]) old_reply_count_from_bad_node = get_reply_count_frm(bad_node) old_reply_count_from_good_node = get_reply_count_frm(good_non_primary_node) old_discarded = countDiscarded(slow_node.master_replica, 'does not have ' - 'expected state') + 'expected state') sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, @@ -163,7 +161,7 @@ def get_reply_count_frm(node): 'does not have expected state') > old_discarded assert get_reply_count_frm(good_non_primary_node) > \ - old_reply_count_from_good_node + old_reply_count_from_good_node slow_node.reset_delays_and_process_delayeds() bad_node.nodeMsgRouter.routes[MessageReq] = orig_method diff --git a/plenum/test/node_request/message_request/test_node_requests_missing_three_phase_messages_after_long_disconnection.py b/plenum/test/node_request/message_request/test_node_requests_missing_three_phase_messages_after_long_disconnection.py index 51909bfc4f..6086170028 100644 --- a/plenum/test/node_request/message_request/test_node_requests_missing_three_phase_messages_after_long_disconnection.py +++ b/plenum/test/node_request/message_request/test_node_requests_missing_three_phase_messages_after_long_disconnection.py @@ -7,7 +7,6 @@ from plenum.test.waits import expectedPoolGetReadyTimeout from stp_core.loop.eventually import eventually from stp_core.common.log import getlogger -from plenum.test.pool_transactions.conftest import looper from 
plenum.test.helper import sdk_send_random_requests, sdk_send_random_and_check logger = getlogger() diff --git a/plenum/test/node_request/message_request/test_preprepare_request.py b/plenum/test/node_request/message_request/test_preprepare_request.py index e878d77f37..f4036356fd 100644 --- a/plenum/test/node_request/message_request/test_preprepare_request.py +++ b/plenum/test/node_request/message_request/test_preprepare_request.py @@ -7,7 +7,6 @@ from plenum.test.spy_helpers import getAllReturnVals, get_count from stp_core.loop.eventually import eventually -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_batches_of_random_and_check @@ -31,7 +30,7 @@ def test_node_request_preprepare(looper, txnPoolNodeSet, Node requests PRE-PREPARE only once after getting PREPAREs. """ slow_node, other_nodes, primary_node, \ - other_primary_nodes = split_nodes(txnPoolNodeSet) + other_primary_nodes = split_nodes(txnPoolNodeSet) # Drop PrePrepares and Prepares slow_node.nodeIbStasher.delay(ppDelay(300, 0)) slow_node.nodeIbStasher.delay(pDelay(300, 0)) diff --git a/plenum/test/node_request/message_request/test_requested_preprepare_handling.py b/plenum/test/node_request/message_request/test_requested_preprepare_handling.py index 45b9debdee..aa7f8b60ec 100644 --- a/plenum/test/node_request/message_request/test_requested_preprepare_handling.py +++ b/plenum/test/node_request/message_request/test_requested_preprepare_handling.py @@ -9,7 +9,6 @@ from plenum.test.spy_helpers import get_count from stp_core.loop.eventually import eventually -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_batches_of_random_and_check diff --git a/plenum/test/node_request/message_request/test_valid_message_request.py b/plenum/test/node_request/message_request/test_valid_message_request.py index e173e53f44..53733035e1 100644 --- a/plenum/test/node_request/message_request/test_valid_message_request.py +++ b/plenum/test/node_request/message_request/test_valid_message_request.py @@ -10,15 +10,12 @@ from plenum.test.helper import countDiscarded from stp_core.loop.eventually import eventually - invalid_type_discard_log = "unknown value 'invalid_type'" invalid_req_discard_log = "cannot serve request" invalid_rep_discard_log = "cannot process requested message response" - whitelist = [invalid_type_discard_log, ] - patched_schema = ( (f.MSG_TYPE.nm, ChooseField(values={'invalid_type', LEDGER_STATUS, CONSISTENCY_PROOF, PREPREPARE, @@ -30,6 +27,7 @@ def patched_MessageReq(): class PMessageReq(MessageReq): schema = patched_schema + return PMessageReq @@ -39,6 +37,7 @@ class PMessageRep(MessageRep): *patched_schema, (f.MSG.nm, AnyField()) ) + return PMessageRep diff --git a/plenum/test/node_request/node_request_helper.py b/plenum/test/node_request/node_request_helper.py index ab258e4601..a0f24c41f7 100644 --- a/plenum/test/node_request/node_request_helper.py +++ b/plenum/test/node_request/node_request_helper.py @@ -97,7 +97,7 @@ def nonPrimarySeesCorrectNumberOfPREPREPAREs(): param['pre_prepare'][4:], param['sender']) == ( expectedPrePrepareRequest[0:3] + - expectedPrePrepareRequest[4:], + expectedPrePrepareRequest[4:], primary.name)]) numOfMsgsWithZFN = 1 @@ -257,14 +257,14 @@ def nonPrimaryReplicasReceiveCorrectNumberOfPREPAREs(): actualMsgs = len( [ param for param in getAllArgs( - npr, - npr.processPrepare) if ( - param['prepare'].instId, - param['prepare'].viewNo, - param['prepare'].ppSeqNo) == ( - primary.instId, - primary.viewNo, - 
primary.lastPrePrepareSeqNo)]) + npr, + npr.processPrepare) if ( + param['prepare'].instId, + param['prepare'].viewNo, + param['prepare'].ppSeqNo) == ( + primary.instId, + primary.viewNo, + primary.lastPrePrepareSeqNo)]) passes += int(msgCountOK(nodeCount, faultyNodes, diff --git a/plenum/test/node_request/test_belated_request_not_processed.py b/plenum/test/node_request/test_belated_request_not_processed.py index 59c043f9a4..cd2a9430c0 100644 --- a/plenum/test/node_request/test_belated_request_not_processed.py +++ b/plenum/test/node_request/test_belated_request_not_processed.py @@ -2,14 +2,12 @@ from plenum.test.delayers import cDelay, req_delay, ppgDelay from plenum.test.helper import sdk_signed_random_requests, \ sdk_send_signed_requests, sdk_send_and_check -from plenum.test.pool_transactions.conftest import looper from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change def test_repeated_request_not_processed_if_already_ordered( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size @@ -25,7 +23,6 @@ def test_repeated_request_not_processed_if_already_ordered( def test_belated_request_not_processed_if_already_ordered( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.clientIbStasher.delay(req_delay(300)) @@ -42,7 +39,6 @@ def test_belated_request_not_processed_if_already_ordered( def test_belated_propagate_not_processed_if_already_ordered( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma')) @@ -59,7 +55,6 @@ def test_belated_propagate_not_processed_if_already_ordered( def test_repeated_request_not_processed_if_already_in_3pc_process( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size for node in txnPoolNodeSet: @@ -88,7 +83,6 @@ def test_repeated_request_not_processed_if_already_in_3pc_process( def test_belated_request_not_processed_if_already_in_3pc_process( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.clientIbStasher.delay(req_delay(300)) @@ -118,7 +112,6 @@ def test_belated_request_not_processed_if_already_in_3pc_process( def test_belated_propagate_not_processed_if_already_in_3pc_process( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma')) @@ -148,7 +141,6 @@ def test_belated_propagate_not_processed_if_already_in_3pc_process( def test_repeated_request_not_processed_after_view_change( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size @@ -167,7 +159,6 @@ def test_repeated_request_not_processed_after_view_change( def test_belated_request_not_processed_after_view_change( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): - delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.clientIbStasher.delay(req_delay(300)) @@ -187,7 +178,6 @@ def test_belated_request_not_processed_after_view_change( def test_belated_propagate_not_processed_after_view_change( looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): 
- delta = txnPoolNodeSet[3] initial_ledger_size = delta.domainLedger.size delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma')) diff --git a/plenum/test/node_request/test_commit/test_commits_recvd_first.py b/plenum/test/node_request/test_commit/test_commits_recvd_first.py index 25aa353154..c1d3655efb 100644 --- a/plenum/test/node_request/test_commit/test_commits_recvd_first.py +++ b/plenum/test/node_request/test_commit/test_commits_recvd_first.py @@ -2,7 +2,6 @@ from plenum.test.delayers import ppDelay, pDelay from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.test_node import getNonPrimaryReplicas -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_batches_of_random_and_check diff --git a/plenum/test/node_request/test_commit/test_commits_without_prepares.py b/plenum/test/node_request/test_commit/test_commits_without_prepares.py index e4cbdaa3fb..1472adb4ff 100644 --- a/plenum/test/node_request/test_commit/test_commits_without_prepares.py +++ b/plenum/test/node_request/test_commit/test_commits_without_prepares.py @@ -1,7 +1,6 @@ from plenum.test.delayers import pDelay from plenum.test.test_node import get_master_primary_node -from plenum.test.pool_transactions.conftest import looper from plenum.test.helper import sdk_send_random_and_check diff --git a/plenum/test/node_request/test_discard_3pc_for_ordered.py b/plenum/test/node_request/test_discard_3pc_for_ordered.py index 7f5732a33b..d509b01441 100644 --- a/plenum/test/node_request/test_discard_3pc_for_ordered.py +++ b/plenum/test/node_request/test_discard_3pc_for_ordered.py @@ -27,8 +27,9 @@ def test_discard_3PC_messages_for_already_ordered(looper, txnPoolNodeSet, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, - num_reqs=2*sent_batches, + num_reqs=2 * sent_batches, num_batches=sent_batches) + # send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, # 2 * sent_batches, sent_batches) diff --git a/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py b/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py index ec5725e446..e5cc96ad49 100644 --- a/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py +++ b/plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py @@ -6,7 +6,6 @@ from plenum.test.delayers import ppDelay, pDelay from plenum.test.helper import sdk_send_random_request from plenum.test.test_node import getNonPrimaryReplicas -from plenum.test.pool_transactions.conftest import looper def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet, @@ -43,7 +42,7 @@ def patched_p(self, msg, sender): def chk1(): assert len(slow_rep.commitsWaitingForPrepare) > 0 - sdk_send_random_request(looper,sdk_pool_handle, sdk_wallet_client) + sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client) timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay looper.run(eventually(chk1, retryWait=1, timeout=timeout)) diff --git a/plenum/test/node_request/test_order/test_request_ordering_1.py b/plenum/test/node_request/test_order/test_request_ordering_1.py index 4481eaa064..3654e7e153 100644 --- a/plenum/test/node_request/test_order/test_request_ordering_1.py +++ b/plenum/test/node_request/test_order/test_request_ordering_1.py @@ -4,7 +4,6 @@ from plenum.test.helper import sdk_send_random_request from plenum.test.malicious_behaviors_node import delaysPrePrepareProcessing from plenum.test.test_node import getNonPrimaryReplicas -from 
plenum.test.pool_transactions.conftest import looper def testOrderingCase1(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle): diff --git a/plenum/test/node_request/test_order/test_request_ordering_2.py b/plenum/test/node_request/test_order/test_request_ordering_2.py index c26fadb205..6790f016d2 100644 --- a/plenum/test/node_request/test_order/test_request_ordering_2.py +++ b/plenum/test/node_request/test_order/test_request_ordering_2.py @@ -22,7 +22,7 @@ def testOrderingCase2(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client https://www.pivotaltracker.com/n/projects/1889887/stories/133655009 """ pr, replicas = getPrimaryReplica(txnPoolNodeSet, instId=0), \ - getNonPrimaryReplicas(txnPoolNodeSet, instId=0) + getNonPrimaryReplicas(txnPoolNodeSet, instId=0) assert len(replicas) == 6 rep0 = pr @@ -55,7 +55,7 @@ def specificCommits(wrappedMsg): delayedPpSeqNos.add(msg.ppSeqNo) logger.debug('ppSeqNo {} be delayed'.format(msg.ppSeqNo)) if isinstance(msg, Commit) and msg.instId == 0 and \ - sender in (n.name for n in (node3, node4, node5)) and \ + sender in (n.name for n in (node3, node4, node5)) and \ msg.ppSeqNo in delayedPpSeqNos: return commitDelay @@ -64,7 +64,7 @@ def specificCommits(wrappedMsg): node.nodeIbStasher.delay(specificCommits) sdk_reqs = sdk_send_random_requests(looper, sdk_pool_handle, - sdk_wallet_client, requestCount) + sdk_wallet_client, requestCount) timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) diff --git a/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py b/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py index 1b4353ca83..8a7da085fc 100644 --- a/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py +++ b/plenum/test/node_request/test_pre_prepare/test_ignore_pre_prepare_pp_seq_no_less_than_expected.py @@ -1,9 +1,7 @@ import pytest -from stp_core.common.util import adict from plenum.test.helper import sdk_send_random_and_check from plenum.test.test_node import getNonPrimaryReplicas -from plenum.test.pool_transactions.conftest import looper def test_ignore_pre_prepare_pp_seq_no_less_than_expected(looper, diff --git a/plenum/test/node_request/test_pre_prepare/test_primary_sends_preprepare_of_high_num.py b/plenum/test/node_request/test_pre_prepare/test_primary_sends_preprepare_of_high_num.py index 1dd2216c75..0322bc16cb 100644 --- a/plenum/test/node_request/test_pre_prepare/test_primary_sends_preprepare_of_high_num.py +++ b/plenum/test/node_request/test_pre_prepare/test_primary_sends_preprepare_of_high_num.py @@ -15,9 +15,9 @@ @pytest.mark.skip(reason="SOV-555. Not implemented in replica. 
Add a check in " "replica to check value of preprepare seq number.") -def testPrePrepareWithHighSeqNo(looper, nodeSet, propagated1): +def testPrePrepareWithHighSeqNo(looper, txnPoolNodeSet, propagated1): def chk(): - for r in getNonPrimaryReplicas(nodeSet, instId): + for r in getNonPrimaryReplicas(txnPoolNodeSet, instId): nodeSuspicions = len(getNodeSuspicions( r.node, Suspicions.WRONG_PPSEQ_NO.code)) assert nodeSuspicions == 1 @@ -26,11 +26,11 @@ def checkPreprepare(replica, viewNo, ppSeqNo, req, numOfPrePrepares): assert (replica.prePrepares[viewNo, ppSeqNo][0]) == \ (req.identifier, req.reqId, req.digest) - primary = getPrimaryReplica(nodeSet, instId) - nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet, instId) + primary = getPrimaryReplica(txnPoolNodeSet, instId) + nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, instId) req = propagated1.reqDigest primary.doPrePrepare(req) - timeout = waits.expectedPrePrepareTime(len(nodeSet)) + timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) for np in nonPrimaryReplicas: looper.run( eventually(checkPreprepare, np, primary.viewNo, @@ -45,5 +45,5 @@ def checkPreprepare(replica, viewNo, ppSeqNo, req, numOfPrePrepares): get_utc_epoch()) primary.send(incorrectPrePrepareReq, TPCStat.PrePrepareSent) - timeout = waits.expectedPrePrepareTime(len(nodeSet)) + timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) looper.run(eventually(chk, retryWait=1, timeout=timeout)) diff --git a/plenum/test/node_request/test_prepare/test_num_prepare_with_2_of_6_faulty.py b/plenum/test/node_request/test_prepare/test_num_prepare_with_2_of_6_faulty.py index b652439b4a..c630de9ee8 100644 --- a/plenum/test/node_request/test_prepare/test_num_prepare_with_2_of_6_faulty.py +++ b/plenum/test/node_request/test_prepare/test_num_prepare_with_2_of_6_faulty.py @@ -9,7 +9,6 @@ from plenum.test.node_request.helper import nodes_by_rank - nodeCount = 6 f = 1 faultyNodes = f + 1 diff --git a/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py b/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py index 54d2164478..1e9f4263b0 100644 --- a/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py +++ b/plenum/test/node_request/test_propagate/test_node_lacks_finalised_requests.py @@ -3,7 +3,6 @@ from plenum.test.spy_helpers import get_count, getAllReturnVals from plenum.test.test_node import getNonPrimaryReplicas from plenum.test.helper import sdk_send_random_and_check -from plenum.test.pool_transactions.conftest import looper from plenum.test.node_request.helper import sdk_ensure_pool_functional @@ -65,8 +64,8 @@ def sum_of_sent_batches(): # number of sent batches in both replicas since both replicas # independently request PROPAGATEs assert get_count(faulty_node, faulty_node.request_propagates) - \ - old_count_request_propagates == (sum_of_sent_batches() - - old_sum_of_sent_batches) + old_count_request_propagates == (sum_of_sent_batches() - + old_sum_of_sent_batches) requested_propagate_counts = getAllReturnVals( faulty_node, faulty_node.request_propagates) diff --git a/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py b/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py index 1602a1f492..a411b1f4cf 100644 --- a/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py +++ b/plenum/test/node_request/test_quorum_f_plus_2_nodes_but_not_primary_off_and_on.py @@ -2,8 +2,6 @@ from plenum.test.helper import 
checkViewNoForNodes, sendRandomRequest, \ waitForSufficientRepliesForRequests, \ verify_request_not_replied_and_not_ordered -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, getRequiredInstances @@ -21,8 +19,7 @@ def stop_node(node_to_stop, looper, pool_nodes): def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on( looper, allPluginsPath, tdir, tconf, - txnPoolNodeSet, wallet1, client1, client1Connected): - + txnPoolNodeSet, wallet1, client1): nodes = txnPoolNodeSet request1 = sendRandomRequest(wallet1, client1) diff --git a/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py b/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py index aa8fc5a703..a0a7f754fe 100644 --- a/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py +++ b/plenum/test/node_request/test_quorum_f_plus_2_nodes_including_primary_off_and_on.py @@ -2,8 +2,6 @@ from plenum.test.helper import waitForViewChange, checkViewNoForNodes, \ sendRandomRequest, waitForSufficientRepliesForRequests, \ verify_request_not_replied_and_not_ordered -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected from plenum.test.test_node import ensureElectionsDone, getRequiredInstances @@ -21,8 +19,7 @@ def stop_node(node_to_stop, looper, pool_nodes): def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on( looper, allPluginsPath, tdir, tconf, - txnPoolNodeSet, wallet1, client1, client1Connected): - + txnPoolNodeSet, wallet1, client1): nodes = txnPoolNodeSet request1 = sendRandomRequest(wallet1, client1) diff --git a/plenum/test/node_request/test_request_forwarding.py b/plenum/test/node_request/test_request_forwarding.py index 0c9f9f27b0..cc6a00b1d1 100644 --- a/plenum/test/node_request/test_request_forwarding.py +++ b/plenum/test/node_request/test_request_forwarding.py @@ -2,7 +2,6 @@ from plenum.test import waits from plenum.test.delayers import nom_delay, delay_3pc_messages from plenum.test.batching_3pc.conftest import tconf -from plenum.test.pool_transactions.conftest import looper from plenum.test.test_node import ensureElectionsDone from plenum.test.view_change.helper import ensure_view_change from stp_core.loop.eventually import eventually @@ -46,7 +45,7 @@ def chk(count): # Only non primary replicas should have all request keys with them looper.run(eventually(chk, tconf.Max3PCBatchSize - 1)) sdk_get_replies(looper, req_resps, timeout=sdk_eval_timeout( - tconf.Max3PCBatchSize-1, len(txnPoolNodeSet), + tconf.Max3PCBatchSize - 1, len(txnPoolNodeSet), add_delay_to_timeout=delay_3pc)) # Replicas should have no request keys with them since they are ordered looper.run(eventually(chk, 0)) # Need to wait since one node might not @@ -66,7 +65,7 @@ def chk(count): # Since each nomination is delayed and there will be multiple nominations # so adding some extra time timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \ - len(txnPoolNodeSet) * delay + len(txnPoolNodeSet) * delay ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout) sdk_get_replies(looper, req_resps, timeout=timeout) looper.run(eventually(chk, 0)) diff --git 
a/plenum/test/node_request/test_timestamp/conftest.py b/plenum/test/node_request/test_timestamp/conftest.py deleted file mode 100644 index 38dac4e336..0000000000 --- a/plenum/test/node_request/test_timestamp/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper, nodeThetaAdded, \ - stewardAndWallet1, steward1, stewardWallet diff --git a/plenum/test/node_request/test_timestamp/test_clock_disruption.py b/plenum/test/node_request/test_timestamp/test_clock_disruption.py index 489569a0cc..4e3c616201 100644 --- a/plenum/test/node_request/test_timestamp/test_clock_disruption.py +++ b/plenum/test/node_request/test_timestamp/test_clock_disruption.py @@ -10,7 +10,6 @@ get_timestamp_suspicion_count from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_request - Max3PCBatchSize = 4 from plenum.test.batching_3pc.conftest import tconf @@ -40,9 +39,9 @@ def test_nodes_with_bad_clock(tconf, looper, txnPoolNodeSet, make_clock_faulty( node, clock_slow_by_sec=node.config.ACCEPTABLE_DEVIATION_PREPREPARE_SECS + - randint( - 5, - 15), + randint( + 5, + 15), ppr_always_wrong=False) for _ in range(5): diff --git a/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py b/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py index eac7b9c503..42f6c950b5 100644 --- a/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py +++ b/plenum/test/node_request/test_timestamp/test_timestamp_post_view_change.py @@ -59,9 +59,9 @@ def test_new_primary_has_wrong_clock(tconf, looper, txnPoolNodeSet, # Requests are sent for _ in range(5): sdk_send_random_requests(looper, - sdk_pool_handle, - sdk_wallet_client, - count=2) + sdk_pool_handle, + sdk_wallet_client, + count=2) looper.runFor(2) def chk(): @@ -78,7 +78,6 @@ def chk(): looper.run(eventually(chk, retryWait=1)) - # Eventually another view change happens looper.run(eventually(checkViewNoForNodes, txnPoolNodeSet, old_view_no + 1, retryWait=1, timeout=2 * tconf.PerfCheckFreq)) diff --git a/plenum/test/observer/conftest.py b/plenum/test/observer/conftest.py index bed4b19377..a0bd752df6 100644 --- a/plenum/test/observer/conftest.py +++ b/plenum/test/observer/conftest.py @@ -10,8 +10,6 @@ from plenum.test.test_node import TestNode from plenum.test.testable import spyable -from plenum.test.pool_transactions.conftest import looper - @spyable(methods=[Observable.append_input, Observable.send_to_observers, diff --git a/plenum/test/plugin/demo_plugin/__init__.py b/plenum/test/plugin/demo_plugin/__init__.py index 67748643de..306c66fee7 100644 --- a/plenum/test/plugin/demo_plugin/__init__.py +++ b/plenum/test/plugin/demo_plugin/__init__.py @@ -2,12 +2,11 @@ from plenum.test.plugin.demo_plugin.transactions import DemoTransactions from plenum.test.plugin.demo_plugin.constants import AUCTION_LEDGER_ID - dummy_field_length = 10 LEDGER_IDS = {AUCTION_LEDGER_ID, } CLIENT_REQUEST_FIELDS = {'fix_length_dummy': - FixedLengthField(dummy_field_length, - optional=True, nullable=True)} + FixedLengthField(dummy_field_length, + optional=True, nullable=True)} AcceptableWriteTypes = {DemoTransactions.AUCTION_START.value, DemoTransactions.AUCTION_END.value, diff --git a/plenum/test/plugin/demo_plugin/conftest.py b/plenum/test/plugin/demo_plugin/conftest.py index 03c4e62f5d..b42568616a 100644 --- a/plenum/test/plugin/demo_plugin/conftest.py +++ b/plenum/test/plugin/demo_plugin/conftest.py @@ -7,10 +7,6 @@ from 
plenum.common.pkg_util import update_module_vars from plenum.test.plugin.demo_plugin.main import integrate_plugin_in_node -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper, stewardAndWallet1, \ - stewardWallet, steward1 - def do_plugin_initialisation_for_tests(): # The next imports and reloading are needed only in tests, since in @@ -71,5 +67,5 @@ def _post_node_creation(node): @pytest.fixture(scope="module") -def nodeSet(tconf, do_post_node_creation, txnPoolNodeSet): +def txn_pool_node_set_post_creation(tconf, do_post_node_creation, txnPoolNodeSet): return txnPoolNodeSet diff --git a/plenum/test/plugin/demo_plugin/test_catchup.py b/plenum/test/plugin/demo_plugin/test_catchup.py index 48a93bf38b..887d0e1ab0 100644 --- a/plenum/test/plugin/demo_plugin/test_catchup.py +++ b/plenum/test/plugin/demo_plugin/test_catchup.py @@ -15,13 +15,13 @@ disconnect_node_and_ensure_disconnected, reconnect_node_and_ensure_connected -def test_new_node_catchup_plugin_ledger(nodeSet, looper, some_requests, +def test_new_node_catchup_plugin_ledger(txn_pool_node_set_post_creation, looper, some_requests, newNodeCaughtUp): """ A new node catches up the demo plugin's ledger too """ assert len(newNodeCaughtUp.getLedger(AUCTION_LEDGER_ID)) > 0 - for node in nodeSet[:-1]: + for node in txn_pool_node_set_post_creation[:-1]: assert len(newNodeCaughtUp.getLedger(AUCTION_LEDGER_ID)) == \ len(node.getLedger(AUCTION_LEDGER_ID)) diff --git a/plenum/test/plugin/demo_plugin/test_plugin_basic.py b/plenum/test/plugin/demo_plugin/test_plugin_basic.py index c4ef0829a2..aa36471a65 100644 --- a/plenum/test/plugin/demo_plugin/test_plugin_basic.py +++ b/plenum/test/plugin/demo_plugin/test_plugin_basic.py @@ -8,19 +8,19 @@ from stp_core.loop.eventually import eventually -def test_plugin_setup(nodeSet): +def test_plugin_setup(txn_pool_node_set_post_creation): """ Test that plugin's ledger and state are setup """ - for node in nodeSet: + for node in txn_pool_node_set_post_creation: assert AUCTION_LEDGER_ID in node.ledger_ids assert AUCTION_LEDGER_ID in node.ledgerManager.ledgerRegistry assert node.ledger_ids == node.ledgerManager.ledger_sync_order assert AUCTION_LEDGER_ID in node.states -def test_plugin_client_req_fields(nodeSet, looper, stewardWallet, - steward1, client1Connected, +def test_plugin_client_req_fields(txn_pool_node_set_post_creation, looper, stewardWallet, + steward1, sdk_wallet_steward, sdk_pool_handle): """ Test that plugin's addition of request fields and their validation is @@ -42,9 +42,9 @@ def test_plugin_client_req_fields(nodeSet, looper, stewardWallet, req = Request(operation=op, reqId=Request.gen_req_id(), protocolVersion=CURRENT_PROTOCOL_VERSION, identifier=stewardWallet.defaultId, - fix_length_dummy=randomString(dummy_field_length+1)) + fix_length_dummy=randomString(dummy_field_length + 1)) steward1.submitReqs(req) - for node in nodeSet: + for node in txn_pool_node_set_post_creation: looper.run(eventually(checkReqNackWithReason, steward1, 'should have length', node.clientstack.name, retryWait=1)) diff --git a/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py b/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py index 515b2a8a01..77cbe3fd2a 100644 --- a/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py +++ b/plenum/test/plugin/demo_plugin/test_plugin_request_handling.py @@ -16,8 +16,8 @@ def successful_op(looper, op, sdk_wallet, sdk_pool_handle): sdk_get_reply(looper, req) -def 
test_plugin_static_validation(nodeSet, looper, stewardWallet, - steward1, client1Connected, +def test_plugin_static_validation(txn_pool_node_set_post_creation, looper, stewardWallet, + steward1, sdk_wallet_steward, sdk_pool_handle): """ Check plugin static validation fails and passes @@ -26,7 +26,7 @@ def test_plugin_static_validation(nodeSet, looper, stewardWallet, TXN_TYPE: AUCTION_START } send_signed_requests(steward1, sign_requests(stewardWallet, [op, ])) - waitReqNackFromPoolWithReason(looper, nodeSet, steward1, + waitReqNackFromPoolWithReason(looper, txn_pool_node_set_post_creation, steward1, 'attribute is missing or not in proper format') op = { @@ -34,7 +34,7 @@ def test_plugin_static_validation(nodeSet, looper, stewardWallet, DATA: 'should be a dict but giving a string' } send_signed_requests(steward1, sign_requests(stewardWallet, [op, ])) - waitReqNackFromPoolWithReason(looper, nodeSet, steward1, + waitReqNackFromPoolWithReason(looper, txn_pool_node_set_post_creation, steward1, 'attribute is missing or not in proper format') op = { @@ -49,7 +49,7 @@ def test_plugin_static_validation(nodeSet, looper, stewardWallet, DATA: {'id': 'abc', AMOUNT: -3} } send_signed_requests(steward1, sign_requests(stewardWallet, [op, ])) - waitReqNackFromPoolWithReason(looper, nodeSet, steward1, + waitReqNackFromPoolWithReason(looper, txn_pool_node_set_post_creation, steward1, 'must be present and should be a number') op = { @@ -59,8 +59,8 @@ def test_plugin_static_validation(nodeSet, looper, stewardWallet, successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) -def test_plugin_dynamic_validation(nodeSet, looper, stewardWallet, - steward1, client1Connected, +def test_plugin_dynamic_validation(txn_pool_node_set_post_creation, looper, stewardWallet, + steward1, sdk_wallet_steward, sdk_pool_handle): """ Check plugin dynamic validation fails and passes @@ -70,7 +70,7 @@ def test_plugin_dynamic_validation(nodeSet, looper, stewardWallet, DATA: {'id': 'abcdef'} } send_signed_requests(steward1, sign_requests(stewardWallet, [op, ])) - waitRejectFromPoolWithReason(looper, nodeSet, steward1, + waitRejectFromPoolWithReason(looper, txn_pool_node_set_post_creation, steward1, 'unknown auction') op = { @@ -87,9 +87,9 @@ def test_plugin_dynamic_validation(nodeSet, looper, stewardWallet, @pytest.fixture(scope="module") -def some_requests(nodeSet, looper, stewardWallet, - steward1, client1Connected, - sdk_wallet_steward, sdk_pool_handle): +def some_requests(txn_pool_node_set_post_creation, looper, stewardWallet, + steward1, + sdk_wallet_steward, sdk_pool_handle): op = { TXN_TYPE: AUCTION_START, DATA: {'id': 'pqr'} @@ -102,7 +102,7 @@ def some_requests(nodeSet, looper, stewardWallet, } successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) - for node in nodeSet: + for node in txn_pool_node_set_post_creation: auctions = node.get_req_handler(AUCTION_LEDGER_ID).auctions assert 'pqr' in auctions assert auctions['pqr'][stewardWallet.defaultId] == 20 @@ -113,7 +113,7 @@ def some_requests(nodeSet, looper, stewardWallet, } successful_op(looper, op, sdk_wallet_steward, sdk_pool_handle) - for node in nodeSet: + for node in txn_pool_node_set_post_creation: auctions = node.get_req_handler(AUCTION_LEDGER_ID).auctions assert 'pqr' in auctions assert auctions['pqr'][stewardWallet.defaultId] == 40 diff --git a/plenum/test/plugin/demo_plugin/transactions.py b/plenum/test/plugin/demo_plugin/transactions.py index 64c73af187..e6d27dec9a 100644 --- a/plenum/test/plugin/demo_plugin/transactions.py +++ 
b/plenum/test/plugin/demo_plugin/transactions.py @@ -1,6 +1,5 @@ from plenum.common.transactions import Transactions - # DO NOT CHANGE ONCE CODE IS DEPLOYED ON THE LEDGER # TODO: Might need a short hash with unique entropy, plugin name being input of hash. PREFIX = '9999' diff --git a/plenum/test/plugin/test_notifier_plugin_manager.py b/plenum/test/plugin/test_notifier_plugin_manager.py index 5978d2a603..c97876f9cf 100644 --- a/plenum/test/plugin/test_notifier_plugin_manager.py +++ b/plenum/test/plugin/test_notifier_plugin_manager.py @@ -18,7 +18,7 @@ def testPluginManagerFindsPlugins(monkeypatch, pluginManager): partial( mockGetInstalledDistributions, packages=validPackages + - invalidPackages)) + invalidPackages)) assert len(pluginManager._findPlugins()) == validPackagesCnt @@ -59,9 +59,9 @@ def testPluginManagerSendMessageUponSuspiciousSpikeFailsOnMinCnt( 'minActivityThreshold': 0, 'enabled': True } - assert pluginManagerWithImportedModules\ - .sendMessageUponSuspiciousSpike(topic, historicalData, - newVal, config, name, enabled=True) is None + assert pluginManagerWithImportedModules \ + .sendMessageUponSuspiciousSpike(topic, historicalData, + newVal, config, name, enabled=True) is None def testPluginManagerSendMessageUponSuspiciousSpikeFailsOnCoefficient( @@ -79,9 +79,9 @@ def testPluginManagerSendMessageUponSuspiciousSpikeFailsOnCoefficient( 'minActivityThreshold': 0, 'enabled': True } - assert pluginManagerWithImportedModules\ - .sendMessageUponSuspiciousSpike(topic, historicalData, - newVal, config, name, enabled=True) is None + assert pluginManagerWithImportedModules \ + .sendMessageUponSuspiciousSpike(topic, historicalData, + newVal, config, name, enabled=True) is None def testPluginManagerSendMessageUponSuspiciousSpike( @@ -99,7 +99,7 @@ def testPluginManagerSendMessageUponSuspiciousSpike( 'minActivityThreshold': 0, 'enabled': True } - sent, found = pluginManagerWithImportedModules\ + sent, found = pluginManagerWithImportedModules \ .sendMessageUponSuspiciousSpike(topic, historicalData, newVal, config, name, enabled=True) assert sent == 3 @@ -108,6 +108,7 @@ def testPluginManagerSendMessageUponSuspiciousSpike( def testNodeSendNodeRequestSpike(pluginManagerWithImportedModules, testNode): def mockProcessRequest(obj, inc=1): obj.nodeRequestSpikeMonitorData['accum'] += inc + testNode.config.SpikeEventsEnabled = True testNode.config.notifierEventTriggeringConfig['nodeRequestSpike'] = { 'coefficient': 3, diff --git a/plenum/test/pool_transactions/conftest.py b/plenum/test/pool_transactions/conftest.py index 521af7e208..94930f90e5 100644 --- a/plenum/test/pool_transactions/conftest.py +++ b/plenum/test/pool_transactions/conftest.py @@ -18,34 +18,6 @@ def tconf(tconf, request): return tconf -@pytest.yield_fixture(scope="module") -def looper(txnPoolNodesLooper): - yield txnPoolNodesLooper - - -@pytest.fixture(scope="module") -def stewardAndWallet1(looper, txnPoolNodeSet, poolTxnStewardData, - tdirWithClientPoolTxns, client_tdir): - client, wallet = buildPoolClientAndWallet(poolTxnStewardData, - client_tdir) - yield client, wallet - client.stop() - - -@pytest.fixture(scope="module") -def steward1(looper, txnPoolNodeSet, stewardAndWallet1): - steward, wallet = stewardAndWallet1 - looper.add(steward) - ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward, - *txnPoolNodeSet) - return steward - - -@pytest.fixture(scope="module") -def stewardWallet(stewardAndWallet1): - return stewardAndWallet1[1] - - @pytest.fixture("module") def nodeThetaAdded(looper, txnPoolNodeSet, tdir, 
client_tdir, tconf, steward1, stewardWallet, allPluginsPath, testNodeClass=None, @@ -99,31 +71,6 @@ def sdk_node_theta_added(looper, return new_steward_wallet, new_node -@pytest.fixture(scope="module") -def clientAndWallet1(txnPoolNodeSet, poolTxnClientData, tdirWithClientPoolTxns, client_tdir): - client, wallet = buildPoolClientAndWallet(poolTxnClientData, - client_tdir) - yield client, wallet - client.stop() - - -@pytest.fixture(scope="module") -def client1(clientAndWallet1): - return clientAndWallet1[0] - - -@pytest.fixture(scope="module") -def wallet1(clientAndWallet1): - return clientAndWallet1[1] - - -@pytest.fixture(scope="module") -def client1Connected(looper, client1): - looper.add(client1) - looper.run(client1.ensureConnectedToNodes()) - return client1 - - @pytest.fixture(scope="function") def newAdHocSteward(looper, client_tdir, steward1, stewardWallet): newStewardName = "testClientSteward" + randomString(3) diff --git a/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py b/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py index 35d461b86a..1edd3cfe81 100644 --- a/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py +++ b/plenum/test/pool_transactions/test_change_ha_persists_post_nodes_restart.py @@ -7,7 +7,6 @@ from plenum.test.test_node import TestNode, checkNodesConnected from stp_core.network.port_dispenser import genHa from plenum.common.config_helper import PNodeConfigHelper -from plenum.test.pool_transactions.conftest import looper logger = getlogger() diff --git a/plenum/test/pool_transactions/test_nodes_with_pool_txns.py b/plenum/test/pool_transactions/test_nodes_with_pool_txns.py index 4409b53431..d58afac793 100644 --- a/plenum/test/pool_transactions/test_nodes_with_pool_txns.py +++ b/plenum/test/pool_transactions/test_nodes_with_pool_txns.py @@ -30,7 +30,6 @@ # reaches it - def testStewardCannotAddMoreThanOneNode(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, tdir, tconf, @@ -305,4 +304,4 @@ def test_add_node_with_not_unique_alias(looper, allPluginsPath) assert 'existing data has conflicts with request data' in \ e._excinfo[1].args[0] - sdk_pool_refresh(looper, sdk_pool_handle) \ No newline at end of file + sdk_pool_refresh(looper, sdk_pool_handle) diff --git a/plenum/test/primary_election/helpers.py b/plenum/test/primary_election/helpers.py index 18432f75b9..f7754c3259 100644 --- a/plenum/test/primary_election/helpers.py +++ b/plenum/test/primary_election/helpers.py @@ -23,7 +23,7 @@ def getSelfNominationByNode(node: TestNode) -> int: for instId, replica in enumerate(node.elector.replicas): name = Replica.generateName(node.name, instId) if node.elector.nominations.get(instId, {}).get(name, [None, ])[ - 0] == name: + 0] == name: return instId diff --git a/plenum/test/primary_election/test_primary_election_case1.py b/plenum/test/primary_election/test_primary_election_case1.py index a1bd9d194e..ca9671bc97 100644 --- a/plenum/test/primary_election/test_primary_election_case1.py +++ b/plenum/test/primary_election/test_primary_election_case1.py @@ -9,11 +9,10 @@ from plenum.test.helper import whitelistNode from plenum.test.primary_election.helpers import checkNomination, \ getSelfNominationByNode, nominationByNode -from plenum.test.test_node import TestNodeSet, checkNodesConnected, \ +from plenum.test.test_node import checkNodesConnected, \ ensureElectionsDone from plenum.test import waits - nodeCount = 4 whitelist = ['already got nomination', 'doing nothing for now'] @@ -24,11 +23,8 @@ 
@pytest.fixture() -def case1Setup(startedNodes: TestNodeSet): - nodes = startedNodes - nodeNames = nodes.nodeNames - - nodeB = nodes.getNode(nodeNames[1]) +def case1Setup(txnPoolNodeSet): + nodeB = txnPoolNodeSet[1] # Node B delays self nomination so A's nomination reaches everyone nodeB.delaySelfNomination(10) @@ -36,8 +32,8 @@ def case1Setup(startedNodes: TestNodeSet): nodeB.nodeIbStasher.delay(delayerMsgTuple(delayOfNomination, Nomination)) # Add node C and node D - nodeC = nodes.getNode(nodeNames[2]) - nodeD = nodes.getNode(nodeNames[3]) + nodeC = txnPoolNodeSet[2] + nodeD = txnPoolNodeSet[3] # Node C should not try to nominate itself until it gets NOMINATE from B nodeC.delaySelfNomination(10) @@ -51,30 +47,29 @@ def case1Setup(startedNodes: TestNodeSet): # multiple nominations from the same node have any impact on # the election whitelistNode(nodeB.name, - [node for node in nodes if node != nodeB], + [node for node in txnPoolNodeSet if node != nodeB], Suspicions.DUPLICATE_NOM_SENT.code) - return nodes + return txnPoolNodeSet # noinspection PyIncorrectDocstring @pytest.mark.skip('Nodes use round robin primary selection') -def testPrimaryElectionCase1(case1Setup, looper, keySharedNodes): +def testPrimaryElectionCase1(case1Setup, looper, txnPoolNodeSet): """ Case 1 - A node making multiple nominations for a particular node. Consider 4 nodes A, B, C and D. Lets say node B is malicious and is repeatedly nominating Node D """ - nodes = keySharedNodes - nodeA, nodeB, nodeC, nodeD = [nodes.getNode(nm) for nm in nodes.nodeNames] + nodeA, nodeB, nodeC, nodeD = txnPoolNodeSet # Doesn't matter if nodes reach the ready state or not. Just start them - looper.run(checkNodesConnected(nodes)) + looper.run(checkNodesConnected(txnPoolNodeSet)) # Node B sends multiple NOMINATE messages for Node D but only after A has # nominated itself timeout = waits.expectedPoolNominationTimeout( - nodeCount=len(keySharedNodes)) + nodeCount=len(txnPoolNodeSet)) looper.run(eventually(checkNomination, nodeA, nodeA.name, retryWait=.25, timeout=timeout)) @@ -92,13 +87,13 @@ def testPrimaryElectionCase1(case1Setup, looper, keySharedNodes): for node in [nodeA, nodeC, nodeD]: assert [n[0] for n in node.elector.nominations[instId].values()].count( Replica.generateName(nodeD.name, instId)) \ - <= 1 + <= 1 timeout = waits.expectedPoolElectionTimeout(nodeCount) + delayOfNomination primaryReplicas = ensureElectionsDone(looper=looper, - nodes=nodes, customTimeout=timeout) + nodes=txnPoolNodeSet, customTimeout=timeout) - for node in nodes: + for node in txnPoolNodeSet: logger.debug( "{}'s nominations {}".format(node, node.elector.nominations)) # Node D should not have any primary diff --git a/plenum/test/primary_election/test_primary_election_case2.py b/plenum/test/primary_election/test_primary_election_case2.py index a0a4725f33..afe08d2df0 100644 --- a/plenum/test/primary_election/test_primary_election_case2.py +++ b/plenum/test/primary_election/test_primary_election_case2.py @@ -7,19 +7,18 @@ from plenum.test.delayers import delayerMsgTuple from plenum.test.primary_election.helpers import checkNomination, \ getSelfNominationByNode, nominationByNode -from plenum.test.test_node import TestNodeSet, checkNodesConnected, \ +from plenum.test.test_node import checkNodesConnected, \ ensureElectionsDone from plenum.test import waits - nodeCount = 4 whitelist = ['already got nomination', 'doing nothing for now'] @pytest.fixture() -def case2Setup(startedNodes: TestNodeSet): - A, B, C, D = startedNodes.nodes.values() +def 
case2Setup(txnPoolNodeSet): + A, B, C, D = txnPoolNodeSet # Node B delays self nomination so A's nomination reaches everyone B.delaySelfNomination(5) @@ -37,23 +36,23 @@ def case2Setup(startedNodes: TestNodeSet): for node in A, C, D: node.whitelistNode(B.name, Suspicions.DUPLICATE_NOM_SENT.code) + # noinspection PyIncorrectDocstring @pytest.mark.skip('Nodes use round robin primary selection') -def testPrimaryElectionCase2(case2Setup, looper, keySharedNodes): +def testPrimaryElectionCase2(case2Setup, looper, txnPoolNodeSet): """ Case 2 - A node making nominations for a multiple other nodes. Consider 4 nodes A, B, C, and D. Lets say node B is malicious and nominates node C to all nodes. Again node B nominates node D to all nodes. """ - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet - looper.run(checkNodesConnected(nodeSet)) + looper.run(checkNodesConnected(txnPoolNodeSet)) # Node B sends multiple NOMINATE msgs but only after A has nominated itself - timeout = waits.expectedPoolNominationTimeout(len(nodeSet)) + timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet)) looper.run(eventually(checkNomination, A, A.name, retryWait=.25, timeout=timeout)) @@ -71,7 +70,7 @@ def testPrimaryElectionCase2(case2Setup, looper, keySharedNodes): B.send(nominationByNode(DRep, B, instId)) # Ensure elections are done - ensureElectionsDone(looper=looper, nodes=nodeSet) + ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet) # All nodes from node A, node C, node D(node B is malicious anyway so # not considering it) should have nomination for node C from node B since diff --git a/plenum/test/primary_election/test_primary_election_case4.py b/plenum/test/primary_election/test_primary_election_case4.py index 21504b0022..07b879aa84 100644 --- a/plenum/test/primary_election/test_primary_election_case4.py +++ b/plenum/test/primary_election/test_primary_election_case4.py @@ -5,7 +5,7 @@ from plenum.server.suspicion_codes import Suspicions from plenum.test import waits from plenum.test.primary_election.helpers import primaryByNode -from plenum.test.test_node import TestNodeSet, checkNodesConnected, \ +from plenum.test.test_node import checkNodesConnected, \ ensureElectionsDone nodeCount = 4 @@ -18,8 +18,8 @@ @pytest.fixture() -def case4Setup(keySharedNodes: TestNodeSet): - allNodes = keySharedNodes.nodes.values() +def case4Setup(txnPoolNodeSet): + allNodes = txnPoolNodeSet A, B, C, D = allNodes # Delay each of the nodes A, B and C's self nomination so Node B gets to @@ -68,7 +68,7 @@ def x(): # also have to take into account the catchup procedure timeout = waits.expectedPoolNominationTimeout(len(allNodes)) + \ - waits.expectedPoolCatchupTime(len(allNodes)) + waits.expectedPoolCatchupTime(len(allNodes)) for node in (A, C, D): looper.run(eventually(x, retryWait=.5, timeout=timeout)) diff --git a/plenum/test/primary_election/test_primary_election_case5.py b/plenum/test/primary_election/test_primary_election_case5.py index 09e3d63422..c65e31cbb4 100644 --- a/plenum/test/primary_election/test_primary_election_case5.py +++ b/plenum/test/primary_election/test_primary_election_case5.py @@ -1,4 +1,3 @@ - import pytest from plenum.common.messages.node_messages import Nomination, Primary from plenum.test import waits @@ -7,7 +6,7 @@ from plenum.server.replica import Replica from plenum.server.suspicion_codes import Suspicions from plenum.test.primary_election.helpers import primaryByNode -from plenum.test.test_node import TestNodeSet, checkNodesConnected, \ +from 
plenum.test.test_node import checkNodesConnected, \ ensureElectionsDone from plenum.test.delayers import delayerMsgTuple @@ -17,7 +16,6 @@ 'doing nothing for now', 'know how to handle it'] - logger = getlogger() # the total delay of election done @@ -25,8 +23,8 @@ @pytest.fixture() -def case5Setup(startedNodes: TestNodeSet): - A, B, C, D = startedNodes.nodes.values() +def case5Setup(txnPoolNodeSet): + A, B, C, D = txnPoolNodeSet # Node B delays self nomination so A's nomination reaches everyone B.delaySelfNomination(30) @@ -58,17 +56,16 @@ def case5Setup(startedNodes: TestNodeSet): # noinspection PyIncorrectDocstring @pytest.mark.skip('Nodes use round robin primary selection') -def testPrimaryElectionCase5(case5Setup, looper, keySharedNodes): +def testPrimaryElectionCase5(case5Setup, looper, txnPoolNodeSet): """ Case 5 - A node making primary declarations for a multiple other nodes. Consider 4 nodes A, B, C, and D. Lets say node B is malicious and declares node C as primary to all nodes. Again node B declares node D as primary to all nodes. """ - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet - looper.run(checkNodesConnected(nodeSet)) + looper.run(checkNodesConnected(txnPoolNodeSet)) BRep = Replica.generateName(B.name, 0) CRep = Replica.generateName(C.name, 0) @@ -83,10 +80,10 @@ def testPrimaryElectionCase5(case5Setup, looper, keySharedNodes): # Ensure elections are done # also have to take into account the catchup procedure - timeout = waits.expectedPoolElectionTimeout(len(nodeSet)) + \ - waits.expectedPoolCatchupTime(len(nodeSet)) + \ - delayOfElectionDone - ensureElectionsDone(looper=looper, nodes=nodeSet, customTimeout=timeout) + timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \ + waits.expectedPoolCatchupTime(len(txnPoolNodeSet)) + \ + delayOfElectionDone + ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, customTimeout=timeout) # All nodes from node A, node C, node D(node B is malicious anyway so not # considering it) should have primary declarations for node C from node B @@ -94,5 +91,5 @@ def testPrimaryElectionCase5(case5Setup, looper, keySharedNodes): for node in [A, C, D]: logger.debug( "node {} should have primary declaration for C from node B" - .format(node)) + .format(node)) assert node.elector.primaryDeclarations[0][BRep][0] == CRep diff --git a/plenum/test/primary_election/test_primary_election_case6.py b/plenum/test/primary_election/test_primary_election_case6.py index 258100142d..f658b9774b 100644 --- a/plenum/test/primary_election/test_primary_election_case6.py +++ b/plenum/test/primary_election/test_primary_election_case6.py @@ -9,8 +9,8 @@ @pytest.fixture(scope='module') -def case_6_setup(startedNodes): - A, B, C, D = startedNodes.nodes.values() +def case_6_setup(txnPoolNodeSet): + A, B, C, D = txnPoolNodeSet # A will get Nomination, Primary, Reelection from after elections get over for m in (Nomination, Primary, Reelection): @@ -22,11 +22,10 @@ def case_6_setup(startedNodes): # noinspection PyIncorrectDocstring @pytest.fixture(scope='module') -def elections_done(case_6_setup, looper, keySharedNodes): +def elections_done(case_6_setup, looper, txnPoolNodeSet): # Make sure elections are done successfully - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() - looper.run(checkNodesConnected(nodeSet)) + A, B, C, D = txnPoolNodeSet + looper.run(checkNodesConnected(txnPoolNodeSet)) inst_ids = (0, 1) @@ -48,7 +47,7 @@ def chk(): assert primary_send_times[i][0] > max(primary_recv_times[i]) 
looper.run(eventually(chk, retryWait=1, timeout=15)) - checkProtocolInstanceSetup(looper=looper, nodes=nodeSet, retryWait=1) + checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1) # Make sure no Nominations or Primary are received by A from B for i in inst_ids: diff --git a/plenum/test/primary_election/test_primary_election_contested.py b/plenum/test/primary_election/test_primary_election_contested.py index 0165afcc0a..9effdc827a 100644 --- a/plenum/test/primary_election/test_primary_election_contested.py +++ b/plenum/test/primary_election/test_primary_election_contested.py @@ -5,19 +5,18 @@ from plenum.common.messages.node_messages import Nomination from plenum.test.delayers import delayerMsgTuple from plenum.test.primary_election.helpers import checkNomination -from plenum.test.test_node import TestNodeSet, checkPoolReady, \ +from plenum.test.test_node import checkPoolReady, \ checkProtocolInstanceSetup from plenum.test import waits - nodeCount = 4 logger = getlogger() @pytest.fixture() -def electContFixture(startedNodes: TestNodeSet): - A, B, C, D = startedNodes.nodes.values() +def electContFixture(txnPoolNodeSet): + A, B, C, D = txnPoolNodeSet # Delaying nodeB' self nomination so nodeA's nomination can reach NodeC # and NodeD @@ -34,7 +33,7 @@ def electContFixture(startedNodes: TestNodeSet): # noinspection PyIncorrectDocstring @pytest.mark.skip('Nodes use round robin primary selection') -def testPrimaryElectionContested(electContFixture, looper, keySharedNodes): +def testPrimaryElectionContested(electContFixture, looper, txnPoolNodeSet): """ Primary selection (Rainy Day) A, B, C, D, E @@ -51,10 +50,9 @@ def testPrimaryElectionContested(electContFixture, looper, keySharedNodes): All see the others have sent Primary A, and then the nodes record who is the Primary. 
""" - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet - checkPoolReady(looper, nodeSet) + checkPoolReady(looper, txnPoolNodeSet) logger.debug("Check nomination") timeout = waits.expectedPoolNominationTimeout(nodeCount) @@ -72,7 +70,7 @@ def testPrimaryElectionContested(electContFixture, looper, keySharedNodes): looper.run(eventually(checkNomination, n, A.name, retryWait=1, timeout=timeout)) - checkProtocolInstanceSetup(looper=looper, nodes=nodeSet, retryWait=1) + checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1) # Node D should not be primary assert not D.hasPrimary diff --git a/plenum/test/primary_election/test_primary_election_with_clear_winner.py b/plenum/test/primary_election/test_primary_election_with_clear_winner.py index f3def006cb..1a7152ef1a 100644 --- a/plenum/test/primary_election/test_primary_election_with_clear_winner.py +++ b/plenum/test/primary_election/test_primary_election_with_clear_winner.py @@ -2,7 +2,7 @@ from stp_core.loop.eventually import eventually from plenum.test.primary_election.helpers import checkNomination -from plenum.test.test_node import TestNodeSet, checkPoolReady, \ +from plenum.test.test_node import checkPoolReady, \ checkProtocolInstanceSetup from plenum.test import waits @@ -10,8 +10,8 @@ @pytest.fixture() -def electContFixture(startedNodes: TestNodeSet): - A, B, C, D = startedNodes.nodes.values() +def electContFixture(txnPoolNodeSet): + A, B, C, D = txnPoolNodeSet for node in [B, C, D]: node.delaySelfNomination(4) @@ -20,7 +20,7 @@ def electContFixture(startedNodes: TestNodeSet): # noinspection PyIncorrectDocstring @pytest.mark.skip('Nodes use round robin primary selection') def testPrimaryElectionWithAClearWinner( - electContFixture, looper, keySharedNodes): + electContFixture, looper, txnPoolNodeSet): """ Primary selection (Sunny Day) A, B, C, D, E @@ -52,23 +52,21 @@ def testPrimaryElectionWithAClearWinner( D sees at least two other PRIMARY(A) votes (3 including it's own) selects A as primary """ - - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet nodesBCD = [B, C, D] - checkPoolReady(looper, nodeSet) + checkPoolReady(looper, txnPoolNodeSet) # Checking whether one of the replicas of Node A nominated itself - timeout = waits.expectedPoolNominationTimeout(len(nodeSet)) + timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet)) looper.run(eventually(checkNomination, A, A.name, retryWait=1, timeout=timeout)) - timeout = waits.expectedPoolNominationTimeout(len(nodeSet)) + timeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet)) for n in nodesBCD: # Checking whether Node B, C and D nominated Node A looper.run(eventually(checkNomination, n, A.name, retryWait=1, timeout=timeout)) - checkProtocolInstanceSetup(looper=looper, nodes=nodeSet, retryWait=1) + checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1) assert A.hasPrimary diff --git a/plenum/test/primary_election/test_primary_election_with_tie.py b/plenum/test/primary_election/test_primary_election_with_tie.py index 0cd18c31bb..6085eccec6 100644 --- a/plenum/test/primary_election/test_primary_election_with_tie.py +++ b/plenum/test/primary_election/test_primary_election_with_tie.py @@ -5,20 +5,18 @@ from plenum.common.messages.node_messages import Nomination from plenum.test.delayers import delay from plenum.test.primary_election.helpers import checkNomination -from plenum.test.test_node import TestNodeSet, checkPoolReady, \ +from 
plenum.test.test_node import checkPoolReady, \ checkProtocolInstanceSetup from plenum.test import waits - nodeCount = 4 - logger = getlogger() @pytest.fixture() -def electTieFixture(startedNodes: TestNodeSet): - A, B, C, D = startedNodes.nodes.values() +def electTieFixture(txnPoolNodeSet): + A, B, C, D = txnPoolNodeSet for node in [C, D]: node.delaySelfNomination(10) @@ -31,7 +29,7 @@ def electTieFixture(startedNodes: TestNodeSet): # noinspection PyIncorrectDocstring @pytest.mark.skip('Nodes use round robin primary selection') -def testPrimaryElectionWithTie(electTieFixture, looper, keySharedNodes): +def testPrimaryElectionWithTie(electTieFixture, looper, txnPoolNodeSet): """ Primary selection (Rainy Day) A, B, C, D, E @@ -58,18 +56,17 @@ def testPrimaryElectionWithTie(electTieFixture, looper, keySharedNodes): # millis have passed, we send the several queued messages in one # batch. - nodeSet = keySharedNodes - A, B, C, D = nodeSet.nodes.values() + A, B, C, D = txnPoolNodeSet - checkPoolReady(looper, nodeSet.nodes.values()) + checkPoolReady(looper, txnPoolNodeSet) - for node in nodeSet.nodes.values(): + for node in txnPoolNodeSet: for instId, replica in enumerate(node.elector.replicas): logger.debug("replica {} {} with votes {}". format(replica.name, replica.instId, node.elector.nominations.get(instId, {}))) - nominationTimeout = waits.expectedPoolNominationTimeout(len(nodeSet)) + nominationTimeout = waits.expectedPoolNominationTimeout(len(txnPoolNodeSet)) logger.debug("Check nomination") # Checking whether Node A nominated itself looper.run(eventually(checkNomination, A, A.name, @@ -88,10 +85,10 @@ def testPrimaryElectionWithTie(electTieFixture, looper, keySharedNodes): retryWait=1, timeout=nominationTimeout)) # No node should be primary - for node in nodeSet.nodes.values(): + for node in txnPoolNodeSet: assert node.hasPrimary is False - for node in nodeSet.nodes.values(): + for node in txnPoolNodeSet: node.resetDelays() - checkProtocolInstanceSetup(looper=looper, nodes=nodeSet, retryWait=1) + checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1) diff --git a/plenum/test/primary_election/test_primary_forfeit.py b/plenum/test/primary_election/test_primary_forfeit.py index adfb2acef1..f1245527fa 100644 --- a/plenum/test/primary_election/test_primary_forfeit.py +++ b/plenum/test/primary_election/test_primary_forfeit.py @@ -5,12 +5,12 @@ @pytest.mark.skip(reason="SOV-556. 
Test implementation pending, " "although bug fixed") -def testPrimaryForfeit(looper, nodeSet, up, client1, wallet1): +def testPrimaryForfeit(looper, txnPoolNodeSet, client1, wallet1): """ The primary of master protocol instance of the pool forfeits the primary status by triggering an election and not nominating itself """ - pr = getPrimaryReplica(nodeSet, instId=0) + pr = getPrimaryReplica(txnPoolNodeSet, instId=0) prNode = pr.node # TODO: Incomplete raise NotImplementedError diff --git a/plenum/test/primary_selection/conftest.py b/plenum/test/primary_selection/conftest.py index 158f9ab9ef..957dbd6975 100644 --- a/plenum/test/primary_selection/conftest.py +++ b/plenum/test/primary_selection/conftest.py @@ -1,12 +1,11 @@ import pytest +from plenum.test.conftest import getValueFromModule from plenum.test.node_catchup.helper import waitNodeDataEquality from plenum.test.primary_selection.helper import check_newly_added_nodes, \ getPrimaryNodesIdxs -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper, nodeThetaAdded, \ - stewardAndWallet1, steward1, stewardWallet from plenum.test.pool_transactions.helper import buildPoolClientAndWallet +from plenum.test.pool_transactions.conftest import nodeThetaAdded @pytest.fixture(scope="module") @@ -49,3 +48,23 @@ def stewardAndWalletForMasterNode(looper, poolTxnData, poolTxnStewardNames, looper.run(stewardClient.ensureConnectedToNodes()) return stewardClient, stewardWallet + + +@pytest.fixture(scope="module") +def checkpoint_size(tconf, request): + oldChkFreq = tconf.CHK_FREQ + oldLogSize = tconf.LOG_SIZE + oldMax3PCBatchSize = tconf.Max3PCBatchSize + + tconf.Max3PCBatchSize = 3 + tconf.CHK_FREQ = getValueFromModule(request, "CHK_FREQ", 2) + tconf.LOG_SIZE = 2 * tconf.CHK_FREQ + + def reset(): + tconf.CHK_FREQ = oldChkFreq + tconf.LOG_SIZE = oldLogSize + tconf.Max3PCBatchSize = oldMax3PCBatchSize + + request.addfinalizer(reset) + + return tconf.CHK_FREQ * tconf.Max3PCBatchSize diff --git a/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py b/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py index f8a0cee588..e936f1af41 100644 --- a/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py +++ b/plenum/test/primary_selection/test_add_node_to_pool_with_large_ppseqno.py @@ -12,7 +12,7 @@ def _get_ppseqno(nodes): for node in nodes: for repl in node.replicas: res.add(repl.lastPrePrepareSeqNo) - assert(len(res) == 1) + assert (len(res) == 1) return min(res) @@ -41,11 +41,11 @@ def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, big_ppseqno = tconf.LOG_SIZE * 2 + 2345 cur_ppseqno = _get_ppseqno(txnPoolNodeSet) - assert(big_ppseqno > cur_ppseqno) + assert (big_ppseqno > cur_ppseqno) # ensure pool is working properly sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, steward1, numReqs=3) - assert(cur_ppseqno < _get_ppseqno(txnPoolNodeSet)) + assert (cur_ppseqno < _get_ppseqno(txnPoolNodeSet)) _set_ppseqno(txnPoolNodeSet, big_ppseqno) cur_ppseqno = _get_ppseqno(txnPoolNodeSet) @@ -55,7 +55,7 @@ def test_add_node_to_pool_with_large_ppseqno_diff_views(do_view_change, looper, new_steward_name = "testClientSteward" + randomString(4) new_node_name = "TestTheta" + randomString(4) - new_steward, new_steward_wallet, new_node =\ + new_steward, new_steward_wallet, new_node = \ addNewStewardAndNode(looper, steward1, stewardWallet, new_steward_name, new_node_name, tdir, client_tdir, tconf, allPluginsPath) 
     txnPoolNodeSet.append(new_node)
diff --git a/plenum/test/primary_selection/test_catchup_after_view_change.py b/plenum/test/primary_selection/test_catchup_after_view_change.py
index 78ec458859..33c762f608 100644
--- a/plenum/test/primary_selection/test_catchup_after_view_change.py
+++ b/plenum/test/primary_selection/test_catchup_after_view_change.py
@@ -10,13 +10,8 @@
     checkProtocolInstanceSetup, TestReplica
 from plenum.test.view_change.helper import ensure_view_change
 from stp_core.loop.eventually import eventually
-
-from plenum.test.pool_transactions.conftest import clientAndWallet1, \
-    client1, wallet1, client1Connected, looper, nodeThetaAdded, \
-    stewardAndWallet1, steward1, stewardWallet
 from plenum.test.batching_3pc.conftest import tconf
-
 Max3PCBatchSize = 4
 TestRunningTimeLimitSec = 150
diff --git a/plenum/test/primary_selection/test_catchup_multiple_rounds.py b/plenum/test/primary_selection/test_catchup_multiple_rounds.py
index db54c0bab1..14c16a7746 100644
--- a/plenum/test/primary_selection/test_catchup_multiple_rounds.py
+++ b/plenum/test/primary_selection/test_catchup_multiple_rounds.py
@@ -5,9 +5,6 @@
 from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies, \
     sendRandomRequests, waitForSufficientRepliesForRequests, checkViewNoForNodes
 from plenum.test.node_catchup.helper import waitNodeDataEquality
-from plenum.test.pool_transactions.conftest import clientAndWallet1, \
-    client1, wallet1, client1Connected, looper, nodeThetaAdded, \
-    stewardAndWallet1, steward1, stewardWallet
 from plenum.test.batching_3pc.conftest import tconf
 from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica, \
@@ -26,8 +23,7 @@ def test_slow_nodes_catchup_before_selecting_primary_in_new_view(
         txnPoolNodeSet,
         client1,
         wallet1,
-        one_node_added,
-        client1Connected):
+        one_node_added):
     """
     Delay 3PC messages to one node and view change messages to some others
     (including primary) so the node that does not receive enough 3PC messages is
@@ -54,7 +50,7 @@ def test_slow_nodes_catchup_before_selecting_primary_in_new_view(
     def start_count():
         return sum([1 for e in slow_node.ledgerManager.spylog.getAll(
             slow_node.ledgerManager.startCatchUpProcess.__name__)
-                   if e.params['ledgerId'] == DOMAIN_LEDGER_ID])
+                    if e.params['ledgerId'] == DOMAIN_LEDGER_ID])
     s = start_count()
     requests = sendRandomRequests(wallet1, client1, 10 * Max3PCBatchSize)
diff --git a/plenum/test/primary_selection/test_catchup_needed_check.py b/plenum/test/primary_selection/test_catchup_needed_check.py
index 2588f48f0c..2a0ba2c164 100644
--- a/plenum/test/primary_selection/test_catchup_needed_check.py
+++ b/plenum/test/primary_selection/test_catchup_needed_check.py
@@ -13,7 +13,6 @@
 # noinspection PyUnresolvedReferences
 from plenum.test.batching_3pc.conftest import tconf
-
 Max3PCBatchSize = 2
diff --git a/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py b/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py
index ff67f61d03..de4ec93517 100644
--- a/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py
+++ b/plenum/test/primary_selection/test_new_node_accepts_chosen_primary.py
@@ -40,7 +40,7 @@ def testNodeClass(patchPluginManager):
 @pytest.fixture("module")
-def txnPoolNodeSet(txnPoolNodeSet, looper, client1, wallet1, client1Connected,
+def txnPoolNodeSet(txnPoolNodeSet, looper, client1, wallet1,
                    tconf, tdirWithPoolTxns, allPluginsPath):
     logger.debug("Do several view changes to round the list of primaries")
diff --git a/plenum/test/primary_selection/test_primary_selection.py b/plenum/test/primary_selection/test_primary_selection.py
index 2f0d7e7f9c..433378d276 100644
--- a/plenum/test/primary_selection/test_primary_selection.py
+++ b/plenum/test/primary_selection/test_primary_selection.py
@@ -22,21 +22,22 @@
 @pytest.fixture()
-def primaryReplicas(nodeSet, up):
+def primaryReplicas(txnPoolNodeSet):
     instanceCount = getNoInstances(nodeCount)
-    return [getPrimaryReplica(nodeSet, i) for i in range(instanceCount)]
+    return [getPrimaryReplica(txnPoolNodeSet, i) for i in range(instanceCount)]
 # noinspection PyIncorrectDocstring
 def testPrimarySelectionAfterPoolReady(
-        looper, nodeSet, ready, wallet1, client1):
+        looper, txnPoolNodeSet, wallet1, client1):
     """
     Once the pool is ready(node has connected to at least 3 other nodes),
     appropriate primary replicas should be selected.
     """
+
     def checkPrimaryPlacement():
         # Node names sorted by rank
-        sortedNodes = sorted(nodeSet.nodes.values(),
+        sortedNodes = sorted(txnPoolNodeSet,
                              key=operator.attrgetter("rank"))
         for idx, node in enumerate(sortedNodes):
@@ -64,33 +65,33 @@ def checkPrimaryPlacement():
             assert not node.replicas[1].isPrimary
             assert node.replicas[2].isPrimary
-    check_rank_consistent_across_each_node(nodeSet)
+    check_rank_consistent_across_each_node(txnPoolNodeSet)
     # Check if the primary is on the correct node
-    timeout = waits.expectedPoolElectionTimeout(len(nodeSet))
+    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     looper.run(eventually(checkPrimaryPlacement, retryWait=1, timeout=timeout))
     # Check if every protocol instance has one and only one primary and any node
     # has no more than one primary
-    checkProtocolInstanceSetup(looper, nodeSet, retryWait=1)
+    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
     sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 5)
 @pytest.fixture(scope='module')
-def catchup_complete_count(nodeSet):
-    return {n.name: n.spylog.count(n.allLedgersCaughtUp) for n in nodeSet}
+def catchup_complete_count(txnPoolNodeSet):
+    return {n.name: n.spylog.count(n.allLedgersCaughtUp) for n in txnPoolNodeSet}
+
+@pytest.fixture(scope='module')  # noqa
+def view_change_done(looper, txnPoolNodeSet):
+    ensure_view_change(looper, txnPoolNodeSet)
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
-@pytest.fixture(scope='module')  # noqa
-def view_change_done(looper, nodeSet):
-    ensure_view_change(looper, nodeSet)
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
 # noinspection PyIncorrectDocstring
-def testPrimarySelectionAfterViewChange(  # noqa
+def testPrimarySelectionAfterViewChange(  # noqa
         looper,
-        nodeSet,
-        ready,
+        txnPoolNodeSet,
         primaryReplicas,
         catchup_complete_count,
         view_change_done):
@@ -100,7 +101,7 @@ def testPrimarySelectionAfterViewChange(  # noqa
     """
     # TODO: This test can fail due to view change.
-    for n in nodeSet:
+    for n in txnPoolNodeSet:
         assert n.spylog.count(
             n.allLedgersCaughtUp) > catchup_complete_count[n.name]
@@ -109,11 +110,11 @@ def testPrimarySelectionAfterViewChange(  # noqa
     # Primary replicas after view change
     instanceCount = getNoInstances(nodeCount)
-    prAfterVC = [getPrimaryReplica(nodeSet, i) for i in range(instanceCount)]
+    prAfterVC = [getPrimaryReplica(txnPoolNodeSet, i) for i in range(instanceCount)]
     # Primary replicas have moved to the next node
     for br, ar in zip(prBeforeVC, prAfterVC):
         assert ar.node.rank - br.node.rank == 1
-    check_rank_consistent_across_each_node(nodeSet)
-    checkProtocolInstanceSetup(looper, nodeSet, retryWait=1)
+    check_rank_consistent_across_each_node(txnPoolNodeSet)
+    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
index dd2ebdcfa3..3a9db246ef 100644
--- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_pool_restart.py
@@ -1,7 +1,6 @@
 from stp_core.common.log import getlogger
 from plenum.common.constants import ALIAS, SERVICES
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.pool_transactions.helper import updateNodeData
 from plenum.test.test_node import TestNode, checkNodesConnected, \
@@ -14,9 +13,11 @@
 logger = getlogger()
+
 def test_primary_selection_after_primary_demotion_and_pool_restart(looper,
-        txnPoolNodeSet, stewardAndWalletForMasterNode, txnPoolMasterNodes,
-        tdir, tconf):
+        txnPoolNodeSet, stewardAndWalletForMasterNode,
+        txnPoolMasterNodes,
+        tdir, tconf):
     """
     Demote primary and restart the pool. Pool should select new primary and
     have viewNo=0 after restart.
diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_promotion.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_promotion.py
index 1f6a547e3b..58af8b3b61 100644
--- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_promotion.py
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_promotion.py
@@ -1,5 +1,4 @@
 from plenum.common.constants import ALIAS, SERVICES, VALIDATOR
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.pool_transactions.helper import updateNodeData
 from plenum.test.test_node import ensureElectionsDone
diff --git a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
index 228af4111f..f846de58e1 100644
--- a/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
+++ b/plenum/test/primary_selection/test_primary_selection_after_primary_demotion_and_view_changes.py
@@ -2,7 +2,6 @@
 from plenum.common.constants import ALIAS, SERVICES
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.pool_transactions.helper import updateNodeData
 from plenum.test.helper import checkViewNoForNodes, \
@@ -12,8 +11,9 @@
 logger = getlogger()
+
 def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPoolNodeSet,
-        stewardAndWalletForMasterNode, txnPoolMasterNodes):
+                                                                   stewardAndWalletForMasterNode, txnPoolMasterNodes):
     """
     Demote primary and do multiple view changes forcing primaries rotation.
     Demoted primary should be skipped without additional view changes.
@@ -32,14 +32,14 @@ def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPo
     updateNodeData(looper, client, wallet, master_node, node_data)
     restNodes = [node for node in txnPoolNodeSet \
-                 if node.name != master_node.name]
+                     if node.name != master_node.name]
     ensureElectionsDone(looper, restNodes)
     viewNo1 = checkViewNoForNodes(restNodes)
     assert viewNo1 == viewNo0 + 1
     assert master_node.viewNo == viewNo0
-    assert len(restNodes[0].replicas) == 1 # only one instance left
+    assert len(restNodes[0].replicas) == 1  # only one instance left
     assert restNodes[0].replicas[0].primaryName != master_node.name
     # ensure pool is working properly
diff --git a/plenum/test/primary_selection/test_primary_selection_pool_txn.py b/plenum/test/primary_selection/test_primary_selection_pool_txn.py
index fe7c49632a..a3bc89bbcf 100644
--- a/plenum/test/primary_selection/test_primary_selection_pool_txn.py
+++ b/plenum/test/primary_selection/test_primary_selection_pool_txn.py
@@ -20,7 +20,7 @@ def check_accepted_view_change_sent(node, nodes):
             continue
         if other_node.name in node.view_changer._view_change_done:
             assert node.view_changer._view_change_done[other_node.name] == \
-                node.view_changer._accepted_view_change_done_message
+                   node.view_changer._accepted_view_change_done_message
 def test_primary_selection_non_genesis_node(one_node_added, looper,
@@ -52,6 +52,5 @@ def test_primary_selection_increase_f(
     #     check_accepted_view_change_sent(n, txnPoolNodeSet)
     ensure_pool_functional(looper, txnPoolNodeSet, stewardWallet, steward1)
-    # TODO: Add more tests to make one next primary crashed, malicious, ensure primary
-    # selection happens after catchup
+# selection happens after catchup
diff --git a/plenum/test/primary_selection/test_primary_selection_routes.py b/plenum/test/primary_selection/test_primary_selection_routes.py
index 1c2d348864..2df9723e87 100644
--- a/plenum/test/primary_selection/test_primary_selection_routes.py
+++ b/plenum/test/primary_selection/test_primary_selection_routes.py
@@ -3,6 +3,6 @@
 nodeCount = 7
-def test_routes(nodeSet, up):
+def test_routes(txnPoolNodeSet):
     # TODO: Low priority.
     pass
diff --git a/plenum/test/primary_selection/test_primary_selector.py b/plenum/test/primary_selection/test_primary_selector.py
index c6b01ded9d..f1628008b7 100644
--- a/plenum/test/primary_selection/test_primary_selector.py
+++ b/plenum/test/primary_selection/test_primary_selector.py
@@ -17,7 +17,6 @@
 from plenum.test.helper import create_new_test_node
 from plenum.test.test_node import TestNode
-
 whitelist = ['but majority declared']
@@ -103,6 +102,7 @@ def on_view_change_start(self):
     def start_catchup(self):
         pass
+
 def test_has_view_change_quorum_number(tconf, tdir):
     """
     Checks method _hasViewChangeQuorum of SimpleSelector
@@ -299,7 +299,7 @@ def test_get_msgs_for_lagged_nodes(tconf, tdir):
             viewNo=0,
             name='Node2',
             ledgerInfo=ledgerInfo),
-            'Node1'),
+         'Node1'),
         (ViewChangeDone(
             viewNo=0,
             name='Node3',
@@ -483,4 +483,4 @@ def test_primaries_selection_gaps(txnPoolNodeSetWithElector):
         name, instance_name = node.elector.next_primary_replica_name_for_backup(
             2, master_primary_rank, primaries)
         assert name == "Gamma" and \
-            instance_name == "Gamma:2"
+               instance_name == "Gamma:2"
diff --git a/plenum/test/primary_selection/test_recover_more_than_f_failure.py b/plenum/test/primary_selection/test_recover_more_than_f_failure.py
index 4b28252845..fe1bd3e56a 100644
--- a/plenum/test/primary_selection/test_recover_more_than_f_failure.py
+++ b/plenum/test/primary_selection/test_recover_more_than_f_failure.py
@@ -2,28 +2,19 @@
 from stp_core.common.log import getlogger
-from plenum.test.conftest import getValueFromModule
 from plenum.test.helper import stopNodes, waitForViewChange, \
     sendReqsToNodesAndVerifySuffReplies
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
 from plenum.test.pool_transactions.helper import \
-    disconnect_node_and_ensure_disconnected, \
-    reconnect_node_and_ensure_connected
-from plenum.test.test_node import ensureElectionsDone, ensure_node_disconnected
-from plenum.test.view_change.helper import ensure_view_change
+    disconnect_node_and_ensure_disconnected
+from plenum.test.test_node import ensureElectionsDone
 from plenum.test.view_change.helper import start_stopped_node
-
-# Do not remove these imports
-from plenum.test.pool_transactions.conftest import client1, wallet1, client1Connected, looper
-
-
 logger = getlogger()
 def test_recover_stop_primaries(looper, checkpoint_size, txnPoolNodeSet,
-                                allPluginsPath, tdir, tconf, client1, wallet1,
-                                client1Connected):
+                                allPluginsPath, tdir, tconf, client1, wallet1):
     """
     Test that we can recover after having more than f nodes disconnected:
     - stop current master primary (Alpha)
@@ -47,7 +38,7 @@ def test_recover_stop_primaries(looper, checkpoint_size, txnPoolNodeSet,
     logger.info("send at least one checkpoint")
     assert nodes_do_not_have_checkpoints(*active_nodes)
-    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=2*checkpoint_size)
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=2 * checkpoint_size)
     assert nodes_have_checkpoints(*active_nodes)
     ensure_all_nodes_have_same_data(looper, nodes=active_nodes)
@@ -67,7 +58,7 @@
     ensure_all_nodes_have_same_data(looper, nodes=active_nodes)
     logger.info("Check if the pool is able to process requests")
-    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=10*checkpoint_size)
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=10 * checkpoint_size)
     ensure_all_nodes_have_same_data(looper, nodes=active_nodes)
     assert nodes_have_checkpoints(*active_nodes)
@@ -83,26 +74,6 @@ def stop_primary(looper, active_nodes):
     return stopped_node, active_nodes
-@pytest.fixture(scope="module")
-def checkpoint_size(tconf, request):
-    oldChkFreq = tconf.CHK_FREQ
-    oldLogSize = tconf.LOG_SIZE
-    oldMax3PCBatchSize = tconf.Max3PCBatchSize
-
-    tconf.Max3PCBatchSize = 3
-    tconf.CHK_FREQ = getValueFromModule(request, "CHK_FREQ", 2)
-    tconf.LOG_SIZE = 2*tconf.CHK_FREQ
-
-    def reset():
-        tconf.CHK_FREQ = oldChkFreq
-        tconf.LOG_SIZE = oldLogSize
-        tconf.Max3PCBatchSize = oldMax3PCBatchSize
-
-    request.addfinalizer(reset)
-
-    return tconf.CHK_FREQ * tconf.Max3PCBatchSize
-
-
 def primary_replicas_iter(*nodes):
     for node in nodes:
         for replica in node.replicas:
diff --git a/plenum/test/primary_selection/test_recover_primary_no_view_change.py b/plenum/test/primary_selection/test_recover_primary_no_view_change.py
index 95f4fc9062..19ec82af8d 100644
--- a/plenum/test/primary_selection/test_recover_primary_no_view_change.py
+++ b/plenum/test/primary_selection/test_recover_primary_no_view_change.py
@@ -3,22 +3,14 @@
 from stp_core.common.log import getlogger
 from plenum.test.conftest import getValueFromModule
-from plenum.test.helper import stopNodes, waitForViewChange, \
+from plenum.test.helper import waitForViewChange, \
     sendReqsToNodesAndVerifySuffReplies
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
-from plenum.test.pool_transactions.helper import \
-    disconnect_node_and_ensure_disconnected, \
-    reconnect_node_and_ensure_connected
-from plenum.test.test_node import ensureElectionsDone, ensure_node_disconnected
-from plenum.test.view_change.helper import ensure_view_change
+from plenum.test.test_node import ensureElectionsDone
 from plenum.test.view_change.helper import start_stopped_node
 from plenum.test.primary_selection.test_recover_more_than_f_failure import \
-    stop_primary, checkpoint_size, primary_replicas_iter, nodes_have_checkpoints, nodes_do_not_have_checkpoints
-
-# Do not remove these imports
-from plenum.test.pool_transactions.conftest import client1, wallet1, client1Connected, looper
-
+    stop_primary, nodes_have_checkpoints, nodes_do_not_have_checkpoints
 logger = getlogger()
@@ -32,8 +24,7 @@ def tconf(tconf):
 def test_recover_stop_primaries_no_view_change(looper, checkpoint_size, txnPoolNodeSet,
-                                               allPluginsPath, tdir, tconf, client1, wallet1,
-                                               client1Connected):
+                                               allPluginsPath, tdir, tconf, client1, wallet1):
     """
     Test that we can recover after having more than f nodes disconnected:
     - send txns
@@ -48,7 +39,7 @@ def test_recover_stop_primaries_no_view_change(looper, checkpoint_size, txnPoolN
     logger.info("send at least one checkpoint")
     assert nodes_do_not_have_checkpoints(*active_nodes)
-    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=2*checkpoint_size)
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=2 * checkpoint_size)
     assert nodes_have_checkpoints(*active_nodes)
     ensure_all_nodes_have_same_data(looper, nodes=active_nodes)
@@ -68,6 +59,6 @@ def test_recover_stop_primaries_no_view_change(looper, checkpoint_size, txnPoolN
     ensure_all_nodes_have_same_data(looper, nodes=active_nodes)
     logger.info("Check if the pool is able to process requests")
-    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=10*checkpoint_size)
+    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=10 * checkpoint_size)
     ensure_all_nodes_have_same_data(looper, nodes=active_nodes)
     assert nodes_have_checkpoints(*active_nodes)
diff --git a/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py b/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py
index 889cdfb1da..abeb361cd9 100644
--- a/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py
+++ b/plenum/test/primary_selection/test_selection_f_plus_one_quorum.py
@@ -7,10 +7,6 @@
 from plenum.test.view_change.helper import start_stopped_node
-
-# Do not remove these imports
-from plenum.test.pool_transactions.conftest import client1, wallet1, client1Connected, looper
-
-
 def test_selection_f_plus_one_quorum(looper, txnPoolNodeSet, allPluginsPath,
                                      tdir, tconf, sdk_pool_handle, sdk_wallet_client):
     """
diff --git a/plenum/test/propagate/test_propagate_recvd_after_request.py b/plenum/test/propagate/test_propagate_recvd_after_request.py
index 870ceb9309..88559c8c2b 100644
--- a/plenum/test/propagate/test_propagate_recvd_after_request.py
+++ b/plenum/test/propagate/test_propagate_recvd_after_request.py
@@ -13,16 +13,16 @@
 @pytest.fixture()
-def setup(nodeSet):
-    A, B, C, D = nodeSet.nodes.values()  # type: TestNode
+def setup(txnPoolNodeSet):
+    A, B, C, D = txnPoolNodeSet  # type: TestNode
     delay(Propagate, frm=[B, C, D], to=A, howlong=howlong)
     # Delay MessageRep by long simulating loss as if Propagate is missing, it
     # is requested
-    A.nodeIbStasher.delay(msg_rep_delay(10*howlong, [PROPAGATE, ]))
+    A.nodeIbStasher.delay(msg_rep_delay(10 * howlong, [PROPAGATE, ]))
-def testPropagateRecvdAfterRequest(setup, looper, nodeSet, up, sent1):
-    A, B, C, D = nodeSet.nodes.values()  # type: TestNode
+def testPropagateRecvdAfterRequest(setup, looper, txnPoolNodeSet, sent1):
+    A, B, C, D = txnPoolNodeSet  # type: TestNode
     def x():
         # A should have received a request from the client
diff --git a/plenum/test/propagate/test_propagate_recvd_before_request.py b/plenum/test/propagate/test_propagate_recvd_before_request.py
index bb0d60ff2b..cbdce9037d 100644
--- a/plenum/test/propagate/test_propagate_recvd_before_request.py
+++ b/plenum/test/propagate/test_propagate_recvd_before_request.py
@@ -8,21 +8,20 @@
     sentPropagate, forwardedRequest
 from plenum.test import waits
-
 nodeCount = 4
 howlong = 10
 delaySec = 5
 @pytest.fixture()
-def setup(nodeSet):
-    A, B, C, D = nodeSet.nodes.values()
+def setup(txnPoolNodeSet):
+    A, B, C, D = txnPoolNodeSet
     A.clientIbStasher.delay(lambda x: delaySec)
     delay(Propagate, frm=[C, D], to=A, howlong=howlong)
-def testPropagateRecvdBeforeRequest(setup, looper, nodeSet, up, sent1):
-    A, B, C, D = nodeSet.nodes.values()
+def testPropagateRecvdBeforeRequest(setup, looper, txnPoolNodeSet, sent1):
+    A, B, C, D = txnPoolNodeSet
     def x():
         # A should not have received a request from the client
@@ -49,5 +48,5 @@ def chk():
         assertLength(forwardedRequest(A), 1)
     timeout = waits.expectedClientRequestPropagationTime(
-        len(nodeSet)) + delaySec
+        len(txnPoolNodeSet)) + delaySec
     looper.run(eventually(chk, retryWait=1, timeout=timeout))
diff --git a/plenum/test/replica/test_buffers_cleaning.py b/plenum/test/replica/test_buffers_cleaning.py
index ef0290a164..940aa8d1dc 100644
--- a/plenum/test/replica/test_buffers_cleaning.py
+++ b/plenum/test/replica/test_buffers_cleaning.py
@@ -3,7 +3,6 @@
 def test_ordered_cleaning(tconf):
-
     global_view_no = 2
     node = FakeSomething(
@@ -12,7 +11,7 @@ def test_ordered_cleaning(tconf):
         viewNo=global_view_no,
     )
     bls_bft_replica = FakeSomething(
-        gc = lambda *args: None,
+        gc=lambda *args: None,
     )
     replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
@@ -34,33 +33,32 @@ def test_ordered_cleaning(tconf):
 def test_primary_names_cleaning(tconf):
-
     node = FakeSomething(
         name="fake node",
         ledger_ids=[0],
         viewNo=0,
     )
     bls_bft_replica = FakeSomething(
-        gc = lambda *args: None,
+        gc=lambda *args: None,
     )
     replica = Replica(node, instId=0, config=tconf, bls_bft_replica=bls_bft_replica)
     replica.primaryName = "Node1:0"
     assert list(replica.primaryNames.items()) == \
-        [(0, "Node1:0")]
+           [(0, "Node1:0")]
     node.viewNo += 1
     replica.primaryName = "Node2:0"
     assert list(replica.primaryNames.items()) == \
-        [(0, "Node1:0"), (1, "Node2:0")]
+           [(0, "Node1:0"), (1, "Node2:0")]
     node.viewNo += 1
     replica.primaryName = "Node3:0"
    assert list(replica.primaryNames.items()) == \
-        [(1, "Node2:0"), (2, "Node3:0")]
+           [(1, "Node2:0"), (2, "Node3:0")]
     node.viewNo += 1
     replica.primaryName = "Node4:0"
     assert list(replica.primaryNames.items()) == \
-        [(2, "Node3:0"), (3, "Node4:0")]
+           [(2, "Node3:0"), (3, "Node4:0")]
diff --git a/plenum/test/replica/test_primary_marked_suspicious_for_sending_prepare.py b/plenum/test/replica/test_primary_marked_suspicious_for_sending_prepare.py
index 3fd437b419..28f8382bcb 100644
--- a/plenum/test/replica/test_primary_marked_suspicious_for_sending_prepare.py
+++ b/plenum/test/replica/test_primary_marked_suspicious_for_sending_prepare.py
@@ -1,4 +1,3 @@
-
 import pytest
 from plenum.test.delayers import cDelay
@@ -14,22 +13,22 @@
 @pytest.fixture(scope="module")
-def delay_commits(nodeSet):
+def delay_commits(txnPoolNodeSet):
     # Delay COMMITs so that ordering is delayed and checks can be made
-    for n in nodeSet:
+    for n in txnPoolNodeSet:
         n.nodeIbStasher.delay(cDelay(5))
-def testPrimarySendsAPrepareAndMarkedSuspicious(looper, nodeSet, delay_commits,
+def testPrimarySendsAPrepareAndMarkedSuspicious(looper, txnPoolNodeSet, delay_commits,
                                                 preprepared1):
     def sendPrepareFromPrimary(instId):
-        primary = getPrimaryReplica(nodeSet, instId)
+        primary = getPrimaryReplica(txnPoolNodeSet, instId)
         viewNo, ppSeqNo = next(iter(primary.sentPrePrepares.keys()))
         ppReq = primary.sentPrePrepares[viewNo, ppSeqNo]
         primary.doPrepare(ppReq)
         def chk():
-            for r in getNonPrimaryReplicas(nodeSet, instId):
+            for r in getNonPrimaryReplicas(txnPoolNodeSet, instId):
                 l = len([param for param in getAllArgs(r, r.processPrepare)
                          if param['sender'] == primary.name])
                 assert l == 1
@@ -38,10 +37,10 @@ def chk():
     sendPrepareFromPrimary(0)
-    for node in nodeSet:
-        if node in getNonPrimaryReplicas(nodeSet, 0):
+    for node in txnPoolNodeSet:
+        if node in getNonPrimaryReplicas(txnPoolNodeSet, 0):
             frm, reason, code = getAllArgs(node, TestNode.reportSuspiciousNode)
-            assert frm == getPrimaryReplica(nodeSet, 0).node.name
+            assert frm == getPrimaryReplica(txnPoolNodeSet, 0).node.name
             assert isinstance(reason, SuspiciousNode)
             assert len(getNodeSuspicions(node,
                                          Suspicions.PR_FRM_PRIMARY.code)) == 10
diff --git a/plenum/test/replica/test_replica_reject_same_pre_prepare.py b/plenum/test/replica/test_replica_reject_same_pre_prepare.py
index 00416973f4..0fb79c6872 100644
--- a/plenum/test/replica/test_replica_reject_same_pre_prepare.py
+++ b/plenum/test/replica/test_replica_reject_same_pre_prepare.py
@@ -1,4 +1,3 @@
-
 import pytest
 from plenum.test.delayers import cDelay
@@ -17,12 +16,11 @@
     'cannot process incoming PRE-PREPARE',
     'InvalidSignature']
-
 logger = getlogger()
 # noinspection PyIncorrectDocstring
-def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
+def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet, client1, wallet1):
     """
     Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
     number "n" if it has already accepted a request with view number "v" and
@@ -31,14 +29,14 @@
     """
     numOfNodes = 4
     fValue = getMaxFailures(numOfNodes)
-    primaryRepl = getPrimaryReplica(nodeSet, 1)
+    primaryRepl = getPrimaryReplica(txnPoolNodeSet, 1)
     logger.debug("Primary Replica: {}".format(primaryRepl))
-    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet, 1)
+    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, 1)
     logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))
     # Delay COMMITs so request is not ordered and checks can be made
     c_delay = 10
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))
     request1 = sendRandomRequest(wallet1, client1)
@@ -71,7 +69,7 @@ def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
     primaryRepl._lastPrePrepareSeqNo -= 1
     view_no = primaryRepl.viewNo
     request2 = sendRandomRequest(wallet1, client1)
-    timeout = waits.expectedPrePrepareTime(len(nodeSet))
+    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
     looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                           retryWait=1, timeout=timeout))
@@ -96,7 +94,7 @@ def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
     logger.debug("""Checking whether all the non primary replicas have received the pre-prepare request with same sequence number""")
-    timeout = waits.expectedPrePrepareTime(len(nodeSet))
+    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
     looper.run(eventually(checkPrePrepareReqRecvd,
                           nonPrimaryReplicas,
                           prePrepareReq,
@@ -105,7 +103,7 @@ def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
     logger.debug("""Check that none of the non primary replicas didn't send any prepare message " in response to the pre-prepare message""")
-    timeout = waits.expectedPrepareTime(len(nodeSet))
+    timeout = waits.expectedPrepareTime(len(txnPoolNodeSet))
     looper.runFor(timeout)  # expect prepare processing timeout
     # check if prepares have not been sent
@@ -119,7 +117,7 @@ def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
                           retryWait=1, timeout=timeout))
-    timeout = waits.expectedTransactionExecutionTime(len(nodeSet)) + c_delay
+    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) + c_delay
     result1 = \
         looper.run(eventually(check_sufficient_replies_received,
                               client1, request1.identifier, request1.reqId,
diff --git a/plenum/test/restart/test_restart_node_4_all.py b/plenum/test/restart/test_restart_node_4_all.py
index 46dbd471e1..6f39800dad 100644
--- a/plenum/test/restart/test_restart_node_4_all.py
+++ b/plenum/test/restart/test_restart_node_4_all.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_np(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 4, include_primary=False)
diff --git a/plenum/test/restart/test_restart_nodes.py b/plenum/test/restart/test_restart_nodes.py
index 665b502002..8816bb3255 100644
--- a/plenum/test/restart/test_restart_nodes.py
+++ b/plenum/test/restart/test_restart_nodes.py
@@ -5,8 +5,6 @@
 from plenum.common.config_helper import PNodeConfigHelper
 from plenum.test.test_node import TestNode
-from plenum.test.pool_transactions.conftest import looper
-
 def get_group(nodeSet, group_cnt, include_primary=False):
     if group_cnt >= len(nodeSet):
@@ -60,7 +58,6 @@ def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
 def test_restart_groups(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 4, include_primary=False)
diff --git a/plenum/test/restart/test_restart_nodes_4_all_wp.py b/plenum/test/restart/test_restart_nodes_4_all_wp.py
index 5c8f743513..8576f90e95 100644
--- a/plenum/test/restart/test_restart_nodes_4_all_wp.py
+++ b/plenum/test/restart/test_restart_nodes_4_all_wp.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 4, include_primary=True)
diff --git a/plenum/test/restart/test_restart_nodes_4_np.py b/plenum/test/restart/test_restart_nodes_4_np.py
index fa7e3ee9ef..b0a6ab713f 100644
--- a/plenum/test/restart/test_restart_nodes_4_np.py
+++ b/plenum/test/restart/test_restart_nodes_4_np.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 4, include_primary=True)
diff --git a/plenum/test/restart/test_restart_nodes_6.py b/plenum/test/restart/test_restart_nodes_6.py
index 5dbee1bce1..dd21658233 100644
--- a/plenum/test/restart/test_restart_nodes_6.py
+++ b/plenum/test/restart/test_restart_nodes_6.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 6, include_primary=False)
diff --git a/plenum/test/restart/test_restart_nodes_6_all.py b/plenum/test/restart/test_restart_nodes_6_all.py
index 1b476a87cb..5496e7c9b4 100644
--- a/plenum/test/restart/test_restart_nodes_6_all.py
+++ b/plenum/test/restart/test_restart_nodes_6_all.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_np(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 6, include_primary=False)
diff --git a/plenum/test/restart/test_restart_nodes_6_all_wp.py b/plenum/test/restart/test_restart_nodes_6_all_wp.py
index 2631ee3576..5aebce9826 100644
--- a/plenum/test/restart/test_restart_nodes_6_all_wp.py
+++ b/plenum/test/restart/test_restart_nodes_6_all_wp.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 6, include_primary=True)
diff --git a/plenum/test/restart/test_restart_nodes_6_np.py b/plenum/test/restart/test_restart_nodes_6_np.py
index f41ee608c3..873ed9ef1a 100644
--- a/plenum/test/restart/test_restart_nodes_6_np.py
+++ b/plenum/test/restart/test_restart_nodes_6_np.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 6, include_primary=True)
diff --git a/plenum/test/restart/test_restart_nodes_7.py b/plenum/test/restart/test_restart_nodes_7.py
index a5ee975e40..35b9ef35a8 100644
--- a/plenum/test/restart/test_restart_nodes_7.py
+++ b/plenum/test/restart/test_restart_nodes_7.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 7, include_primary=False)
diff --git a/plenum/test/restart/test_restart_nodes_7_all.py b/plenum/test/restart/test_restart_nodes_7_all.py
index f1943fa93b..d11968e92a 100644
--- a/plenum/test/restart/test_restart_nodes_7_all.py
+++ b/plenum/test/restart/test_restart_nodes_7_all.py
@@ -6,15 +6,11 @@
 from plenum.test.test_node import TestNode
 from plenum.test.restart.test_restart_nodes import get_group, restart_nodes
-from plenum.test.pool_transactions.conftest import looper
-
-
 nodeCount = 7
 def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir, sdk_pool_handle, sdk_wallet_client, allPluginsPath):
-
     tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
     restart_group = get_group(txnPoolNodeSet, 7, include_primary=True)
diff --git a/plenum/test/script/helper.py b/plenum/test/script/helper.py
index ac57a49017..0ef9fb3b7a 100644
--- a/plenum/test/script/helper.py
+++ b/plenum/test/script/helper.py
@@ -1,4 +1,3 @@
-
 import pytest
 from plenum.client.wallet import Wallet
@@ -26,7 +25,6 @@ def looper(txnPoolNodesLooper):
 def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                  poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary, tdir):
-
     # prepare new ha for node and client stack
     subjectedNode = None
     stewardName = None
@@ -40,7 +38,7 @@ def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
             break
     nodeStackNewHA, clientStackNewHA = genHa(2)
-    logger.debug("change HA for node: {} to {}". format(
+    logger.debug("change HA for node: {} to {}".format(
         subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))
     nodeSeed = poolTxnData["seeds"][subjectedNode.name].encode()
diff --git a/plenum/test/script/test_change_non_primary_node_ha.py b/plenum/test/script/test_change_non_primary_node_ha.py
index 5e8d5687f7..da52b4cdf5 100644
--- a/plenum/test/script/test_change_non_primary_node_ha.py
+++ b/plenum/test/script/test_change_non_primary_node_ha.py
@@ -4,7 +4,6 @@
 from stp_core.common.log import getlogger
 from plenum.test.script.helper import changeNodeHa
-
 logger = getlogger()
 whitelist = ['found legacy entry', "doesn't match", 'reconciling nodeReg',
@@ -16,7 +15,6 @@
 @pytest.mark.skipif('sys.platform == "win32"', reason='SOV-330')
 def testChangeNodeHaForNonPrimary(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                                   poolTxnData, poolTxnStewardNames, tconf, tdir):
-
     changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
diff --git a/plenum/test/script/test_change_primary_node_ha.py b/plenum/test/script/test_change_primary_node_ha.py
index 464120d604..e7289b5e87 100644
--- a/plenum/test/script/test_change_primary_node_ha.py
+++ b/plenum/test/script/test_change_primary_node_ha.py
@@ -4,7 +4,6 @@
 from stp_core.common.log import getlogger
 from plenum.test.script.helper import changeNodeHa
-
 logger = getlogger()
 whitelist = ['found legacy entry', "doesn't match", 'reconciling nodeReg',
diff --git a/plenum/test/sdk/test_sdk_bindings.py b/plenum/test/sdk/test_sdk_bindings.py
index faec4593d7..1145e98fce 100644
--- a/plenum/test/sdk/test_sdk_bindings.py
+++ b/plenum/test/sdk/test_sdk_bindings.py
@@ -1,5 +1,4 @@
 import pytest
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.helper import sdk_send_random_request, \
     sdk_send_random_requests, sdk_get_and_check_replies, sdk_send_random_and_check
 from plenum.test.pool_transactions.helper import sdk_pool_refresh
diff --git a/plenum/test/signing/test_signing.py b/plenum/test/signing/test_signing.py
index bb8277ab8f..b06814c321 100644
--- a/plenum/test/signing/test_signing.py
+++ b/plenum/test/signing/test_signing.py
@@ -15,29 +15,30 @@
 @pytest.fixture(scope="module")
-def setup(nodeSet):
-    gn = [v for k, v in nodeSet.nodes.items() if k != 'Alpha']
+def setup(txnPoolNodeSet):
+    pool_without_alpha = list(txnPoolNodeSet)
+    pool_without_alpha.remove(txnPoolNodeSet[0])
     # delay incoming client messages for good nodes by 250 milliseconds
     # this gives Alpha a chance to send a propagate message
-    for n in gn:  # type: TestNode
+    for n in pool_without_alpha:  # type: TestNode
         n.clientIbStasher.delay(lambda _: 1)
-    return adict(goodNodes=gn)
+    return adict(goodNodes=pool_without_alpha)
 @pytest.fixture(scope="module")
-def evilAlpha(nodeSet):
-    makeNodeFaulty(nodeSet.Alpha, changesRequest)
+def evilAlpha(txnPoolNodeSet):
+    makeNodeFaulty(txnPoolNodeSet[0], changesRequest)
 faultyNodes = 1
 def testOneNodeAltersAClientRequest(looper,
-                                    nodeSet,
+                                    txnPoolNodeSet,
                                     setup,
                                     evilAlpha,
                                     sent1):
-    checkPropagated(looper, nodeSet, sent1, faultyNodes)
+    checkPropagated(looper, txnPoolNodeSet, sent1, faultyNodes)
     goodNodes = setup.goodNodes
@@ -58,5 +59,5 @@ def check():
         for good in goodNodes:
             assert good.name in props
-    timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
+    timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet))
     looper.run(eventually(check, retryWait=1, timeout=timeout))
diff --git a/plenum/test/storage/test_client_req_rep_store.py b/plenum/test/storage/test_client_req_rep_store.py
index 417e8b593b..3676e9c351 100644
--- a/plenum/test/storage/test_client_req_rep_store.py
+++ b/plenum/test/storage/test_client_req_rep_store.py
@@ -4,4 +4,4 @@ def testReqAcks(replied1, client1):
     assert len(client1.nodeReg) == len(client1.reqRepStore.getAcks(identifier, reqId))
     assert set(client1.nodeReg.keys()) == \
-        set(client1.reqRepStore.getAcks(identifier, reqId))
+           set(client1.reqRepStore.getAcks(identifier, reqId))
diff --git a/plenum/test/storage/test_reply_persistence_ledger.py b/plenum/test/storage/test_reply_persistence_ledger.py
index a8f3d44e29..9025581b33 100644
--- a/plenum/test/storage/test_reply_persistence_ledger.py
+++ b/plenum/test/storage/test_reply_persistence_ledger.py
@@ -1,5 +1,5 @@
 from plenum.test.storage.helper import checkReplyIsPersisted
-def testReplyPersistedInLedger(nodeSet, looper, replied1):
-    checkReplyIsPersisted(nodeSet, looper, replied1)
+def testReplyPersistedInLedger(txnPoolNodeSet, looper, replied1):
+    checkReplyIsPersisted(txnPoolNodeSet, looper, replied1)
diff --git a/plenum/test/test_action_queue.py b/plenum/test/test_action_queue.py
index 36ee5b188b..004ae63802 100644
--- a/plenum/test/test_action_queue.py
+++ b/plenum/test/test_action_queue.py
@@ -5,6 +5,7 @@
 from plenum.common.motor import Motor
 from plenum.server.has_action_queue import HasActionQueue
+
 class Q1(Motor, HasActionQueue):
     def __init__(self, name):
         self.name = name
@@ -15,7 +16,7 @@ def __init__(self, name):
     def start(self, loop):
         pass
-    async def prod(self, limit: int=None) -> int:
+    async def prod(self, limit: int = None) -> int:
         return self._serviceActions()
     def meth(self, meth_name, x):
@@ -23,6 +24,7 @@ def meth(self, meth_name, x):
             self.results[meth_name] = []
         self.results[meth_name].append((x, time.perf_counter()))
+
 def test_action_scheduling():
     with Looper() as looper:
         q1 = Q1('q1')
diff --git a/plenum/test/test_bootstrapping.py b/plenum/test/test_bootstrapping.py
index 2574be22cb..5f5bd1fb51 100644
--- a/plenum/test/test_bootstrapping.py
+++ b/plenum/test/test_bootstrapping.py
@@ -9,7 +9,6 @@
 logger = getlogger()
-
 whitelist = ['public key from disk', 'verification key from disk',
              'doesnt have enough info to connect']
diff --git a/plenum/test/test_client.py b/plenum/test/test_client.py
index 2520f3670e..8e8ef5f1d4 100644
--- a/plenum/test/test_client.py
+++ b/plenum/test/test_client.py
@@ -18,7 +18,6 @@
 from plenum.test.testable import spyable
 from plenum.common.constants import OP_FIELD_NAME
-
 logger = getlogger()
 client_spyables = [Client.handleOneNodeMsg,
@@ -51,8 +50,8 @@ def genTestClient(nodes=None,
                   nodeReg=None,
                   tmpdir=None,
                   testClientClass=TestClient,
-                  identifier: Identifier=None,
-                  verkey: str=None,
+                  identifier: Identifier = None,
+                  verkey: str = None,
                   bootstrapKeys=True,
                   ha=None,
                   usePoolLedger=False,
diff --git a/plenum/test/test_connections_with_converted_key.py b/plenum/test/test_connections_with_converted_key.py
index f572ec74b7..1766f05e95 100644
--- a/plenum/test/test_connections_with_converted_key.py
+++ b/plenum/test/test_connections_with_converted_key.py
@@ -3,8 +3,8 @@ from stp_core.crypto.util import ed25519SkToCurve25519, ed25519PkToCurve25519 -def testNodesConnectedUsingConvertedKeys(nodeSet, up): - for node in nodeSet: +def testNodesConnectedUsingConvertedKeys(txnPoolNodeSet): + for node in txnPoolNodeSet: secretKey = ed25519SkToCurve25519(node.nodestack.keyhex) publicKey = ed25519PkToCurve25519(node.nodestack.verhex) assert unhexlify(node.nodestack.prihex) == secretKey @@ -16,7 +16,7 @@ def testNodesConnectedUsingConvertedKeys(nodeSet, up): assert unhexlify(node.clientstack.pubhex) == publicKey -def testClientConnectedUsingConvertedKeys(nodeSet, up, client1, replied1): +def testClientConnectedUsingConvertedKeys(txnPoolNodeSet, client1, replied1): secretKey = ed25519SkToCurve25519(client1.nodestack.keyhex) publicKey = ed25519PkToCurve25519(client1.nodestack.verhex) assert unhexlify(client1.nodestack.prihex) == secretKey diff --git a/plenum/test/test_crypto.py b/plenum/test/test_crypto.py index 47e7266fcc..3f2c2e3a27 100644 --- a/plenum/test/test_crypto.py +++ b/plenum/test/test_crypto.py @@ -142,7 +142,7 @@ def testKeyConversionFromEd25519ToCurve25519(): publicKey = ed25519PkToCurve25519(vk) assert PrivateKey(secretKey).public_key.__bytes__() == publicKey assert ed25519PkToCurve25519(vk, toHex=True) == \ - hexlify(PrivateKey(secretKey).public_key.__bytes__()) + hexlify(PrivateKey(secretKey).public_key.__bytes__()) # Check when keys are passed as hex secretKey = ed25519SkToCurve25519(hexlify(sk)) diff --git a/plenum/test/test_current_state_propagation.py b/plenum/test/test_current_state_propagation.py index b73cea8059..01aa43246a 100644 --- a/plenum/test/test_current_state_propagation.py +++ b/plenum/test/test_current_state_propagation.py @@ -5,9 +5,6 @@ disconnect_node_and_ensure_disconnected, reconnect_node_and_ensure_connected from plenum.test.node_catchup.helper import waitNodeDataEquality, \ waitNodeDataInequality, checkNodeDataForEquality -from plenum.test.pool_transactions.conftest import stewardAndWallet1, \ - steward1, stewardWallet, clientAndWallet1, client1, wallet1, \ - client1Connected from plenum.test.test_node import checkNodesConnected from plenum.test.view_change.helper import start_stopped_node diff --git a/plenum/test/test_delay.py b/plenum/test/test_delay.py index b40b8c379c..cbd219ed30 100644 --- a/plenum/test/test_delay.py +++ b/plenum/test/test_delay.py @@ -7,44 +7,41 @@ from plenum.test import waits from plenum.test.delayers import delayerMsgTuple from plenum.test.helper import sendMessageAndCheckDelivery, addNodeBack, assertExp -from plenum.test.msgs import randomMsg, TestMsg +from plenum.test.msgs import TestMsg from plenum.test.test_node import TestNodeSet, checkNodesConnected, \ ensureElectionsDone, prepareNodeSet logger = getlogger() +nodeCount = 2 + @pytest.mark.skipif('sys.platform == "win32"', reason='SOV-457') -def testTestNodeDelay(tdir_for_func, tconf_for_func): - nodeNames = {"testA", "testB"} - with TestNodeSet(tconf_for_func, names=nodeNames, tmpdir=tdir_for_func) as nodes: - nodeA = nodes.getNode("testA") - nodeB = nodes.getNode("testB") - - with Looper(nodes) as looper: - looper.run(checkNodesConnected(nodes)) - - # send one message, without delay - looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB)) - - # set delay, then send another message - # and find that it doesn't arrive - delay = 5 * waits.expectedNodeToNodeMessageDeliveryTime() - nodeB.nodeIbStasher.delay( - delayerMsgTuple(delay, TestMsg, nodeA.name) - ) - with pytest.raises(AssertionError): - 
looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB)) - - # but then find that it arrives after the delay - # duration has passed - timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delay - looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB, - customTimeout=timeout)) - - # reset the delay, and find another message comes quickly - nodeB.nodeIbStasher.reset_delays_and_process_delayeds() - looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB)) +def testTestNodeDelay(looper, txnPoolNodeSet): + looper.run(checkNodesConnected(txnPoolNodeSet)) + nodeA = txnPoolNodeSet[0] + nodeB = txnPoolNodeSet[1] + # send one message, without delay + looper.run(sendMessageAndCheckDelivery(nodeA, nodeB)) + + # set delay, then send another message + # and find that it doesn't arrive + delay = 5 * waits.expectedNodeToNodeMessageDeliveryTime() + nodeB.nodeIbStasher.delay( + delayerMsgTuple(delay, TestMsg, nodeA.name) + ) + with pytest.raises(AssertionError): + looper.run(sendMessageAndCheckDelivery(nodeA, nodeB)) + + # but then find that it arrives after the delay + # duration has passed + timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delay + looper.run(sendMessageAndCheckDelivery(nodeA, nodeB, + customTimeout=timeout)) + + # reset the delay, and find another message comes quickly + nodeB.nodeIbStasher.reset_delays_and_process_delayeds() + looper.run(sendMessageAndCheckDelivery(nodeA, nodeB)) @pytest.mark.skip('Nodes use round robin primary selection') @@ -87,4 +84,4 @@ def testSelfNominationDelay(tdir_for_func): looper.run(eventually(lambda: assertExp( len(nodeA.spylog.getAll( Node.decidePrimaries.__name__)) > 0), - retryWait=1, timeout=delay)) + retryWait=1, timeout=delay)) diff --git a/plenum/test/test_dirty_read.py b/plenum/test/test_dirty_read.py index 9b29f3cc52..e386ae0de1 100644 --- a/plenum/test/test_dirty_read.py +++ b/plenum/test/test_dirty_read.py @@ -11,22 +11,24 @@ def make_node_slow(node): old = node.serviceReplicas + async def serviceReplicas(limit): for replica in node.replicas: for index, message in enumerate(list(replica.outBox)): if isinstance(message, Ordered): del replica.outBox[index] return await old(limit) + node.serviceReplicas = serviceReplicas -def test_dirty_read(looper, nodeSet, client1, wallet1): +def test_dirty_read(looper, txnPoolNodeSet, client1, wallet1): """ Tests the case when read request comes before write request is not executed on some nodes """ - slow_nodes = list(nodeSet)[2:4] + slow_nodes = list(txnPoolNodeSet)[2:4] for node in slow_nodes: logger.debug("Making node {} slow".format(node)) make_node_slow(node) @@ -37,7 +39,7 @@ def test_dirty_read(looper, nodeSet, client1, wallet1): numReqs=1)[0] received_replies = getRepliesFromClientInbox(inbox=client1.inBox, - reqId=set_request.reqId) + reqId=set_request.reqId) seq_no = received_replies[0]["result"]["seqNo"] get_request = [wallet1.signOp({ diff --git a/plenum/test/test_memory_consumpion.py b/plenum/test/test_memory_consumpion.py index cba0afd5be..77ba471f9d 100644 --- a/plenum/test/test_memory_consumpion.py +++ b/plenum/test/test_memory_consumpion.py @@ -7,7 +7,6 @@ ensureClientConnectedToNodesAndPoolLedgerSame from plenum.test.pool_transactions.helper import buildPoolClientAndWallet - logger = getlogger() diff --git a/plenum/test/test_node.py b/plenum/test/test_node.py index 9e33c89c64..d91c5bd4e1 100644 --- a/plenum/test/test_node.py +++ b/plenum/test/test_node.py @@ -62,7 +62,7 @@ class TestCoreAuthnr(CoreAuthNr): class TestDomainRequestHandler(DomainRequestHandler): - 
write_types = DomainRequestHandler.write_types.union({'buy', 'randombuy',}) + write_types = DomainRequestHandler.write_types.union({'buy', 'randombuy', }) query_types = DomainRequestHandler.query_types.union({'get_buy', }) @staticmethod @@ -111,13 +111,13 @@ def __init__(self, *args, **kwargs): # is among the set of suspicion codes mapped to its name. If the set of # suspicion codes is empty then the node would not be blacklisted for # any suspicion code - self.whitelistedNodes = {} # type: Dict[str, Set[int]] + self.whitelistedNodes = {} # type: Dict[str, Set[int]] # Clients that wont be blacklisted by this node if the suspicion code # is among the set of suspicion codes mapped to its name. If the set of # suspicion codes is empty then the client would not be blacklisted for # suspicion code - self.whitelistedClients = {} # type: Dict[str, Set[int]] + self.whitelistedClients = {} # type: Dict[str, Set[int]] # Reinitialize the monitor d, l, o = self.monitor.Delta, self.monitor.Lambda, self.monitor.Omega @@ -235,7 +235,7 @@ def whitelistNode(self, nodeName: str, *codes: int): logger.debug("{} whitelisting {} for codes {}" .format(self, nodeName, codes)) - def blacklistNode(self, nodeName: str, reason: str=None, code: int=None): + def blacklistNode(self, nodeName: str, reason: str = None, code: int = None): if nodeName in self.whitelistedClients: # If node whitelisted for all codes if len(self.whitelistedClients[nodeName]) == 0: @@ -253,7 +253,7 @@ def whitelistClient(self, clientName: str, *codes: int): .format(self, clientName, codes)) def blacklistClient(self, clientName: str, - reason: str=None, code: int=None): + reason: str = None, code: int = None): if clientName in self.whitelistedClients: # If node whitelisted for all codes if len(self.whitelistedClients[clientName]) == 0: @@ -388,6 +388,7 @@ def sendRepliesToClients(self, committedTxns, ppTime): txn[STATE_PROOF] = proof super().sendRepliesToClients(committedTxns, ppTime) + elector_spyables = [ PrimaryElector.discard, PrimaryElector.processPrimary, @@ -423,12 +424,12 @@ class TestPrimarySelector(PrimarySelector): ViewChanger.startViewChange ] + @spyable(methods=view_changer_spyables) class TestViewChanger(ViewChanger): pass - replica_spyables = [ replica.Replica.sendPrePrepare, replica.Replica._can_process_pre_prepare, @@ -473,13 +474,13 @@ class TestNodeSet(ExitStack): def __init__(self, config, - names: Iterable[str]=None, - count: int=None, + names: Iterable[str] = None, + count: int = None, nodeReg=None, tmpdir=None, keyshare=True, primaryDecider=None, - pluginPaths: Iterable[str]=None, + pluginPaths: Iterable[str] = None, testNodeClass=TestNode): super().__init__() @@ -500,8 +501,8 @@ def __init__(self, self.nodeReg = nodeReg else: nodeNames = (names if names is not None and count is None else - genNodeNames(count) if count is not None else - error("only one of either names or count is required")) + genNodeNames(count) if count is not None else + error("only one of either names or count is required")) self.nodeReg = genNodeReg( names=nodeNames) # type: Dict[str, NodeDetail] for name in self.nodeReg.keys(): @@ -713,12 +714,12 @@ async def checkNodesConnected(nodes: Iterable[TestNode], acceptableExceptions=[AssertionError, RemoteNotFound]) -def checkNodeRemotes(node: TestNode, states: Dict[str, RemoteState]=None, +def checkNodeRemotes(node: TestNode, states: Dict[str, RemoteState] = None, state: RemoteState = None): assert states or state, "either state or states is required" assert not ( - states and state), "only one of state 
or states should be provided, " \ - "but not both" + states and state), "only one of state or states should be provided, " \ + "but not both" for remote in node.nodestack.remotes.values(): try: s = states[remote.name] if states else state @@ -743,8 +744,8 @@ def checkIfSameReplicaIPrimary(looper: Looper, def checkElectionDone(): unknowns = [r for r in replicas if r.primaryName is None] assert len(unknowns) == 0, "election should be complete, " \ - "but {} out of {} ({}) don't know who the primary " \ - "is for protocol instance {}".\ + "but {} out of {} ({}) don't know who the primary " \ + "is for protocol instance {}". \ format(len(unknowns), len(replicas), unknowns, replicas[0].instId) def checkPrisAreOne(): # number of expected primaries @@ -757,6 +758,7 @@ def checkPrisAreSame(): assert len(pris) == 1, "Primary should be same for all, but were {} " \ "for protocol no {}" \ .format(pris, replicas[0].instId) + looper.run( eventuallyAll(checkElectionDone, checkPrisAreOne, checkPrisAreSame, retryWait=retryWait, totalTimeout=timeout)) @@ -767,7 +769,7 @@ def checkNodesAreReady(nodes: Sequence[TestNode]): assert node.isReady(), '{} has status {}'.format(node, node.status) -async def checkNodesParticipating(nodes: Sequence[TestNode], timeout: int=None): +async def checkNodesParticipating(nodes: Sequence[TestNode], timeout: int = None): # TODO is this used? If so - add timeout for it to plenum.test.waits if not timeout: timeout = .75 * len(nodes) @@ -784,7 +786,6 @@ def checkEveryProtocolInstanceHasOnlyOnePrimary(looper: Looper, retryWait: float = None, timeout: float = None, numInstances: int = None): - coro = eventually(instances, nodes, numInstances, retryWait=retryWait, timeout=timeout) insts, timeConsumed = timeThis(looper.run, coro) @@ -818,7 +819,6 @@ def checkProtocolInstanceSetup(looper: Looper, retryWait: float = 1, customTimeout: float = None, numInstances: int = None): - timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes)) checkEveryProtocolInstanceHasOnlyOnePrimary(looper=looper, @@ -890,16 +890,16 @@ def extractCliNodeReg(self): return nodeReg -def prepareNodeSet(looper: Looper, nodeSet: TestNodeSet): +def prepareNodeSet(looper: Looper, txnPoolNodeSet): # TODO: Come up with a more specific name for this # Key sharing party - looper.run(checkNodesConnected(nodeSet)) + looper.run(checkNodesConnected(txnPoolNodeSet)) # Remove all the nodes - for n in list(nodeSet.nodes.keys()): - looper.removeProdable(nodeSet.nodes[n]) - nodeSet.removeNode(n) + for n in list(txnPoolNodeSet): + looper.removeProdable(txnPoolNodeSet) + txnPoolNodeSet.remove(n) def checkViewChangeInitiatedForNode(node: TestNode, proposedViewNo: int): @@ -926,7 +926,7 @@ def timeThis(func, *args, **kwargs): def instances(nodes: Sequence[Node], numInstances: int = None) -> Dict[int, List[replica.Replica]]: numInstances = (getRequiredInstances(len(nodes)) - if numInstances is None else numInstances) + if numInstances is None else numInstances) for n in nodes: assert len(n.replicas) == numInstances return {i: [n.replicas[i] for n in nodes] for i in range(numInstances)} @@ -1020,7 +1020,7 @@ def check_node_disconnected(disconnected: TestNode, def ensure_node_disconnected(looper: Looper, disconnected: TestNode, other_nodes: Iterable[TestNode], - timeout: float=None): + timeout: float = None): timeout = timeout or (len(other_nodes) - 1) looper.run(eventually(check_node_disconnected, disconnected, other_nodes, retryWait=1, timeout=timeout)) diff --git a/plenum/test/test_node_basic.py 
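# The check* helpers above (checkNodeRemotes, checkElectionDone, ...) are plain
# assertion functions that tests poll through eventually/eventuallyAll until they
# pass or a timeout expires. A bare-bones synchronous sketch of that retry loop,
# illustrative only, not stp_core's eventually:
import time

def retry_until(check, timeout, retry_wait=1.0):
    """Call `check` until it stops raising AssertionError or `timeout` runs out."""
    deadline = time.monotonic() + timeout
    while True:
        try:
            return check()
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            time.sleep(retry_wait)

state = {"primary": None}

def check_election_done():
    assert state["primary"] is not None, "election should be complete"
    return state["primary"]

state["primary"] = "Alpha:0"    # pretend the election finished
assert retry_until(check_election_done, timeout=5, retry_wait=0.1) == "Alpha:0"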
b/plenum/test/test_node_basic.py index b2abbd2b31..c35317f520 100644 --- a/plenum/test/test_node_basic.py +++ b/plenum/test/test_node_basic.py @@ -13,12 +13,12 @@ @pytest.fixture(scope="module") -def pool(looper, nodeSet): +def pool(looper, txnPoolNodeSet): # for n in nodeSet: # type: TestNode # n.startKeySharing() - looper.run(checkNodesConnected(nodeSet)) - checkProtocolInstanceSetup(looper, nodeSet) - return adict(looper=looper, nodeset=nodeSet) + looper.run(checkNodesConnected(txnPoolNodeSet)) + checkProtocolInstanceSetup(looper, txnPoolNodeSet) + return adict(looper=looper, nodeset=txnPoolNodeSet) def testConnectNodes(pool): @@ -29,14 +29,13 @@ def testAllBroadcast(pool): pool.looper.run(msgAll(pool.nodeset)) -def testMsgSendingTime(pool, nodeReg): - nodeNames = list(nodeReg.keys()) +def testMsgSendingTime(pool): + nodes = pool.nodeset msg = randomMsg() timeout = waits.expectedNodeStartUpTimeout() pool.looper.run( - sendMessageAndCheckDelivery(pool.nodeset, - nodeNames[0], - nodeNames[1], + sendMessageAndCheckDelivery(nodes[0], + nodes[1], msg, customTimeout=timeout)) @@ -63,7 +62,7 @@ def testCorrectNumOfReplicas(pool): for instId in getProtocolInstanceNums(node): # num of replicas for a instance on a node must be 1 assert len([node.replicas[instId]]) == 1 and \ - node.replicas[instId].instId == instId + node.replicas[instId].instId == instId # num of primary on every protocol instance is 1 numberOfPrimary = len([node for node in pool.nodeset if node.replicas[instId].isPrimary]) diff --git a/plenum/test/test_node_request.py b/plenum/test/test_node_request.py index d28b847f4c..75938e78cb 100644 --- a/plenum/test/test_node_request.py +++ b/plenum/test/test_node_request.py @@ -44,6 +44,7 @@ async def chk(): assert result else: assert result is False + timeout = waits.expectedOrderingTime( nodeSet.nodes['Alpha'].instances.count) looper.run(eventually(chk, timeout=timeout)) @@ -133,8 +134,9 @@ async def checkIfPropagateRecvdFromNode(recvrNode: TestNode, assert senderNode.name in recvrNode.requests[key].propagates -def testClientSendingSameRequestAgainBeforeFirstIsProcessed(looper, nodeSet, - up, wallet1, +def testClientSendingSameRequestAgainBeforeFirstIsProcessed(looper, + txnPoolNodeSet, + wallet1, client1): size = len(client1.inBox) req = sendRandomRequest(wallet1, client1) diff --git a/plenum/test/test_performance.py b/plenum/test/test_performance.py index 1051d64116..086b6f04b1 100644 --- a/plenum/test/test_performance.py +++ b/plenum/test/test_performance.py @@ -25,8 +25,7 @@ # noinspection PyUnresolvedReferences from plenum.test.node_catchup.conftest import whitelist, \ nodeCreatedAfterSomeTxns, nodeSetWithNodeAddedAfterSomeTxns, newNodeCaughtUp -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected + @pytest.fixture def logger(): @@ -36,13 +35,13 @@ def logger(): yield logger logger.root.setLevel(old_value) + # autouse and inject before others in all tests pytestmark = pytest.mark.usefixtures("logger") txnCount = 5 TestRunningTimeLimitSec = math.inf - """ Since these tests expect performance to be of certain level, they can fail and for now should only be run when a perf check is required, like after a relevant @@ -92,7 +91,6 @@ def test_node_load_consistent_time(tconf, change_checkpoint_freq, disable_node_monitor_config, looper, txnPoolNodeSet, tdirWithPoolTxns, allPluginsPath, poolTxnStewardData, capsys): - # One of the reason memory grows is because spylog grows client, wallet = 
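# The logger fixture above tweaks the root log level and restores it on teardown,
# and pytestmark applies it to every test in the module. The same pattern in a
# self-contained form (standard pytest and logging only; names are illustrative):
import logging
import pytest

@pytest.fixture
def quiet_logging():
    root = logging.getLogger()
    old_level = root.level
    root.setLevel(logging.WARNING)      # silence chatty output during the test
    yield root
    root.setLevel(old_level)            # teardown: restore the previous level

pytestmark = pytest.mark.usefixtures("quiet_logging")

def test_runs_with_reduced_logging():
    logging.getLogger(__name__).debug("suppressed while the fixture is active")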
buildPoolClientAndWallet(poolTxnStewardData, tdirWithPoolTxns, @@ -173,7 +171,7 @@ def test_node_load_consistent_time(tconf, change_checkpoint_freq, def test_node_load_after_add(newNodeCaughtUp, txnPoolNodeSet, tconf, tdirWithPoolTxns, allPluginsPath, poolTxnStewardData, looper, client1, wallet1, - client1Connected, capsys): + capsys): """ A node that restarts after some transactions should eventually get the transactions which happened while it was down @@ -202,7 +200,6 @@ def test_node_load_after_add(newNodeCaughtUp, txnPoolNodeSet, tconf, @skipper def test_node_load_after_add_then_disconnect(newNodeCaughtUp, txnPoolNodeSet, tconf, looper, client1, wallet1, - client1Connected, tdirWithPoolTxns, allPluginsPath, poolTxnStewardData, capsys): """ @@ -253,7 +250,7 @@ def test_node_load_after_add_then_disconnect(newNodeCaughtUp, txnPoolNodeSet, # Not accurate timeout but a conservative one timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet)) + \ - 2 * delay_catchup_reply + 2 * delay_catchup_reply waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4], customTimeout=timeout) @@ -274,7 +271,6 @@ def test_nodestack_contexts_are_discrete(txnPoolNodeSet): def test_node_load_after_disconnect(looper, txnPoolNodeSet, tconf, tdirWithPoolTxns, allPluginsPath, poolTxnStewardData, capsys): - client, wallet = buildPoolClientAndWallet(poolTxnStewardData, tdirWithPoolTxns, clientClass=TestClient) @@ -319,7 +315,6 @@ def test_node_load_after_one_node_drops_all_msgs( allPluginsPath, poolTxnStewardData, capsys): - client, wallet = buildPoolClientAndWallet(poolTxnStewardData, tdirWithPoolTxns, clientClass=TestClient) diff --git a/plenum/test/test_req_authenticator.py b/plenum/test/test_req_authenticator.py index dd4d4c8788..ec675b3398 100644 --- a/plenum/test/test_req_authenticator.py +++ b/plenum/test/test_req_authenticator.py @@ -18,7 +18,6 @@ def pre_reqs(): return simple_authnr, core_authnr, req_authnr - @pytest.fixture(scope='module') def registration(pre_reqs): simple_authnr, core_authnr, req_authnr = pre_reqs diff --git a/plenum/test/test_request_executed_once_and_without_failing_behind.py b/plenum/test/test_request_executed_once_and_without_failing_behind.py index 6cb7304567..36c2533f13 100644 --- a/plenum/test/test_request_executed_once_and_without_failing_behind.py +++ b/plenum/test/test_request_executed_once_and_without_failing_behind.py @@ -19,7 +19,8 @@ def send(msg, stat=None): def test_request_executed_once_and_without_failing_behind(tconf, looper, - nodeSet, client1, + txnPoolNodeSet, + client1, wallet1): """ Checks that all requests executed only once and without failing behind in @@ -36,7 +37,7 @@ def test_request_executed_once_and_without_failing_behind(tconf, looper, number_of_requests = 5 tconf.CHK_FREQ = 1 - for node in nodeSet: + for node in txnPoolNodeSet: for replica in node.replicas: set_checkpoint_faking(replica) @@ -50,7 +51,7 @@ def test_request_executed_once_and_without_failing_behind(tconf, looper, requests=[request]) expected = [request.reqId for request in requests] - for node in nodeSet: - real_ledger_state = [txn[1]["reqId"] - for txn in node.getLedger(DOMAIN_LEDGER_ID).getAllTxn()] + for node in txnPoolNodeSet: + real_ledger_state = [txn[1]['reqId'] + for txn in node.getLedger(DOMAIN_LEDGER_ID).getAllTxn() if 'reqId' in txn[1]] assert expected == real_ledger_state diff --git a/plenum/test/test_round_trip_with_one_faulty_node.py b/plenum/test/test_round_trip_with_one_faulty_node.py index 2605402f9f..2022896961 100644 --- 
a/plenum/test/test_round_trip_with_one_faulty_node.py +++ b/plenum/test/test_round_trip_with_one_faulty_node.py @@ -13,13 +13,12 @@ # noinspection PyIncorrectDocstring @pytest.fixture("module") -def node_doesnt_propagate(startedNodes): +def node_doesnt_propagate(txnPoolNodeSet): """ Makes the node named Alpha in the given set of nodes faulty. After applying this behavior, the node Alpha no longer sends propagate requests. """ - nodes = startedNodes def evilProcessPropagate(self, msg, frm): logger.info("TEST: Evil {} is not processing PROPAGATE".format(self)) @@ -29,7 +28,7 @@ def evilPropagateRequest(self, request, clientName): format(self)) # Choosing a node which will not be primary - node = nodes.Delta + node = txnPoolNodeSet[3] epp = types.MethodType(evilProcessPropagate, node) node.nodeMsgRouter.routes[Propagate] = epp node.processPropagate = epp @@ -49,4 +48,3 @@ def testRequestFullRoundTrip(node_doesnt_propagate, replied1): still be able to successfully complete a full cycle. """ pass - diff --git a/plenum/test/test_stack.py b/plenum/test/test_stack.py index 12f77cba25..da55d4ab13 100644 --- a/plenum/test/test_stack.py +++ b/plenum/test/test_stack.py @@ -14,7 +14,6 @@ logger = getlogger() - BaseStackClass = ZStack @@ -62,7 +61,7 @@ def checkIfConnectedTo(self, count=None): async def ensureConnectedToNodes(self, customTimeout=None, count=None): timeout = customTimeout or \ - waits.expectedClientToPoolConnectionTimeout(len(self.nodeReg)) + waits.expectedClientToPoolConnectionTimeout(len(self.nodeReg)) logger.debug( "waiting for {} seconds to check client connections to " @@ -109,7 +108,7 @@ def getTestableStack(stack: NetworkInterface): JOINED = RemoteState(isConnected=False) -def checkState(state: RemoteState, obj: Any, details: str=None): +def checkState(state: RemoteState, obj: Any, details: str = None): if state is not None: checkedItems = {} for key, s in state._asdict().items(): diff --git a/plenum/test/test_state_regenerated_from_ledger.py b/plenum/test/test_state_regenerated_from_ledger.py index 06f6e145e5..6bb014c49d 100644 --- a/plenum/test/test_state_regenerated_from_ledger.py +++ b/plenum/test/test_state_regenerated_from_ledger.py @@ -5,9 +5,6 @@ from plenum.test.helper import send_reqs_batches_and_get_suff_replies from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data, \ waitNodeDataEquality - -from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ - client1, wallet1, client1Connected from plenum.test.test_node import checkNodesConnected, TestNode from plenum.common.config_helper import PNodeConfigHelper from stp_core.types import HA @@ -20,7 +17,6 @@ def test_state_regenerated_from_ledger( txnPoolNodeSet, client1, wallet1, - client1Connected, tdir, tconf, allPluginsPath): diff --git a/plenum/test/test_testable.py b/plenum/test/test_testable.py index 59ae63ae7a..7093a4f21f 100644 --- a/plenum/test/test_testable.py +++ b/plenum/test/test_testable.py @@ -6,7 +6,6 @@ from plenum.server.node import Node from plenum.test.testable import spyable - pr = slice(3, 5) # params and result logger = getlogger() diff --git a/plenum/test/test_verif_merkle_proof.py b/plenum/test/test_verif_merkle_proof.py deleted file mode 100644 index 37fc85d764..0000000000 --- a/plenum/test/test_verif_merkle_proof.py +++ /dev/null @@ -1,19 +0,0 @@ -import pytest - -from plenum.client.client import Client -from plenum.test.helper import waitForSufficientRepliesForRequests, \ - sendRandomRequest -from plenum.test.test_client import TestClient - - -def 
testMerkleProofForFirstLeaf(client1: TestClient, replied1): - replies = client1.getRepliesFromAllNodes(*replied1.key).values() - assert Client.verifyMerkleProof(*replies) - - -def testMerkleProofForNonFirstLeaf( - looper, nodeSet, wallet1, client1, replied1): - req2 = sendRandomRequest(wallet1, client1) - waitForSufficientRepliesForRequests(looper, client1, requests=[req2]) - replies = client1.getRepliesFromAllNodes(*req2.key).values() - assert Client.verifyMerkleProof(*replies) diff --git a/plenum/test/testable.py b/plenum/test/testable.py index a4dea73f6b..668520da3f 100644 --- a/plenum/test/testable.py +++ b/plenum/test/testable.py @@ -126,12 +126,12 @@ def decorator(clas): if callable(getattr(clas, method))]: isInit = nm == "__init__" matched = (nm if methods and nm in methods else - func if methods and func in methods else - None) + func if methods and func in methods else + None) # if method was specified to be spied on or is `__init__` method # or is does not have name starting with `__` shouldSpy = bool(matched) if methods else ( - not nm.startswith("__") or isInit) + not nm.startswith("__") or isInit) if shouldSpy or isInit: newFunc = spy(func, isInit, shouldSpy) morphed[func] = newFunc diff --git a/plenum/test/validator_info/test_validator_info.py b/plenum/test/validator_info/test_validator_info.py index 5b9d5301fe..23fb486b67 100644 --- a/plenum/test/validator_info/test_validator_info.py +++ b/plenum/test/validator_info/test_validator_info.py @@ -16,13 +16,11 @@ sendRandomRequest, check_sufficient_replies_received # noinspection PyUnresolvedReferences from plenum.test.node_catchup.helper import ensureClientConnectedToNodesAndPoolLedgerSame -from plenum.test.pool_transactions.conftest import steward1, stewardWallet, client1Connected # noqa from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected from plenum.test.test_client import genTestClient from stp_core.common.constants import ZMQ_NETWORK_PROTOCOL from stp_core.loop.eventually import eventually - TEST_NODE_NAME = 'Alpha' INFO_FILENAME = '{}_info.json'.format(TEST_NODE_NAME.lower()) PERIOD_SEC = 1 @@ -242,6 +240,7 @@ def read_wrapped(txn_type): retryWait=1, timeout=timeout)) txnPoolNodesLooper.runFor(patched_dump_info_period) return load_info(info_path) + return read_wrapped @@ -257,6 +256,7 @@ def write_wrapped(): waitForSufficientRepliesForRequests(txnPoolNodesLooper, client, requests=[req]) txnPoolNodesLooper.runFor(patched_dump_info_period) return load_info(info_path) + return write_wrapped @@ -265,6 +265,7 @@ def load_latest_info(txnPoolNodesLooper, patched_dump_info_period, info_path): def wrapped(): txnPoolNodesLooper.runFor(patched_dump_info_period + 1) return load_info(info_path) + return wrapped diff --git a/plenum/test/view_change/conftest.py b/plenum/test/view_change/conftest.py index cc77cdbae5..1680317212 100644 --- a/plenum/test/view_change/conftest.py +++ b/plenum/test/view_change/conftest.py @@ -4,9 +4,9 @@ @pytest.fixture() -def viewNo(nodeSet): +def viewNo(txnPoolNodeSet): viewNos = set() - for n in nodeSet: + for n in txnPoolNodeSet: viewNos.add(n.viewNo) assert len(viewNos) == 1 return viewNos.pop() diff --git a/plenum/test/view_change/helper.py b/plenum/test/view_change/helper.py index 6258da846a..3ea2359158 100644 --- a/plenum/test/view_change/helper.py +++ b/plenum/test/view_change/helper.py @@ -35,7 +35,6 @@ def start_stopped_node(stopped_node, looper, tconf, def provoke_and_check_view_change(nodes, newViewNo, wallet, client): - if {n.viewNo for n in nodes} == 
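# The validator_info fixtures above (read_wrapped, write_wrapped, load_latest_info)
# all return inner functions, so a test can re-trigger the action as often as it
# likes. That "factory fixture" pattern in plain pytest terms (illustrative names,
# nothing plenum-specific):
import json
import pytest

@pytest.fixture
def load_latest_info(tmp_path):
    info_path = tmp_path / "alpha_info.json"
    info_path.write_text(json.dumps({"alias": "Alpha", "view_no": 0}))

    def wrapped():
        # every call re-reads the file, so callers always see the latest dump
        return json.loads(info_path.read_text())

    return wrapped

def test_info_is_reloaded_each_call(load_latest_info):
    assert load_latest_info()["alias"] == "Alpha"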
{newViewNo}: return True @@ -69,12 +68,12 @@ def provoke_and_wait_for_view_change(looper, timeout=timeout)) -def simulate_slow_master(looper, nodeSet, wallet, +def simulate_slow_master(looper, txnPoolNodeSet, wallet, client, delay=10, num_reqs=4): - m_primary_node = get_master_primary_node(list(nodeSet.nodes.values())) + m_primary_node = get_master_primary_node(list(txnPoolNodeSet)) # Delay processing of PRE-PREPARE from all non primary replicas of master # so master's performance falls and view changes - delayNonPrimaries(nodeSet, 0, delay) + delayNonPrimaries(txnPoolNodeSet, 0, delay) sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, num_reqs) return m_primary_node @@ -124,45 +123,45 @@ def slow_master(self): def ensure_several_view_change(looper, nodes, vc_count=1, exclude_from_check=None, custom_timeout=None): - """ - This method patches the master performance check to return False and thus - ensures that all given nodes do a view change - Also, this method can do several view change. - If you try do several view_change by calling ensure_view_change, - than monkeypatching method isMasterDegraded would work unexpectedly. - Therefore, we return isMasterDegraded only after doing view_change needed count - """ - old_meths = {} - view_changes = {} - expected_view_no = None - for node in nodes: - old_meths[node.name] = node.monitor.isMasterDegraded - - for __ in range(vc_count): - old_view_no = checkViewNoForNodes(nodes) - expected_view_no = old_view_no + 1 - - for node in nodes: - view_changes[node.name] = node.monitor.totalViewChanges - - def slow_master(self): - # Only allow one view change - rv = self.totalViewChanges == view_changes[self.name] - if rv: - logger.info('{} making master look slow'.format(self)) - return rv - - node.monitor.isMasterDegraded = types.MethodType(slow_master, node.monitor) - - perf_check_freq = next(iter(nodes)).config.PerfCheckFreq - timeout = custom_timeout or waits.expectedPoolViewChangeStartedTimeout(len(nodes)) + perf_check_freq - nodes_to_check = nodes if exclude_from_check is None else [n for n in nodes if n not in exclude_from_check] - logger.debug('Checking view no for nodes {}'.format(nodes_to_check)) - looper.run(eventually(checkViewNoForNodes, nodes_to_check, expected_view_no, retryWait=1, timeout=timeout)) - ensureElectionsDone(looper=looper, nodes=nodes, customTimeout=timeout) - ensure_all_nodes_have_same_data(looper, nodes, custom_timeout=timeout, exclude_from_check=exclude_from_check) - - return expected_view_no + """ + This method patches the master performance check to return False and thus + ensures that all given nodes do a view change + Also, this method can do several view change. + If you try do several view_change by calling ensure_view_change, + than monkeypatching method isMasterDegraded would work unexpectedly. 
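# The docstring above describes the trick: rebind isMasterDegraded on each node's
# monitor so the master looks slow until exactly one more view change is counted.
# A toy, self-contained illustration with types.MethodType (ToyMonitor is a made-up
# stand-in, not plenum's Monitor):
import types

class ToyMonitor:
    def __init__(self):
        self.totalViewChanges = 0

    def isMasterDegraded(self):
        return False        # the real check compares master vs backup throughput

monitor = ToyMonitor()
baseline = monitor.totalViewChanges

def slow_master(self):
    # report degradation only until the next view change has been counted
    return self.totalViewChanges == baseline

monitor.isMasterDegraded = types.MethodType(slow_master, monitor)

assert monitor.isMasterDegraded() is True    # this is what provokes the view change
monitor.totalViewChanges += 1                # ... the view change completes
assert monitor.isMasterDegraded() is False   # and no further ones are forced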
+ Therefore, we return isMasterDegraded only after doing view_change needed count + """ + old_meths = {} + view_changes = {} + expected_view_no = None + for node in nodes: + old_meths[node.name] = node.monitor.isMasterDegraded + + for __ in range(vc_count): + old_view_no = checkViewNoForNodes(nodes) + expected_view_no = old_view_no + 1 + + for node in nodes: + view_changes[node.name] = node.monitor.totalViewChanges + + def slow_master(self): + # Only allow one view change + rv = self.totalViewChanges == view_changes[self.name] + if rv: + logger.info('{} making master look slow'.format(self)) + return rv + + node.monitor.isMasterDegraded = types.MethodType(slow_master, node.monitor) + + perf_check_freq = next(iter(nodes)).config.PerfCheckFreq + timeout = custom_timeout or waits.expectedPoolViewChangeStartedTimeout(len(nodes)) + perf_check_freq + nodes_to_check = nodes if exclude_from_check is None else [n for n in nodes if n not in exclude_from_check] + logger.debug('Checking view no for nodes {}'.format(nodes_to_check)) + looper.run(eventually(checkViewNoForNodes, nodes_to_check, expected_view_no, retryWait=1, timeout=timeout)) + ensureElectionsDone(looper=looper, nodes=nodes, customTimeout=timeout) + ensure_all_nodes_have_same_data(looper, nodes, custom_timeout=timeout, exclude_from_check=exclude_from_check) + + return expected_view_no def ensure_view_change_by_primary_restart( @@ -212,7 +211,7 @@ def check_each_node_reaches_same_end_for_view(nodes, view_no): for node in nodes: params = [e.params for e in node.replicas[0].spylog.getAll( node.replicas[0].primary_changed.__name__) - if e.params['view_no'] == view_no] + if e.params['view_no'] == view_no] assert params args[node.name] = (params[0]['last_ordered_pp_seq_no'], params[0]['ledger_summary']) @@ -279,12 +278,12 @@ def ensure_view_change_complete_by_primary_restart( return nodes -def view_change_in_between_3pc(looper, nodes, slow_nodes, wallet, client, +def view_change_in_between_3pc(looper, nodes, slow_nodes, wallet, client1, slow_delay=1, wait=None): - send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client, 4) + send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client1, 4) delay_3pc_messages(slow_nodes, 0, delay=slow_delay) - sendRandomRequests(wallet, client, 10) + sendRandomRequests(wallet, client1, 10) if wait: looper.runFor(wait) @@ -293,27 +292,27 @@ def view_change_in_between_3pc(looper, nodes, slow_nodes, wallet, client, reset_delays_and_process_delayeds(slow_nodes) sendReqsToNodesAndVerifySuffReplies( - looper, wallet, client, 5, total_timeout=30) + looper, wallet, client1, 5, total_timeout=30) send_reqs_to_nodes_and_verify_all_replies( - looper, wallet, client, 5, total_timeout=30) + looper, wallet, client1, 5, total_timeout=30) def view_change_in_between_3pc_random_delays( looper, nodes, slow_nodes, - wallet, - client, + wallet1, + client1, tconf, min_delay=0, max_delay=0): - send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client, 4) + send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 4) # max delay should not be more than catchup timeout. 
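# The comment above caps the random 3PC delay below the catchup timeout so the
# view change can still complete. A small standalone illustration of choosing a
# per-message delay under that cap (the config value is an assumed example):
import random

MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE = 15   # assumed value for the sketch

def pick_delay(min_delay=0, max_delay=0):
    cap = MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE - 1
    effective_max = min(max_delay, cap) if max_delay else cap
    return random.uniform(min_delay, effective_max)

assert 0 <= pick_delay(min_delay=0, max_delay=10) <= 14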
max_delay = max_delay or tconf.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE - 1 delay_3pc_messages(slow_nodes, 0, min_delay=min_delay, max_delay=max_delay) - sendRandomRequests(wallet, client, 10) + sendRandomRequests(wallet1, client1, 10) ensure_view_change_complete(looper, nodes, @@ -322,4 +321,4 @@ def view_change_in_between_3pc_random_delays( reset_delays_and_process_delayeds(slow_nodes) - send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client, 10) + send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 10) diff --git a/plenum/test/view_change/slow_nodes/conftest.py b/plenum/test/view_change/slow_nodes/conftest.py deleted file mode 100644 index c3a45be43c..0000000000 --- a/plenum/test/view_change/slow_nodes/conftest.py +++ /dev/null @@ -1,6 +0,0 @@ -import pytest - - -@pytest.fixture(scope="module") -def client(looper, txnPoolNodeSet, client1, client1Connected): - return client1Connected diff --git a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py index 395648cb98..394f076e4a 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_new_primary.py @@ -1,7 +1,5 @@ from plenum.test.test_node import get_last_master_non_primary_node, get_first_master_non_primary_node from plenum.test.view_change.helper import view_change_in_between_3pc -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper def slow_nodes(node_set): @@ -10,7 +8,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the the first and the last non-primary node @@ -20,11 +18,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client) + wallet1, client1) def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary_long_delay( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the the first and the last non-primary node @@ -34,5 +32,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_new_primary_long_delay( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client, + wallet1, client1, slow_delay=20) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py index 8241dcc43a..bf765107d8 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_non_primary.py @@ -1,7 +1,5 @@ from plenum.test.test_node import getNonPrimaryReplicas from plenum.test.view_change.helper import view_change_in_between_3pc -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper def slow_nodes(node_set): @@ -9,7 +7,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, 
wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f). - Both nodes are non-primary for master neither in this nor the next view @@ -17,11 +15,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client) + wallet1, client1) def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary_long_delay( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f). - Both nodes are non-primary for master neither in this nor the next view @@ -29,5 +27,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_non_primary_long_delay( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client, + wallet1, client1, slow_delay=20) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py index f802a78201..e6832d10e2 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_and_new_primary.py @@ -1,7 +1,5 @@ from plenum.test.test_node import get_master_primary_node, get_first_master_non_primary_node from plenum.test.view_change.helper import view_change_in_between_3pc -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper def slow_nodes(node_set): @@ -10,7 +8,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow both current and next primaries @@ -18,11 +16,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client) + wallet1, client1) def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary_long_delay( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow both current and next primaries @@ -30,5 +28,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_and_new_primary_long_d """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client, + wallet1, client1, slow_delay=20) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py index 7f3e5e2ef0..4fdb869599 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_2_of_4_nodes_with_old_primary.py @@ -1,7 +1,5 @@ from plenum.test.test_node import get_master_primary_node, get_last_master_non_primary_node from plenum.test.view_change.helper import view_change_in_between_3pc -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper def slow_nodes(node_set): @@ -10,7 +8,7 @@ def slow_nodes(node_set): def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary( - txnPoolNodeSet, looper, wallet1, client): 
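# The "(2>f)" note in the docstrings above is the usual BFT bound: a pool of
# n = 3f + 1 nodes masks at most f faulty ones, so with n = 4 slowing 2 nodes
# goes beyond what is guaranteed. A one-line check of that arithmetic:
def max_faulty(n: int) -> int:
    return (n - 1) // 3

assert max_faulty(4) == 1
assert 2 > max_faulty(4)     # hence the explicit 2-of-4 scenarios above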
+ txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the current Primary node and the last non-primary node (it will not @@ -19,11 +17,11 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client) + wallet1, client1) def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary_long_delay( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for 2 of 4 node (2>f) - Slow the current Primary node and the last non-primary node (it will not @@ -32,5 +30,5 @@ def test_view_change_in_between_3pc_2_of_4_nodes_with_old_primary_long_delay( """ view_change_in_between_3pc(looper, txnPoolNodeSet, slow_nodes(txnPoolNodeSet), - wallet1, client, + wallet1, client1, slow_delay=20) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_all_nodes.py b/plenum/test/view_change/slow_nodes/test_view_change_all_nodes.py index 91ec9bbb07..0cbc010156 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_all_nodes.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_all_nodes.py @@ -1,25 +1,23 @@ from plenum.test.view_change.helper import view_change_in_between_3pc -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper def test_view_change_in_between_3pc_all_nodes(txnPoolNodeSet, looper, - wallet1, client): + wallet1, client1): """ - Slow processing 3PC messages for all nodes - do view change """ view_change_in_between_3pc(looper, txnPoolNodeSet, txnPoolNodeSet, wallet1, - client) + client1) def test_view_change_in_between_3pc_all_nodes_long_delay( - txnPoolNodeSet, looper, wallet1, client): + txnPoolNodeSet, looper, wallet1, client1): """ - Slow processing 3PC messages for all nodes - do view change """ view_change_in_between_3pc(looper, txnPoolNodeSet, txnPoolNodeSet, - wallet1, client, + wallet1, client1, slow_delay=20) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_all_nodes_random_delay.py b/plenum/test/view_change/slow_nodes/test_view_change_all_nodes_random_delay.py index 5e44d1e22d..153fe5fea7 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_all_nodes_random_delay.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_all_nodes_random_delay.py @@ -1,29 +1,26 @@ from plenum.test.view_change.helper import view_change_in_between_3pc_random_delays -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper - TestRunningTimeLimitSec = 300 def test_view_change_in_between_3pc_all_nodes_random_delays( - txnPoolNodeSet, tconf, looper, wallet1, client): + txnPoolNodeSet, tconf, looper, wallet1, client1): """ - Slow processing 3PC messages for all nodes randomly - do view change """ view_change_in_between_3pc_random_delays(looper, txnPoolNodeSet, txnPoolNodeSet, - wallet1, client, tconf) + wallet1, client1, tconf) def test_view_change_in_between_3pc_all_nodes_random_delays_long_delay( - txnPoolNodeSet, looper, wallet1, client, tconf): + txnPoolNodeSet, looper, wallet1, client1, tconf): """ - Slow processing 3PC messages for all nodes randomly - do view change """ view_change_in_between_3pc_random_delays(looper, txnPoolNodeSet, txnPoolNodeSet, - wallet1, client, tconf, + wallet1, client1, tconf, min_delay=5) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_complex.py 
b/plenum/test/view_change/slow_nodes/test_view_change_complex.py index f2a8e1d2a1..ddfc8bb274 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_complex.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_complex.py @@ -1,14 +1,11 @@ from plenum.test.view_change.helper import \ view_change_in_between_3pc_random_delays -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper - TestRunningTimeLimitSec = 300 def test_view_change_complex( - txnPoolNodeSet, looper, wallet1, client, tconf): + txnPoolNodeSet, looper, wallet1, client1, tconf): """ - Complex scenario with multiple view changes """ @@ -39,7 +36,7 @@ def test_view_change_complex( txnPoolNodeSet, txnPoolNodeSet, wallet1, - client, + client1, tconf, min_delay=0, max_delay=10) @@ -48,7 +45,7 @@ def test_view_change_complex( txnPoolNodeSet, txnPoolNodeSet, wallet1, - client, + client1, tconf, min_delay=1, max_delay=5) @@ -57,6 +54,6 @@ def test_view_change_complex( txnPoolNodeSet, txnPoolNodeSet, wallet1, - client, + client1, tconf, min_delay=5) diff --git a/plenum/test/view_change/slow_nodes/test_view_change_gc_all_nodes_random_delay.py b/plenum/test/view_change/slow_nodes/test_view_change_gc_all_nodes_random_delay.py index 52f6e92d66..2366130d83 100644 --- a/plenum/test/view_change/slow_nodes/test_view_change_gc_all_nodes_random_delay.py +++ b/plenum/test/view_change/slow_nodes/test_view_change_gc_all_nodes_random_delay.py @@ -11,8 +11,6 @@ from plenum.test.delayers import delay_3pc_messages, \ reset_delays_and_process_delayeds from plenum.test.view_change.helper import ensure_view_change_complete -from plenum.test.pool_transactions.conftest import clientAndWallet1, \ - client1, wallet1, client1Connected, looper def check_nodes_last_ordered_3pc(nodes, last_ordered_3pc): @@ -28,14 +26,14 @@ def check_nodes_requests_size(nodes, size): def test_view_change_gc_in_between_3pc_all_nodes_delays( - looper, txnPoolNodeSet, wallet1, client): + looper, txnPoolNodeSet, wallet1, client1): """ Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo) and does not remove messages from node's queues that have higher viewNo than last ordered one even if their ppSeqNo are less or equal """ - numNodes = len(client.nodeReg) + numNodes = len(client1.nodeReg) viewNo = checkViewNoForNodes(txnPoolNodeSet) # 1 send two messages one by one separately to make @@ -45,8 +43,8 @@ def test_view_change_gc_in_between_3pc_all_nodes_delays( # for master instances only cause non-master ones have # specific logic of its management which we don't care in # the test, see Replica::_setup_for_non_master) - send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client, 1) - send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client, 1) + send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 1) + send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 1) last_ordered_3pc = (viewNo, 2) check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc) @@ -74,7 +72,7 @@ def test_view_change_gc_in_between_3pc_all_nodes_delays( delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2) - requests = sendRandomRequests(wallet1, client, 1) + requests = sendRandomRequests(wallet1, client1, 1) def checkPrePrepareSentAtLeastByPrimary(): for node in txnPoolNodeSet: @@ -104,7 +102,7 @@ def checkPrePrepareSentAtLeastByPrimary(): # -> they should be ordered # -> last_ordered_3pc = (+2, 1) reset_delays_and_process_delayeds(txnPoolNodeSet) - 
waitForSufficientRepliesForRequests(looper, client, + waitForSufficientRepliesForRequests(looper, client1, requests=requests) checkViewNoForNodes(txnPoolNodeSet, viewNo) diff --git a/plenum/test/view_change/test_3pc_msgs_during_view_change.py b/plenum/test/view_change/test_3pc_msgs_during_view_change.py index 7c9b2520a4..933b81105a 100644 --- a/plenum/test/view_change/test_3pc_msgs_during_view_change.py +++ b/plenum/test/view_change/test_3pc_msgs_during_view_change.py @@ -9,47 +9,47 @@ @pytest.mark.skip('Currently we stash client requests during view change') -def test_no_requests_processed_during_view_change(looper, nodeSet, +def test_no_requests_processed_during_view_change(looper, txnPoolNodeSet, client1, wallet1): - for node in nodeSet: + for node in txnPoolNodeSet: node.view_change_in_progress = True sendRandomRequests(wallet1, client1, 10) waitRejectFromPoolWithReason( looper, - nodeSet, + txnPoolNodeSet, client1, 'Can not process requests when view change is in progress') - for node in nodeSet: + for node in txnPoolNodeSet: check_replica_queue_empty(node) @pytest.mark.skip('The filter is not enabled now') def test_no_new_view_3pc_messages_processed_during_view_change( - looper, nodeSet, client1, wallet1): - for node in nodeSet: + looper, txnPoolNodeSet, client1, wallet1): + for node in txnPoolNodeSet: node.view_change_in_progress = True - new_view_no = getPrimaryReplica(nodeSet).node.viewNo + 1 + new_view_no = getPrimaryReplica(txnPoolNodeSet).node.viewNo + 1 pp_seq_no = 1 - send_pre_prepare(new_view_no, pp_seq_no, wallet1, nodeSet) + send_pre_prepare(new_view_no, pp_seq_no, wallet1, txnPoolNodeSet) looper.runFor(1) - check_all_replica_queue_empty(nodeSet) + check_all_replica_queue_empty(txnPoolNodeSet) - send_prepare(new_view_no, pp_seq_no, nodeSet) + send_prepare(new_view_no, pp_seq_no, txnPoolNodeSet) looper.runFor(1) - check_all_replica_queue_empty(nodeSet) + check_all_replica_queue_empty(txnPoolNodeSet) - send_commit(new_view_no, pp_seq_no, nodeSet) + send_commit(new_view_no, pp_seq_no, txnPoolNodeSet) looper.runFor(1) - check_all_replica_queue_empty(nodeSet) + check_all_replica_queue_empty(txnPoolNodeSet) @pytest.mark.skip('The filter is not enabled now') -def test_old_view_requests_processed_during_view_change(looper, nodeSet, +def test_old_view_requests_processed_during_view_change(looper, txnPoolNodeSet, client1, wallet1): """ Make sure that requests sent before view change started are processed and replies are returned: @@ -57,14 +57,14 @@ def test_old_view_requests_processed_during_view_change(looper, nodeSet, - send requests - check that requests are ordered despite of view change being in progress """ - for node in nodeSet: + for node in txnPoolNodeSet: node.view_change_in_progress = False node.nodeIbStasher.delay(ppgDelay(3, 0)) reqs = sendRandomRequests(wallet1, client1, 2) looper.runFor(1) - for node in nodeSet: + for node in txnPoolNodeSet: node.view_change_in_progress = True waitForSufficientRepliesForRequests(looper, client1, requests=reqs) diff --git a/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py b/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py index b21143670b..d81167e21e 100644 --- a/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py +++ b/plenum/test/view_change/test_6th_node_join_after_view_change_by_primary_restart.py @@ -6,10 +6,6 @@ from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState, POOL_LEDGER_ID from plenum.test.helper import 
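# The garbage-collection test above (test_view_change_gc_in_between_3pc_all_nodes_delays)
# checks that cleanup compares the whole 3PC key (viewNo, ppSeqNo), not ppSeqNo alone.
# Python tuples already compare lexicographically, which is the property relied on:
def should_keep(msg_key, last_ordered_3pc):
    # keep anything ordered after the last ordered (view_no, pp_seq_no)
    return msg_key > last_ordered_3pc

last_ordered = (0, 2)                           # view 0, two batches ordered
assert should_keep((1, 1), last_ordered)        # later view, smaller ppSeqNo: keep
assert not should_keep((0, 1), last_ordered)    # same view, already ordered: collect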
sendReqsToNodesAndVerifySuffReplies -from plenum.test.pool_transactions.conftest import wallet1, client1,\ -client1Connected, looper, stewardAndWallet1, steward1, \ - stewardWallet - from stp_core.common.log import getlogger from stp_core.loop.eventually import eventually from plenum.test.node_catchup.helper import check_ledger_state, \ @@ -20,7 +16,6 @@ from plenum.test import waits from plenum.common.startable import Mode - logger = getlogger() @@ -47,7 +42,7 @@ def catchuped(node): def add_new_node(looper, nodes, steward, steward_wallet, - tdir, client_tdir, tconf, all_plugins_path, name=None): + tdir, client_tdir, tconf, all_plugins_path, name=None): node_name = name or "Psi" new_steward_name = "testClientSteward" + randomString(3) new_steward, new_steward_wallet, new_node = addNewStewardAndNode(looper, @@ -68,9 +63,9 @@ def add_new_node(looper, nodes, steward, steward_wallet, def test_6th_node_join_after_view_change_by_master_restart( - looper, txnPoolNodeSet, tdir, tconf, - allPluginsPath, steward1, stewardWallet, - client_tdir, limitTestRunningTime): + looper, txnPoolNodeSet, tdir, tconf, + allPluginsPath, steward1, stewardWallet, + client_tdir, limitTestRunningTime): """ Test steps: 1. start pool of 4 nodes @@ -84,11 +79,11 @@ def test_6th_node_join_after_view_change_by_master_restart( pool_of_nodes = txnPoolNodeSet for __ in range(4): pool_of_nodes = ensure_view_change_by_primary_restart(looper, - pool_of_nodes, - tconf, - tdir, - allPluginsPath, - customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT) + pool_of_nodes, + tconf, + tdir, + allPluginsPath, + customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT) timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes)) for node in pool_of_nodes: looper.run(eventually(catchuped, node, timeout=2 * timeout)) @@ -116,11 +111,11 @@ def test_6th_node_join_after_view_change_by_master_restart( LedgerState.synced, retryWait=.5, timeout=timeout)) for __ in range(4): pool_of_nodes = ensure_view_change_by_primary_restart(looper, - pool_of_nodes, - tconf, - tdir, - allPluginsPath, - customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT) + pool_of_nodes, + tconf, + tdir, + allPluginsPath, + customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT) timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes)) for node in pool_of_nodes: @@ -139,5 +134,3 @@ def test_6th_node_join_after_view_change_by_master_restart( LedgerState.synced, retryWait=.5, timeout=5)) looper.run(eventually(check_ledger_state, new_psi_node, POOL_LEDGER_ID, LedgerState.synced, retryWait=.5, timeout=5)) - - diff --git a/plenum/test/view_change/test_diconnected_node_reconnects_after_view_change.py b/plenum/test/view_change/test_diconnected_node_reconnects_after_view_change.py index 7e3c8929aa..bff25f3762 100644 --- a/plenum/test/view_change/test_diconnected_node_reconnects_after_view_change.py +++ b/plenum/test/view_change/test_diconnected_node_reconnects_after_view_change.py @@ -2,7 +2,6 @@ from plenum.test.helper import checkViewNoForNodes, waitForViewChange, \ sdk_send_random_and_check from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.pool_transactions.conftest import looper from plenum.test.pool_transactions.helper import \ disconnect_node_and_ensure_disconnected, \ reconnect_node_and_ensure_connected diff --git a/plenum/test/view_change/test_disable_view_change.py b/plenum/test/view_change/test_disable_view_change.py index ace32874a1..1fc79173c9 100644 --- a/plenum/test/view_change/test_disable_view_change.py +++ 
b/plenum/test/view_change/test_disable_view_change.py @@ -13,8 +13,7 @@ def disable_view_change_config(tconf): def test_disable_view_change( disable_view_change_config, looper, - nodeSet, - up, + txnPoolNodeSet, viewNo, wallet1, client1): @@ -22,7 +21,7 @@ def test_disable_view_change( assert isinstance(disable_view_change_config.unsafe, set) assert 'disable_view_change' in disable_view_change_config.unsafe - simulate_slow_master(looper, nodeSet, wallet1, client1) + simulate_slow_master(looper, txnPoolNodeSet, wallet1, client1) with pytest.raises(AssertionError): - waitForViewChange(looper, nodeSet, expectedViewNo=viewNo + 1) + waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=viewNo + 1) diff --git a/plenum/test/view_change/test_discard_inst_chng_msg_from_past_view.py b/plenum/test/view_change/test_discard_inst_chng_msg_from_past_view.py index 994df2e444..9c18399952 100644 --- a/plenum/test/view_change/test_discard_inst_chng_msg_from_past_view.py +++ b/plenum/test/view_change/test_discard_inst_chng_msg_from_past_view.py @@ -5,7 +5,7 @@ # noinspection PyIncorrectDocstring -def testDiscardInstChngMsgFrmPastView(nodeSet, looper, ensureView): +def testDiscardInstChngMsgFrmPastView(txnPoolNodeSet, looper, ensureView): """ Once a view change is done, any further INSTANCE_CHANGE messages for that view must be discarded by the node. @@ -14,23 +14,23 @@ def testDiscardInstChngMsgFrmPastView(nodeSet, looper, ensureView): curViewNo = ensureView # Send an instance change for an old instance message to all nodes - icMsg = nodeSet.Alpha.view_changer._create_instance_change_msg(curViewNo, 0) - nodeSet.Alpha.send(icMsg) + icMsg = txnPoolNodeSet[0].view_changer._create_instance_change_msg(curViewNo, 0) + txnPoolNodeSet[0].send(icMsg) # ensure every node but Alpha discards the invalid instance change request - timeout = waits.expectedPoolViewChangeStartedTimeout(len(nodeSet)) + timeout = waits.expectedPoolViewChangeStartedTimeout(len(txnPoolNodeSet)) # Check that that message is discarded. - looper.run(eventually(checkDiscardMsg, nodeSet, icMsg, + looper.run(eventually(checkDiscardMsg, txnPoolNodeSet, icMsg, 'which is not more than its view no', - nodeSet.Alpha, timeout=timeout)) + txnPoolNodeSet[0], timeout=timeout)) - waitForViewChange(looper, nodeSet) + waitForViewChange(looper, txnPoolNodeSet) # noinspection PyIncorrectDocstring def testDoNotSendInstChngMsgIfMasterDoesntSeePerformanceProblem( - nodeSet, looper, ensureView): + txnPoolNodeSet, looper, ensureView): """ A node that received an INSTANCE_CHANGE message must not send an INSTANCE_CHANGE message if it doesn't observe too much difference in @@ -42,17 +42,17 @@ def testDoNotSendInstChngMsgIfMasterDoesntSeePerformanceProblem( # Count sent instance changes of all nodes sentInstChanges = {} instChngMethodName = ViewChanger.sendInstanceChange.__name__ - for n in nodeSet: + for n in txnPoolNodeSet: sentInstChanges[n.name] = n.view_changer.spylog.count(instChngMethodName) # Send an instance change message to all nodes - icMsg = nodeSet.Alpha.view_changer._create_instance_change_msg(curViewNo, 0) - nodeSet.Alpha.send(icMsg) + icMsg = txnPoolNodeSet[0].view_changer._create_instance_change_msg(curViewNo, 0) + txnPoolNodeSet[0].send(icMsg) # Check that that message is discarded. 
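# testDiscardInstChngMsgFrmPastView above exercises a simple rule: an
# INSTANCE_CHANGE proposing a view that is not greater than the node's current
# view is discarded (hence the 'which is not more than its view no' reason).
# Toy version of that check, not plenum's ViewChanger:
def should_discard_instance_change(proposed_view_no: int, current_view_no: int) -> bool:
    return proposed_view_no <= current_view_no

current_view = 1
assert should_discard_instance_change(1, current_view)        # past/current view: discard
assert not should_discard_instance_change(2, current_view)    # future view: process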
- waitForViewChange(looper, nodeSet) + waitForViewChange(looper, txnPoolNodeSet) # No node should have sent a view change and thus must not have called # `sendInstanceChange` - for n in nodeSet: + for n in txnPoolNodeSet: assert n.spylog.count(instChngMethodName) == \ - sentInstChanges.get(n.name, 0) + sentInstChanges.get(n.name, 0) diff --git a/plenum/test/view_change/test_inst_chng_msg_throttling.py b/plenum/test/view_change/test_inst_chng_msg_throttling.py index 17aa5fe8ee..4bf6e35e48 100644 --- a/plenum/test/view_change/test_inst_chng_msg_throttling.py +++ b/plenum/test/view_change/test_inst_chng_msg_throttling.py @@ -1,7 +1,7 @@ import types -def testInstChngMsgThrottling(nodeSet, looper, up, viewNo): +def testInstChngMsgThrottling(txnPoolNodeSet, looper, viewNo): """ 2 nodes out of 4 keep on sending INSTANCE_CHANGE messages as they find the master to be slow but since we need 3 out of 4 (n-f) to say that @@ -12,8 +12,8 @@ def testInstChngMsgThrottling(nodeSet, looper, up, viewNo): THE TEST BELOW SHOULD TERMINATE. IF IT DOES NOT TERMINATE THEN THE BUG IS STILL PRESENT """ - nodeA = nodeSet.Alpha - nodeB = nodeSet.Beta + nodeA = txnPoolNodeSet[0] + nodeB = txnPoolNodeSet[1] # Nodes that always find master as degraded for node in (nodeA, nodeB): node.monitor.isMasterDegraded = types.MethodType( diff --git a/plenum/test/view_change/test_instance_change_msg_checking.py b/plenum/test/view_change/test_instance_change_msg_checking.py index 4f3c430db5..2c70f5100e 100644 --- a/plenum/test/view_change/test_instance_change_msg_checking.py +++ b/plenum/test/view_change/test_instance_change_msg_checking.py @@ -10,9 +10,9 @@ whitelist = [DISCARD_REASON, ] -def testInstanceChangeMsgTypeChecking(nodeSet, looper, up): - nodeA = nodeSet.Alpha - nodeB = nodeSet.Beta +def testInstanceChangeMsgTypeChecking(txnPoolNodeSet, looper): + nodeA = txnPoolNodeSet[0] + nodeB = txnPoolNodeSet[1] ridBeta = nodeA.nodestack.getRemote(nodeB.name).uid @@ -22,7 +22,7 @@ def createInstanceChangeMessage(): goodViewNo = 1 badViewNo = "BAD" - icMsg = nodeSet.Alpha.view_changer._create_instance_change_msg(goodViewNo, 0) + icMsg = txnPoolNodeSet[0].view_changer._create_instance_change_msg(goodViewNo, 0) icMsg._fields["viewNo"] = badViewNo return icMsg diff --git a/plenum/test/view_change/test_last_ordered_reset_for_new_view.py b/plenum/test/view_change/test_last_ordered_reset_for_new_view.py index d553629bac..c22de7d38a 100644 --- a/plenum/test/view_change/test_last_ordered_reset_for_new_view.py +++ b/plenum/test/view_change/test_last_ordered_reset_for_new_view.py @@ -1,6 +1,5 @@ from plenum.test.helper import sdk_send_random_and_check, checkViewNoForNodes from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data -from plenum.test.pool_transactions.conftest import looper from plenum.test.view_change.helper import ensure_view_change_complete diff --git a/plenum/test/view_change/test_master_primary_different_from_previous.py b/plenum/test/view_change/test_master_primary_different_from_previous.py index 5a9ad58ba0..af396404de 100644 --- a/plenum/test/view_change/test_master_primary_different_from_previous.py +++ b/plenum/test/view_change/test_master_primary_different_from_previous.py @@ -6,10 +6,10 @@ sdk_send_random_and_check, countDiscarded from plenum.test.malicious_behaviors_node import slow_primary from plenum.test.test_node import getPrimaryReplica, ensureElectionsDone -from plenum.test.pool_transactions.conftest import looper from plenum.test.view_change.helper import provoke_and_wait_for_view_change,
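# testInstChngMsgThrottling above relies on the quorum arithmetic in its docstring:
# a view change needs INSTANCE_CHANGE votes from n - f nodes, so two persistent
# senders out of four can never trigger it. Toy calculation, not plenum's Quorums:
def instance_change_quorum(n: int) -> int:
    f = (n - 1) // 3
    return n - f

assert instance_change_quorum(4) == 3    # 3 of 4 must agree; 2 senders are throttled, not obeyed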
ensure_view_change
 from stp_core.common.log import getlogger
+
 logger = getlogger()
 
@@ -41,7 +41,7 @@ def test_master_primary_different_from_previous(txnPoolNodeSet, looper,
 
 @pytest.mark.skip(reason='Nodes use round robin primary selection')
 def test_master_primary_different_from_previous_view_for_itself(
-        txnPoolNodeSet, looper, client1, wallet1, client1Connected):
+        txnPoolNodeSet, looper, client1, wallet1):
     """
     After a view change, primary must be different from previous primary for
     master instance, it does not matter for other instance. Break it into
diff --git a/plenum/test/view_change/test_new_node_joins_after_view_change.py b/plenum/test/view_change/test_new_node_joins_after_view_change.py
index 513af5e2b3..e42be2a5f1 100644
--- a/plenum/test/view_change/test_new_node_joins_after_view_change.py
+++ b/plenum/test/view_change/test_new_node_joins_after_view_change.py
@@ -11,13 +11,11 @@
 from plenum.test.helper import send_reqs_to_nodes_and_verify_all_replies, \
     checkViewNoForNodes, stopNodes, sendReqsToNodesAndVerifySuffReplies
-from plenum.test.pool_transactions.conftest import clientAndWallet1, \
-    client1, wallet1, client1Connected, looper, stewardAndWallet1, steward1, \
-    stewardWallet
-from plenum.test.primary_selection.conftest import nodeThetaAdded, \
-    one_node_added
+from plenum.test.pool_transactions.conftest import nodeThetaAdded
+from plenum.test.primary_selection.conftest import one_node_added
 from stp_core.common.log import getlogger
+
 logger = getlogger()
 
@@ -28,8 +26,7 @@ def all_nodes_view_change(
         stewardWallet,
         steward1,
         client1,
-        wallet1,
-        client1Connected):
+        wallet1):
     for _ in range(5):
         send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 2)
     ensure_view_change(looper, txnPoolNodeSet)
diff --git a/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py b/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py
index ac7818b418..b532f6917f 100644
--- a/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py
+++ b/plenum/test/view_change/test_no_instance_change_before_node_is_ready.py
@@ -2,10 +2,6 @@
 
 from plenum.server.view_change.view_changer import ViewChanger
 
-from plenum.test.pool_transactions.conftest import clientAndWallet1, \
-    client1, wallet1, client1Connected, looper, stewardAndWallet1, steward1, \
-    stewardWallet
-
 from stp_core.common.log import getlogger
 
 from plenum.test.pool_transactions.helper import start_not_added_node, add_started_node
diff --git a/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py b/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py
index 2ca91d5722..d5d13b3e12 100644
--- a/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py
+++ b/plenum/test/view_change/test_node_detecting_lag_from_view_change_messages.py
@@ -11,9 +11,6 @@
 from plenum.test.test_node import getNonPrimaryReplicas
 from plenum.test.view_change.helper import ensure_view_change
 from stp_core.loop.eventually import eventually
-from plenum.test.pool_transactions.conftest import clientAndWallet1, \
-    client1, wallet1, client1Connected, looper
-
 
 TestRunningTimeLimitSec = 150
 
@@ -22,7 +19,6 @@ def test_node_detecting_lag_from_view_change_done_messages(txnPoolNodeSet,
                                                            looper,
                                                            wallet1,
                                                            client1,
-                                                           client1Connected,
                                                            tconf):
     """
     A node is slow and after view change starts, it marks it's `last_prepared`
diff --git a/plenum/test/view_change/test_pp_seq_no_starts_from_1.py b/plenum/test/view_change/test_pp_seq_no_starts_from_1.py
index e7810130fe..1ec3de0255 100644
--- a/plenum/test/view_change/test_pp_seq_no_starts_from_1.py
+++ b/plenum/test/view_change/test_pp_seq_no_starts_from_1.py
@@ -1,7 +1,5 @@
 import pytest
 
-from plenum.test.helper import checkViewNoForNodes, \
-    sendReqsToNodesAndVerifySuffReplies
-from plenum.test.pool_transactions.conftest import looper
+from plenum.test.helper import checkViewNoForNodes
 from plenum.test.view_change.helper import ensure_view_change
 from plenum.test.helper import sdk_send_random_and_check
diff --git a/plenum/test/view_change/test_queueing_req_from_future_view.py b/plenum/test/view_change/test_queueing_req_from_future_view.py
index 27589db776..6973452729 100644
--- a/plenum/test/view_change/test_queueing_req_from_future_view.py
+++ b/plenum/test/view_change/test_queueing_req_from_future_view.py
@@ -1,4 +1,3 @@
-
 import pytest
 
 from plenum.test.view_change.helper import provoke_and_wait_for_view_change, ensure_view_change
@@ -20,7 +19,7 @@
 
 
 # noinspection PyIncorrectDocstring
-def testQueueingReqFromFutureView(delayed_perf_chk, looper, nodeSet, up,
+def testQueueingReqFromFutureView(delayed_perf_chk, looper, txnPoolNodeSet,
                                   wallet1, client1):
     """
     Test if every node queues 3 Phase requests(PRE-PREPARE, PREPARE and COMMIT)
@@ -29,7 +28,7 @@ def testQueueingReqFromFutureView(delayed_perf_chk, looper, nodeSet, up,
     => it starts receiving 3 phase commit messages for next view
     """
 
-    lagging_node = get_last_master_non_primary_node(nodeSet)
+    lagging_node = get_last_master_non_primary_node(txnPoolNodeSet)
     old_view_no = lagging_node.viewNo
 
     # Delay processing of InstanceChange and ViewChangeDone so node stashes
@@ -54,7 +53,7 @@ def chk_fut_view(view_no, is_empty):
 
     # Every node except Node A should do a view change
     ensure_view_change(looper,
-                       [n for n in nodeSet if n != lagging_node],
+                       [n for n in txnPoolNodeSet if n != lagging_node],
                        [lagging_node])
 
     # send more requests that will be queued for the lagged node
diff --git a/plenum/test/view_change/test_reverted_unordered.py b/plenum/test/view_change/test_reverted_unordered.py
index f9848a12a6..6702afdaee 100644
--- a/plenum/test/view_change/test_reverted_unordered.py
+++ b/plenum/test/view_change/test_reverted_unordered.py
@@ -7,11 +7,9 @@
 from plenum.test.delayers import cDelay, msg_rep_delay, lsDelay
 from plenum.test.helper import sdk_send_batches_of_random_and_check
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.test_node import getNonPrimaryReplicas, ensureElectionsDone
 from plenum.test.view_change.helper import ensure_view_change
 
-
 TestRunningTimeLimitSec = 150
diff --git a/plenum/test/view_change/test_that_domain_ledger_the_same_after_restart_for_all_nodes.py b/plenum/test/view_change/test_that_domain_ledger_the_same_after_restart_for_all_nodes.py
index 5035b0fc8b..a75f165ae2 100644
--- a/plenum/test/view_change/test_that_domain_ledger_the_same_after_restart_for_all_nodes.py
+++ b/plenum/test/view_change/test_that_domain_ledger_the_same_after_restart_for_all_nodes.py
@@ -2,11 +2,9 @@
 
 from plenum.common.constants import HS_FILE, HS_LEVELDB, HS_ROCKSDB
 from plenum.test.view_change.helper import ensure_view_change_by_primary_restart
-from plenum.test.pool_transactions.conftest import looper
 from stp_core.common.log import getlogger
 from plenum.common.startable import Mode
 
-
 logger = getlogger()
 
@@ -24,8 +22,8 @@ def catchuped(node):
 
 
 def test_that_domain_ledger_the_same_after_restart_for_all_nodes(
-    looper, txnPoolNodeSet, tdir, tconf,
-    allPluginsPath, limitTestRunningTime):
+        looper, txnPoolNodeSet, tdir, tconf,
+        allPluginsPath, limitTestRunningTime):
     """
     Test steps:
     1. Collect domainLedger data for primary node, such as:
@@ -73,7 +71,7 @@ def prepare_for_compare(domain_ledger):
         return dict_for_compare
 
     def compare(before, after):
-        for k,v in before.items():
+        for k, v in before.items():
             if k in after:
                 if v != after[k]:
                     logger.debug("compare_domain_ledgers: before[{}]!=after[{}]".format(k, k))
@@ -84,20 +82,19 @@ def compare(before, after):
                 logger.debug("compare_domain_ledgers: after_dict: {}: {}".format(k, after.get(k)))
                 assert False
-
     pool_of_nodes = txnPoolNodeSet
     for __ in range(4):
         p_node = [node for node in pool_of_nodes if node.has_master_primary][0]
         before_vc_dict = prepare_for_compare(p_node.domainLedger)
         pool_of_nodes = ensure_view_change_by_primary_restart(looper,
-                                                              pool_of_nodes,
-                                                              tconf,
-                                                              tdir,
-                                                              allPluginsPath,
-                                                              customTimeout=tconf.VIEW_CHANGE_TIMEOUT)
+                                                               pool_of_nodes,
+                                                               tconf,
+                                                               tdir,
+                                                               allPluginsPath,
+                                                               customTimeout=tconf.VIEW_CHANGE_TIMEOUT)
         for node in pool_of_nodes:
             logger.debug("compare_domain_ledgers: "
                          "primary node before view_change: {}, "
                          "compared node: {}".format(p_node, node))
            after_vc_dict = prepare_for_compare(node.domainLedger)
-            compare(before_vc_dict, after_vc_dict)
\ No newline at end of file
+            compare(before_vc_dict, after_vc_dict)
diff --git a/plenum/test/view_change/test_view_change.py b/plenum/test/view_change/test_view_change.py
index 92cbc6c793..d38a543ef4 100644
--- a/plenum/test/view_change/test_view_change.py
+++ b/plenum/test/view_change/test_view_change.py
@@ -8,49 +8,49 @@
 
 
 # noinspection PyIncorrectDocstring
-def test_view_change_on_empty_ledger(nodeSet, up, looper):
+def test_view_change_on_empty_ledger(txnPoolNodeSet, looper):
     """
     Check that view change is done when no txns in the ldegr
     """
-    ensure_view_change(looper, nodeSet)
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+    ensure_view_change(looper, txnPoolNodeSet)
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
 
 
 # noinspection PyIncorrectDocstring
-def test_view_change_after_some_txns(looper, nodeSet, up, viewNo,
+def test_view_change_after_some_txns(looper, txnPoolNodeSet, viewNo,
                                      wallet1, client1):
     """
     Check that view change is done after processing some of txns
     """
     sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 4)
-    ensure_view_change(looper, nodeSet)
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+    ensure_view_change(looper, txnPoolNodeSet)
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
 
 
 # noinspection PyIncorrectDocstring
-def test_send_more_after_view_change(looper, nodeSet, up,
+def test_send_more_after_view_change(looper, txnPoolNodeSet,
                                      wallet1, client1):
     """
     Check that we can send more requests after view change
     """
     sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 4)
 
-    ensure_view_change(looper, nodeSet)
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+    ensure_view_change(looper, txnPoolNodeSet)
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
 
     sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 10)
 
 
-def test_node_notified_about_primary_election_result(nodeSet, looper, up):
+def test_node_notified_about_primary_election_result(txnPoolNodeSet, looper):
     old_counts = {node.name: get_count(
-        node, node.primary_selected) for node in nodeSet}
-    ensure_view_change(looper, nodeSet)
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+        node, node.primary_selected) for node in txnPoolNodeSet}
+    ensure_view_change(looper, txnPoolNodeSet)
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
 
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         assert get_count(node, node.primary_selected) > old_counts[node.name]
diff --git a/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py b/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py
index f120b736ea..9cef7de8cc 100644
--- a/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py
+++ b/plenum/test/view_change/test_view_change_after_back_to_quorum_with_disconnected_primary.py
@@ -3,15 +3,14 @@
 
 from plenum.test.view_change.helper import start_stopped_node, ensure_view_change_by_primary_restart
 from plenum.test.test_node import get_master_primary_node
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.helper import checkViewNoForNodes, waitForViewChange, sdk_send_random_and_check
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
 
-
 # We do 2 view changes during this test. Timeout for one view change is 60 sec.
 # Test running time will be expected near 2 * 60 = 120, so let's define it as 150 sec.
 TestRunningTimeLimitSec = 150
 
+
 def test_view_change_after_back_to_quorum_with_disconnected_primary(txnPoolNodeSet, looper,
                                                                     sdk_pool_handle,
                                                                     sdk_wallet_client,
diff --git a/plenum/test/view_change/test_view_change_done_delayed.py b/plenum/test/view_change/test_view_change_done_delayed.py
index f189ea3399..57927e27e6 100644
--- a/plenum/test/view_change/test_view_change_done_delayed.py
+++ b/plenum/test/view_change/test_view_change_done_delayed.py
@@ -2,7 +2,6 @@
 from plenum.test.helper import sdk_send_batches_of_random_and_check, sdk_send_random_and_check
 from plenum.test.node_catchup.helper import waitNodeDataEquality, \
     ensure_all_nodes_have_same_data
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.test_node import getNonPrimaryReplicas
 from plenum.test.view_change.helper import ensure_view_change
 from stp_core.loop.eventually import eventually
diff --git a/plenum/test/view_change/test_view_change_happens_post_timeout.py b/plenum/test/view_change/test_view_change_happens_post_timeout.py
index ea3095f518..9f92381786 100644
--- a/plenum/test/view_change/test_view_change_happens_post_timeout.py
+++ b/plenum/test/view_change/test_view_change_happens_post_timeout.py
@@ -18,8 +18,8 @@ def is_instance_change_sent_for_view_no(node, view_no):
     return node.view_changer.instanceChanges.hasView(view_no)
 
 
-def test_instance_change_happens_post_timeout(tconf, looper, nodeSet, up):
-    non_prim_node = getNonPrimaryReplicas(nodeSet)[0].node
+def test_instance_change_happens_post_timeout(tconf, looper, txnPoolNodeSet):
+    non_prim_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
     old_view_no = non_prim_node.viewNo
 
     # first sending on InstanceChange: OK
diff --git a/plenum/test/view_change/test_view_change_max_catchup_rounds.py b/plenum/test/view_change/test_view_change_max_catchup_rounds.py
index 650e355b70..0c73da4729 100644
--- a/plenum/test/view_change/test_view_change_max_catchup_rounds.py
+++ b/plenum/test/view_change/test_view_change_max_catchup_rounds.py
@@ -2,7 +2,6 @@
 from plenum.test.delayers import pDelay, cDelay
 from plenum.test.helper import sdk_send_batches_of_random_and_check, sdk_send_random_requests
 from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.test_node import getNonPrimaryReplicas, ensureElectionsDone
 from plenum.test.view_change.helper import ensure_view_change
diff --git a/plenum/test/view_change/test_view_change_min_cathup_timeout.py b/plenum/test/view_change/test_view_change_min_cathup_timeout.py
index 38f8092f3c..3a6b928b1d 100644
--- a/plenum/test/view_change/test_view_change_min_cathup_timeout.py
+++ b/plenum/test/view_change/test_view_change_min_cathup_timeout.py
@@ -11,17 +11,17 @@
 nodeCount = 7
 
 
-def patch_has_ordered_till_last_prepared_certificate(nodeSet):
+def patch_has_ordered_till_last_prepared_certificate(txnPoolNodeSet):
     def patched_has_ordered_till_last_prepared_certificate(self) -> bool:
         return False
 
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         node.has_ordered_till_last_prepared_certificate = \
             types.MethodType(
                 patched_has_ordered_till_last_prepared_certificate, node)
 
 
-def test_view_change_min_catchup_timeout(nodeSet, up, looper, wallet1, client1,
+def test_view_change_min_catchup_timeout(txnPoolNodeSet, looper, wallet1, client1,
                                          tconf, viewNo):
     """
@@ -43,26 +43,26 @@ def test_view_change_min_catchup_timeout(nodeSet, up, looper, wallet1, client1,
 
     # 2. make the only condition to finish catch-up is
     # MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE
-    patch_has_ordered_till_last_prepared_certificate(nodeSet)
+    patch_has_ordered_till_last_prepared_certificate(txnPoolNodeSet)
 
     # 3. start view change
     expected_view_no = viewNo + 1
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         node.view_changer.startViewChange(expected_view_no)
 
     # 4. check that it's not finished till
     # MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE
     no_view_chanage_timeout = tconf.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE - 1
     with pytest.raises(EventuallyTimeoutException):
-        ensureElectionsDone(looper=looper, nodes=nodeSet,
+        ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet,
                             customTimeout=no_view_chanage_timeout)
 
     # 5. make sure that view change is finished eventually
     # (it should be finished quite soon after we waited for MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE)
-    ensureElectionsDone(looper=looper, nodes=nodeSet, customTimeout=2)
-    waitForViewChange(looper=looper, nodeSet=nodeSet,
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, customTimeout=2)
+    waitForViewChange(looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                       expectedViewNo=expected_view_no)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
 
     # 6. ensure that the pool is still functional.
-    ensure_pool_functional(looper, nodeSet, wallet1, client1)
+    ensure_pool_functional(looper, txnPoolNodeSet, wallet1, client1)
diff --git a/plenum/test/view_change/test_view_change_n_minus_f_quorum.py b/plenum/test/view_change/test_view_change_n_minus_f_quorum.py
index 7a852d2c21..5c41b8cf0e 100644
--- a/plenum/test/view_change/test_view_change_n_minus_f_quorum.py
+++ b/plenum/test/view_change/test_view_change_n_minus_f_quorum.py
@@ -5,9 +5,6 @@
 from plenum.test.view_change.helper import ensure_view_change
 from plenum.test.helper import stopNodes
-from plenum.test.pool_transactions.conftest import clientAndWallet1, \
-    client1, wallet1, client1Connected, looper, stewardAndWallet1, steward1, \
-    stewardWallet
-
 
 def test_view_change_n_minus_f_quorum(txnPoolNodeSet, looper):
     """
@@ -45,5 +42,4 @@ def test_view_change_n_minus_f_quorum(txnPoolNodeSet, looper):
                            "Delta -> Ratio: None"
                            .format(current_view_no, current_view_no + 1)) as exc_info:
-
         ensure_view_change(looper, active)
diff --git a/plenum/test/view_change/test_view_change_on_master_degraded.py b/plenum/test/view_change/test_view_change_on_master_degraded.py
index 0fece1ef76..a7ddbcee17 100644
--- a/plenum/test/view_change/test_view_change_on_master_degraded.py
+++ b/plenum/test/view_change/test_view_change_on_master_degraded.py
@@ -13,10 +13,11 @@
 
 nodeCount = 7
 
+
 # noinspection PyIncorrectDocstring
-def test_view_change_on_performance_degraded(looper, nodeSet, up, viewNo,
+def test_view_change_on_performance_degraded(looper, txnPoolNodeSet, viewNo,
                                              wallet1, client1):
     """
     Test that a view change is done when the performance of master goes down
@@ -24,37 +25,37 @@ def test_view_change_on_performance_degraded(looper, nodeSet, up, viewNo,
     instance so that there is a view change. All nodes will agree that master
     performance degraded
     """
-    old_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
+    old_primary_node = get_master_primary_node(list(txnPoolNodeSet))
 
-    simulate_slow_master(looper, nodeSet, wallet1, client1)
-    waitForViewChange(looper, nodeSet, expectedViewNo=viewNo + 1)
+    simulate_slow_master(looper, txnPoolNodeSet, wallet1, client1)
+    waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=viewNo + 1)
 
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
-    new_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
+    new_primary_node = get_master_primary_node(list(txnPoolNodeSet))
     assert old_primary_node.name != new_primary_node.name
 
 
-def test_view_change_on_quorum_of_master_degraded(nodeSet, looper, up,
+def test_view_change_on_quorum_of_master_degraded(txnPoolNodeSet, looper,
                                                   wallet1, client1, viewNo):
     """
     Node will change view even though it does not find the master to be degraded
     when a quorum of nodes agree that master performance degraded
     """
-    m_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
+    m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
 
     # Delay processing of PRE-PREPARE from all non primary replicas of master
    # so master's performance falls and view changes
-    delayNonPrimaries(nodeSet, 0, 10)
+    delayNonPrimaries(txnPoolNodeSet, 0, 10)
 
-    pr = getPrimaryReplica(nodeSet, 0)
+    pr = getPrimaryReplica(txnPoolNodeSet, 0)
     relucatantNode = pr.node
 
     # Count sent instance changes of all nodes
     sentInstChanges = {}
     instChngMethodName = ViewChanger.sendInstanceChange.__name__
-    for n in nodeSet:
+    for n in txnPoolNodeSet:
         sentInstChanges[n.name] = n.view_changer.spylog.count(instChngMethodName)
 
     # Node reluctant to change view, never says master is degraded
@@ -64,19 +65,19 @@ def test_view_change_on_quorum_of_master_degraded(nodeSet, looper, up,
     sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 4)
 
     # Check that view change happened for all nodes
-    waitForViewChange(looper, nodeSet, expectedViewNo=viewNo + 1)
+    waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=viewNo + 1)
 
     # All nodes except the reluctant node should have sent a view change and
     # thus must have called `sendInstanceChange`
-    for n in nodeSet:
+    for n in txnPoolNodeSet:
         if n.name != relucatantNode.name:
             assert n.view_changer.spylog.count(instChngMethodName) > \
-                sentInstChanges.get(n.name, 0)
+                   sentInstChanges.get(n.name, 0)
         else:
             assert n.view_changer.spylog.count(instChngMethodName) == \
-                sentInstChanges.get(n.name, 0)
+                   sentInstChanges.get(n.name, 0)
 
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    new_m_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    new_m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
     assert m_primary_node.name != new_m_primary_node.name
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
diff --git a/plenum/test/view_change/test_view_change_start_without_primary.py b/plenum/test/view_change/test_view_change_start_without_primary.py
index 8bdaf8a804..41c7d73cfd 100644
--- a/plenum/test/view_change/test_view_change_start_without_primary.py
+++ b/plenum/test/view_change/test_view_change_start_without_primary.py
@@ -8,18 +8,19 @@
 view_change_timeout = 10
 
 
-def test_view_change_without_primary(nodeSet, looper,
+def test_view_change_without_primary(txnPoolNodeSet, looper,
                                      patched_view_change_timeout):
-
-    first, others = stop_nodes_and_remove_first(looper, nodeSet)
+    first, others = stop_nodes_and_remove_first(looper, txnPoolNodeSet)
 
     start_and_connect_nodes(looper, others)
 
-    timeout = waits.expectedPoolElectionTimeout(len(nodeSet)) + patched_view_change_timeout
+    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + patched_view_change_timeout
+
+    #looper.runFor(40)
 
-    checkProtocolInstanceSetup(looper=looper, nodes=others, retryWait=1,
+    checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1,
                                customTimeout=timeout,
-                               numInstances=getRequiredInstances(len(nodeSet)))
+                               numInstances=getRequiredInstances(len(txnPoolNodeSet)))
 
 
 def stop_nodes_and_remove_first(looper, nodes):
@@ -28,7 +29,7 @@ def stop_nodes_and_remove_first(looper, nodes):
     looper.removeProdable(first_node)
     looper.runFor(3)  # let the nodes stop
     return first_node, \
-        list(filter(lambda x: x.name != first_node.name, nodes))
+           list(filter(lambda x: x.name != first_node.name, nodes))
 
 
 def start_and_connect_nodes(looper, nodes):
@@ -38,10 +39,10 @@ def start_and_connect_nodes(looper, nodes):
 
 
 @pytest.fixture(scope='function')
-def patched_view_change_timeout(nodeSet):
-    old_view_change_timeout = nodeSet[0]._view_change_timeout
-    for node in nodeSet:
+def patched_view_change_timeout(txnPoolNodeSet):
+    old_view_change_timeout = txnPoolNodeSet[0]._view_change_timeout
+    for node in txnPoolNodeSet:
         node._view_change_timeout = view_change_timeout
     yield view_change_timeout
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         node._view_change_timeout = old_view_change_timeout
diff --git a/plenum/test/view_change/test_view_change_timeout.py b/plenum/test/view_change/test_view_change_timeout.py
index 4837ad12f3..cd69d952ad 100644
--- a/plenum/test/view_change/test_view_change_timeout.py
+++ b/plenum/test/view_change/test_view_change_timeout.py
@@ -14,15 +14,15 @@
 
 
 @pytest.fixture()
-def setup(nodeSet, looper):
-    m_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
-    initial_view_no = waitForViewChange(looper, nodeSet)
+def setup(txnPoolNodeSet, looper):
+    m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
+    initial_view_no = waitForViewChange(looper, txnPoolNodeSet)
     # Setting view change timeout to low value to make test pass quicker
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         node._view_change_timeout = view_change_timeout
 
     timeout_callback_stats = {}
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         timeout_callback_stats[node.name] = {
             'called': get_count(node, node._check_view_change_completed),
             'returned_true': len(getAllReturnVals(
@@ -32,33 +32,33 @@ def setup(nodeSet, looper):
 
 
 def test_view_change_retry_by_timeout(
-        nodeSet, looper, up, setup, wallet1, client1):
+        txnPoolNodeSet, looper, setup, wallet1, client1):
     """
     Verifies that a view change is restarted if it is not completed in time
     """
     m_primary_node, initial_view_no, timeout_callback_stats = setup
-    delay_view_change_done_msg(nodeSet)
+    delay_view_change_done_msg(txnPoolNodeSet)
 
-    start_view_change(nodeSet, initial_view_no + 1)
+    start_view_change(txnPoolNodeSet, initial_view_no + 1)
     # First view change should fail, because of delayed ViewChangeDone
     # messages. This then leads to new view change that we need.
     with pytest.raises(AssertionError):
         ensureElectionsDone(looper=looper,
-                            nodes=nodeSet,
+                            nodes=txnPoolNodeSet,
                             customTimeout=view_change_timeout + 2)
 
     # Resetting delays to let second view change go well
-    reset_delays_and_process_delayeds(nodeSet)
+    reset_delays_and_process_delayeds(txnPoolNodeSet)
 
     # This view change should be completed with no problems
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
-    new_m_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
+    new_m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
     assert m_primary_node.name != new_m_primary_node.name
 
     # The timeout method was called one time
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         assert get_count(node, node._check_view_change_completed) - \
             timeout_callback_stats[node.name]['called'] == 1
 
@@ -68,26 +68,26 @@ def test_view_change_retry_by_timeout(
             timeout_callback_stats[node.name]['returned_true'] == 1
 
     # 2 view changes have been initiated
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         assert node.viewNo - initial_view_no == 2
 
-    ensure_pool_functional(looper, nodeSet, wallet1, client1)
+    ensure_pool_functional(looper, txnPoolNodeSet, wallet1, client1)
 
 
 def test_multiple_view_change_retries_by_timeouts(
-        nodeSet, looper, up, setup, wallet1, client1):
+        txnPoolNodeSet, looper, setup, wallet1, client1):
     """
     Verifies that a view change is restarted each time when the previous one
     is timed out
     """
     _, initial_view_no, timeout_callback_stats = setup
-    delay_view_change_done_msg(nodeSet)
+    delay_view_change_done_msg(txnPoolNodeSet)
 
-    start_view_change(nodeSet, initial_view_no + 1)
+    start_view_change(txnPoolNodeSet, initial_view_no + 1)
 
     def check_timeout_callback_called(times):
-        for node in nodeSet:
+        for node in txnPoolNodeSet:
             assert get_count(node, node._check_view_change_completed) - \
                 timeout_callback_stats[node.name]['called'] == times
 
@@ -104,33 +104,33 @@ def check_timeout_callback_called(times):
 
     # Check that the last view change has failed
     with pytest.raises(AssertionError):
-        ensureElectionsDone(looper=looper, nodes=nodeSet, customTimeout=1)
+        ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, customTimeout=1)
 
     # Reset delays to let the next view change go well
-    reset_delays_and_process_delayeds(nodeSet)
+    reset_delays_and_process_delayeds(txnPoolNodeSet)
 
     # This view change must be completed with no problems
-    ensureElectionsDone(looper=looper, nodes=nodeSet)
-    ensure_all_nodes_have_same_data(looper, nodes=nodeSet)
+    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
+    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
 
     # 4 view changes must have been initiated (initial one + 3 retries)
-    for node in nodeSet:
+    for node in txnPoolNodeSet:
         assert node.viewNo - initial_view_no == 4
 
-    ensure_pool_functional(looper, nodeSet, wallet1, client1)
+    ensure_pool_functional(looper, txnPoolNodeSet, wallet1, client1)
 
 
 def test_view_change_restarted_by_timeout_if_next_primary_disconnected(
-        nodeSet, looper, up, setup, wallet1, client1):
+        txnPoolNodeSet, looper, setup, wallet1, client1):
     """
     Verifies that a view change is restarted by timeout
     if the next primary has been disconnected
     """
     _, initial_view_no, timeout_callback_stats = setup
 
-    start_view_change(nodeSet, initial_view_no + 1)
+    start_view_change(txnPoolNodeSet, initial_view_no + 1)
 
-    alive_nodes = stop_next_primary(nodeSet)
+    alive_nodes = stop_next_primary(txnPoolNodeSet)
 
     ensureElectionsDone(looper=looper, nodes=alive_nodes, numInstances=3)
 
@@ -151,7 +151,7 @@ def test_view_change_restarted_by_timeout_if_next_primary_disconnected(
 
 def stop_next_primary(nodes):
     m_next_primary_name = nodes[0]._elector._next_primary_node_name_for_master()
-    nodes[m_next_primary_name].stop()
+    next(node for node in nodes if node.name == m_next_primary_name).stop()
     alive_nodes = list(filter(lambda x: x.name != m_next_primary_name, nodes))
     return alive_nodes
diff --git a/plenum/test/view_change/test_view_change_without_any_reqs.py b/plenum/test/view_change/test_view_change_without_any_reqs.py
index caeae77135..9f8db0c62e 100644
--- a/plenum/test/view_change/test_view_change_without_any_reqs.py
+++ b/plenum/test/view_change/test_view_change_without_any_reqs.py
@@ -11,8 +11,6 @@
 Max3PCBatchSize = 3
 from plenum.test.batching_3pc.conftest import tconf
 
-from plenum.test.pool_transactions.conftest import looper
-
 TestRunningTimeLimitSec = 200
 
@@ -38,7 +36,7 @@ def chk1():
     looper.run(eventually(chk1, retryWait=1))
 
     timeout = tconf.PerfCheckFreq + \
-        waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
+              waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
 
     waitForViewChange(looper, txnPoolNodeSet, old_view_no + 1,
                       customTimeout=timeout)
diff --git a/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py b/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py
index 53273a4425..400761df26 100644
--- a/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py
+++ b/plenum/test/view_change/test_view_changes_if_master_primary_disconnected.py
@@ -1,4 +1,3 @@
-
 from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected
 from plenum.test.spy_helpers import getAllReturnVals
 from plenum.test.view_change.helper import start_stopped_node
@@ -7,7 +6,6 @@ from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
 from plenum.test.test_node import get_master_primary_node, ensure_node_disconnected
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check, waitForViewChange
 
diff --git a/plenum/test/view_change/test_view_not_changed.py b/plenum/test/view_change/test_view_not_changed.py
index 1db41ebd2b..6a0564f0ae 100644
--- a/plenum/test/view_change/test_view_not_changed.py
+++ b/plenum/test/view_change/test_view_not_changed.py
@@ -1,5 +1,3 @@
-from plenum.test.pool_transactions.conftest import looper
-
 from plenum.common.util import getMaxFailures
 from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check
 from plenum.test.delayers import ppDelay
diff --git a/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py b/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py
index 85b599da9a..dad39c0859 100644
--- a/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py
+++ b/plenum/test/view_change/test_view_not_changed_when_primary_disconnected_from_less_than_quorum.py
@@ -5,7 +5,6 @@
 from plenum.test.node_catchup.helper import waitNodeDataEquality
 from plenum.test.test_node import getNonPrimaryReplicas, get_master_primary_node
 from stp_core.loop.eventually import eventually
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check
 
diff --git a/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py b/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py
index db7756ba38..48ba262724 100644
--- a/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py
+++ b/plenum/test/view_change/test_view_not_changed_when_short_disconnection.py
@@ -1,7 +1,6 @@
 import pytest
 
 from stp_core.loop.eventually import eventually
-from plenum.test.pool_transactions.conftest import looper
 from plenum.test.helper import checkViewNoForNodes, sdk_send_random_and_check
 from plenum.test.test_node import get_master_primary_node
 
@@ -33,7 +32,7 @@ def chk1():
         for node in txnPoolNodeSet:
             if node != pr_node:
                 assert node.spylog.count(node.lost_master_primary.__name__) \
-                    > lost_pr_calls[node.name]
+                       > lost_pr_calls[node.name]
 
     def chk2():
         # Schedule an instance change but do not send it
@@ -41,9 +40,9 @@ def chk2():
         for node in txnPoolNodeSet:
             if node != pr_node:
                 assert node.spylog.count(node.propose_view_change.__name__) \
-                    > prp_inst_chg_calls[node.name]
+                       > prp_inst_chg_calls[node.name]
                 assert node.view_changer.spylog.count(node.view_changer.process_instance_change_msg.__name__) \
-                    == recv_inst_chg_calls[node.name]
+                       == recv_inst_chg_calls[node.name]
 
     # Disconnect master's primary
     for node in txnPoolNodeSet:
diff --git a/plenum/test/waits.py b/plenum/test/waits.py
index ac272bcb6c..9e0e89f977 100644
--- a/plenum/test/waits.py
+++ b/plenum/test/waits.py
@@ -7,7 +6,6 @@
 
 logger = getlogger()
 
-
 # Peer (node/client) to peer message delivery time
 __Peer2PeerRequestDeliveryTime = 0.5
 __Peer2PeerRequestExchangeTime = 2 * __Peer2PeerRequestDeliveryTime
@@ -94,7 +93,7 @@ def expectedPoolConsistencyProof(nodeCount):
     """
     config = getConfig()
     nodeCPTimeout = __Peer2PeerRequestExchangeTime + \
-        config.ConsistencyProofsTimeout
+                    config.ConsistencyProofsTimeout
     return nodeCount * nodeCPTimeout
 
@@ -113,8 +112,8 @@ def expectedPoolGetReadyTimeout(nodeCount):
     To: the pool ledger is equal across the Nodes
     """
     return expectedPoolInterconnectionTime(nodeCount) + \
-        expectedPoolConsistencyProof(nodeCount) + \
-        expectedPoolCatchupTime(nodeCount)
+           expectedPoolConsistencyProof(nodeCount) + \
+           expectedPoolCatchupTime(nodeCount)
 
 
 def expectedPoolLedgerRepliedMsgPersisted(nodeCount):
@@ -240,7 +239,7 @@ def expectedClientToPoolConnectionTimeout(nodeCount):
     # TODO check actual state
     config = getConfig()
     return config.ExpectedConnectTime * nodeCount + \
-        config.RETRY_TIMEOUT_RESTRICTED
+           config.RETRY_TIMEOUT_RESTRICTED
 
 
 def expectedClientConsistencyProof(nodeCount):
@@ -251,7 +250,7 @@ def expectedClientConsistencyProof(nodeCount):
     config = getConfig()
     qN = Quorums(nodeCount).commit.value
     return qN * __Peer2PeerRequestExchangeTime + \
-        config.ConsistencyProofsTimeout
+           config.ConsistencyProofsTimeout
 
 
 def expectedClientCatchupTime(nodeCount):
@@ -262,7 +261,7 @@ def expectedClientCatchupTime(nodeCount):
     config = getConfig()
     qN = Quorums(nodeCount).commit.value
     return qN * 2 * __Peer2PeerRequestExchangeTime + \
-        config.CatchupTransactionsTimeout
+           config.CatchupTransactionsTimeout
 
 
 def expectedClientToPoolRequestDeliveryTime(nodeCount):
diff --git a/plenum/test/wallet/test_wallet_storage_helper.py b/plenum/test/wallet/test_wallet_storage_helper.py
index a35f6b825d..1aa1ba850c 100644
--- a/plenum/test/wallet/test_wallet_storage_helper.py
+++ b/plenum/test/wallet/test_wallet_storage_helper.py
@@ -94,7 +94,6 @@ def test_keyring_base_dir_exists_as_dir(tdir_hierarchy):
 
 def test_store_wallet_by_empty_path_fail(
         tdir_for_func, keyrings_base_dir, test_wallet):
-
     wsh = WalletStorageHelper(keyrings_base_dir)
 
     for path in (None, ''):
@@ -106,7 +105,6 @@ def test_store_wallet_outside_fail(
         tdir_for_func, keyrings_base_dir, test_wallet):
-
     wsh = WalletStorageHelper(keyrings_base_dir)
 
     inv_paths = [
@@ -202,7 +200,6 @@ def test_stored_wallet_data(tdir_for_func, keyrings_base_dir, test_wallet):
 
 
 def test_load_wallet_by_empty_path_fail(tdir_for_func, keyrings_base_dir):
-
     wsh = WalletStorageHelper(keyrings_base_dir)
 
     for path in (None, ''):
@@ -213,7 +210,6 @@ def test_load_wallet_by_empty_path_fail(tdir_for_func, keyrings_base_dir):
 
 
 def test_load_wallet_outside_fail(tdir_for_func, keyrings_base_dir):
-
     wsh = WalletStorageHelper(keyrings_base_dir)
 
     inv_paths = [
diff --git a/plenum/test/zstack_tests/test_zstack_reconnection.py b/plenum/test/zstack_tests/test_zstack_reconnection.py
index 5bafd9c00a..f351ead6fd 100644
--- a/plenum/test/zstack_tests/test_zstack_reconnection.py
+++ b/plenum/test/zstack_tests/test_zstack_reconnection.py
@@ -3,8 +3,6 @@
 
 from stp_core.common.log import getlogger
 from stp_core.loop.eventually import eventually
-from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \
-    client1, wallet1, client1Connected
 from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies, stopNodes, \
     send_reqs_to_nodes_and_verify_all_replies
 from plenum.test.test_node import TestNode, ensureElectionsDone
@@ -12,7 +10,6 @@
 
 logger = getlogger()
 
-
 TestRunningTimeLimitSec = 300
 
@@ -23,7 +20,7 @@ def tconf(tconf):
 
 
 def testZStackNodeReconnection(tconf, looper, txnPoolNodeSet, client1, wallet1,
-                               tdir, client1Connected):
+                               tdir):
     sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 1)
 
     npr = [n for n in txnPoolNodeSet if not n.hasPrimary]