diff --git a/hathor/consensus/consensus.py b/hathor/consensus/consensus.py index 6391a8470..c3ef7ecc2 100644 --- a/hathor/consensus/consensus.py +++ b/hathor/consensus/consensus.py @@ -72,10 +72,11 @@ def create_context(self) -> ConsensusAlgorithmContext: def update(self, base: BaseTransaction) -> None: assert base.storage is not None assert base.storage.is_only_valid_allowed() + meta = base.get_metadata() + assert meta.validation.is_valid() try: self._unsafe_update(base) except Exception: - meta = base.get_metadata() meta.add_voided_by(settings.CONSENSUS_FAIL_ID) assert base.storage is not None base.storage.save_transaction(base, only_metadata=True) diff --git a/hathor/feature_activation/feature_service.py b/hathor/feature_activation/feature_service.py index b45cc717c..9d3d82c28 100644 --- a/hathor/feature_activation/feature_service.py +++ b/hathor/feature_activation/feature_service.py @@ -50,6 +50,7 @@ def get_state(self, *, block: Block, feature: Feature) -> FeatureState: offset_to_boundary = height % self._feature_settings.evaluation_interval offset_to_previous_boundary = offset_to_boundary or self._feature_settings.evaluation_interval previous_boundary_height = height - offset_to_previous_boundary + assert previous_boundary_height >= 0 previous_boundary_block = self._get_ancestor_at_height(block=block, height=previous_boundary_height) previous_boundary_state = self.get_state(block=previous_boundary_block, feature=feature) @@ -167,6 +168,7 @@ def _get_ancestor_iteratively(*, block: Block, ancestor_height: int) -> Block: """Given a block, returns its ancestor at a specific height by iterating over its ancestors. This is slow.""" # TODO: there are further optimizations to be done here, the latest common block height could be persisted in # metadata, so we could still use the height index if the requested height is before that height. 
+ assert ancestor_height >= 0 ancestor = block while ancestor.get_height() > ancestor_height: ancestor = ancestor.get_block_parent() diff --git a/hathor/indexes/rocksdb_height_index.py b/hathor/indexes/rocksdb_height_index.py index 022f60b0c..92d56dcc0 100644 --- a/hathor/indexes/rocksdb_height_index.py +++ b/hathor/indexes/rocksdb_height_index.py @@ -111,7 +111,7 @@ def _add(self, height: int, entry: IndexEntry, *, can_reorg: bool) -> None: raise ValueError(f'parent hash required (current height: {cur_height}, new height: {height})') elif height == cur_height + 1: self._db.put((self._cf, key), value) - elif cur_tip != entry.hash: + elif self.get(height) != entry.hash: if can_reorg: self._del_from_height(height) self._db.put((self._cf, key), value) @@ -119,7 +119,7 @@ def _add(self, height: int, entry: IndexEntry, *, can_reorg: bool) -> None: raise ValueError('adding would cause a re-org, use can_reorg=True to accept re-orgs') else: # nothing to do (there are more blocks, but the block at height currently matches the added block) - assert cur_tip == entry.hash + pass def add_new(self, height: int, block_hash: bytes, timestamp: int) -> None: self._add(height, IndexEntry(block_hash, timestamp), can_reorg=False) diff --git a/hathor/manager.py b/hathor/manager.py index b1effa871..ec92144f6 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -51,6 +51,7 @@ from hathor.transaction.exceptions import TxValidationError from hathor.transaction.storage import TransactionStorage from hathor.transaction.storage.exceptions import TransactionDoesNotExist +from hathor.transaction.storage.tx_allow_scope import TxAllowScope from hathor.types import Address, VertexId from hathor.util import EnvironmentInfo, LogDuration, Random, Reactor, calculate_min_significant_weight, not_none from hathor.wallet import BaseWallet @@ -264,6 +265,8 @@ def start(self) -> None: # Disable get transaction lock when initializing components self.tx_storage.disable_lock() + # Open scope for 
initialization. + self.tx_storage.set_allow_scope(TxAllowScope.VALID | TxAllowScope.PARTIAL | TxAllowScope.INVALID) # Initialize manager's components. if self._full_verification: self.tx_storage.reset_indexes() @@ -274,6 +277,7 @@ def start(self) -> None: self.tx_storage.finish_full_verification() else: self._initialize_components_new() + self.tx_storage.set_allow_scope(TxAllowScope.VALID) self.tx_storage.enable_lock() # Metric starts to capture data @@ -394,9 +398,8 @@ def _initialize_components_full_verification(self) -> None: # self.start_profiler() self.log.debug('reset all metadata') - with self.tx_storage.allow_partially_validated_context(): - for tx in self.tx_storage.get_all_transactions(): - tx.reset_metadata() + for tx in self.tx_storage.get_all_transactions(): + tx.reset_metadata() self.log.debug('load blocks and transactions') for tx in self.tx_storage._topological_sort_dfs(): @@ -431,9 +434,10 @@ def _initialize_components_full_verification(self) -> None: try: # TODO: deal with invalid tx if tx.can_validate_full(): - self.tx_storage.add_to_indexes(tx) assert tx.validate_full(skip_block_weight_verification=skip_block_weight_verification) - self.consensus_algorithm.update(tx) + self.tx_storage.add_to_indexes(tx) + with self.tx_storage.allow_only_valid_context(): + self.consensus_algorithm.update(tx) self.tx_storage.indexes.update(tx) if self.tx_storage.indexes.mempool_tips is not None: self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update @@ -442,8 +446,7 @@ def _initialize_components_full_verification(self) -> None: self.tx_storage.save_transaction(tx, only_metadata=True) else: assert tx.validate_basic(skip_block_weight_verification=skip_block_weight_verification) - with self.tx_storage.allow_partially_validated_context(): - self.tx_storage.save_transaction(tx, only_metadata=True) + self.tx_storage.save_transaction(tx, only_metadata=True) except (InvalidNewTransaction, TxValidationError): self.log.error('unexpected error when 
initializing', tx=tx, exc_info=True) raise @@ -478,6 +481,8 @@ def _initialize_components_full_verification(self) -> None: # we have to have a best_block by now # assert best_block is not None + self.tx_storage.indexes._manually_initialize(self.tx_storage) + self.log.debug('done loading transactions') # Check if all checkpoints in database are ok @@ -585,7 +590,8 @@ def _verify_soft_voided_txs(self) -> None: # that already has the soft voided transactions marked for soft_voided_id in self.consensus_algorithm.soft_voided_tx_ids: try: - soft_voided_tx = self.tx_storage.get_transaction(soft_voided_id) + with self.tx_storage.allow_only_valid_context(): + soft_voided_tx = self.tx_storage.get_transaction(soft_voided_id) except TransactionDoesNotExist: # This database does not have this tx that should be soft voided # so it's fine, we will mark it as soft voided when we get it through sync @@ -654,10 +660,11 @@ def _sync_v2_resume_validations(self) -> None: for tx_hash in self.tx_storage.indexes.deps.iter(): if not self.tx_storage.transaction_exists(tx_hash): continue - tx = self.tx_storage.get_transaction(tx_hash) + with self.tx_storage.allow_partially_validated_context(): + tx = self.tx_storage.get_transaction(tx_hash) if tx.get_metadata().validation.is_final(): depended_final_txs.append(tx) - self.sync_v2_step_validations(depended_final_txs, quiet=False) + self.sync_v2_step_validations(depended_final_txs, quiet=True) self.log.debug('pending validations finished') def add_listen_address(self, addr: str) -> None: @@ -980,7 +987,7 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non except HathorError as e: if not fails_silently: raise InvalidNewTransaction('consensus update failed') from e - self.log.warn('on_new_tx(): consensus update failed', tx=tx.hash_hex) + self.log.warn('on_new_tx(): consensus update failed', tx=tx.hash_hex, exc_info=True) return False assert tx.validate_full(skip_block_weight_verification=True, 
reject_locked_reward=reject_locked_reward) @@ -1028,24 +1035,26 @@ def sync_v2_step_validations(self, txs: Iterable[BaseTransaction], *, quiet: boo for ready_tx in txs: assert ready_tx.hash is not None self.tx_storage.indexes.deps.remove_ready_for_validation(ready_tx.hash) - it_next_ready = self.tx_storage.indexes.deps.next_ready_for_validation(self.tx_storage) - for tx in map(self.tx_storage.get_transaction, it_next_ready): - assert tx.hash is not None - tx.update_initial_metadata() - try: - # XXX: `reject_locked_reward` might not apply, partial validation is only used on sync-v2 - # TODO: deal with `reject_locked_reward` on sync-v2 - assert tx.validate_full(reject_locked_reward=True) - except (AssertionError, HathorError): - # TODO - raise - else: - self.tx_storage.add_to_indexes(tx) - self.consensus_algorithm.update(tx) - self.tx_storage.indexes.update(tx) - if self.tx_storage.indexes.mempool_tips: - self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update - self.tx_fully_validated(tx, quiet=quiet) + with self.tx_storage.allow_partially_validated_context(): + for tx in map(self.tx_storage.get_transaction, + self.tx_storage.indexes.deps.next_ready_for_validation(self.tx_storage)): + assert tx.hash is not None + tx.update_initial_metadata() + with self.tx_storage.allow_only_valid_context(): + try: + # XXX: `reject_locked_reward` might not apply, partial validation is only used on sync-v2 + # TODO: deal with `reject_locked_reward` on sync-v2 + assert tx.validate_full(reject_locked_reward=True) + except (AssertionError, HathorError): + # TODO + raise + else: + self.tx_storage.add_to_indexes(tx) + self.consensus_algorithm.update(tx) + self.tx_storage.indexes.update(tx) + if self.tx_storage.indexes.mempool_tips: + self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update + self.tx_fully_validated(tx, quiet=quiet) def tx_fully_validated(self, tx: BaseTransaction, *, quiet: bool) -> None: """ Handle operations that need to happen 
once the tx becomes fully validated. diff --git a/hathor/p2p/sync_manager.py b/hathor/p2p/sync_manager.py index cc6f2b141..64db8121c 100644 --- a/hathor/p2p/sync_manager.py +++ b/hathor/p2p/sync_manager.py @@ -55,14 +55,17 @@ def is_errored(self) -> bool: """Whether the manager entered an error state""" raise NotImplementedError + @abstractmethod def is_sync_enabled(self) -> bool: """Return true if the sync is enabled.""" raise NotImplementedError + @abstractmethod def enable_sync(self) -> None: """Enable sync.""" raise NotImplementedError + @abstractmethod def disable_sync(self) -> None: """Disable sync.""" raise NotImplementedError diff --git a/hathor/simulator/fake_connection.py b/hathor/simulator/fake_connection.py index 2bb061a0d..dde2a91fd 100644 --- a/hathor/simulator/fake_connection.py +++ b/hathor/simulator/fake_connection.py @@ -41,7 +41,8 @@ def getPeerCertificate(self) -> X509: class FakeConnection: - def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, latency: float = 0): + def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, latency: float = 0, + autoreconnect: bool = False): """ :param: latency: Latency between nodes in seconds """ @@ -51,20 +52,14 @@ def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, late self.manager2 = manager2 self.latency = latency - self.is_connected = True - - self._proto1 = manager1.connections.server_factory.buildProtocol(HostnameAddress(b'fake', 0)) - self._proto2 = manager2.connections.client_factory.buildProtocol(HostnameAddress(b'fake', 0)) - - self.tr1 = HathorStringTransport(self._proto2.my_peer) - self.tr2 = HathorStringTransport(self._proto1.my_peer) + self.autoreconnect = autoreconnect + self.is_connected = False self._do_buffering = True self._buf1: deque[str] = deque() self._buf2: deque[str] = deque() - self._proto1.makeConnection(self.tr1) - self._proto2.makeConnection(self.tr2) + self.reconnect() @property def proto1(self): @@ -79,6 +74,35 
@@ def disable_idle_timeout(self): self._proto1.disable_idle_timeout() self._proto2.disable_idle_timeout() + def is_both_synced(self) -> bool: + """Short-hand check that can be used to make "step loops" without having to guess the number of iterations.""" + from hathor.p2p.states.ready import ReadyState + conn1_aborting = self._proto1.aborting + conn2_aborting = self._proto2.aborting + if conn1_aborting or conn2_aborting: + self.log.debug('conn aborting', conn1_aborting=conn1_aborting, conn2_aborting=conn2_aborting) + return False + state1 = self._proto1.state + state2 = self._proto2.state + state1_is_ready = isinstance(state1, ReadyState) + state2_is_ready = isinstance(state2, ReadyState) + if not state1_is_ready or not state2_is_ready: + self.log.debug('peer not ready', peer1_ready=state1_is_ready, peer2_ready=state2_is_ready) + return False + assert isinstance(state1, ReadyState) # mypy can't infer this from the above + assert isinstance(state2, ReadyState) # mypy can't infer this from the above + state1_is_errored = state1.sync_manager.is_errored() + state2_is_errored = state2.sync_manager.is_errored() + if state1_is_errored or state2_is_errored: + self.log.debug('peer errored', peer1_errored=state1_is_errored, peer2_errored=state2_is_errored) + return False + state1_is_synced = state1.sync_manager.is_synced() + state2_is_synced = state2.sync_manager.is_synced() + if not state1_is_synced or not state2_is_synced: + self.log.debug('peer not synced', peer1_synced=state1_is_synced, peer2_synced=state2_is_synced) + return False + return True + def can_step(self) -> bool: """Short-hand check that can be used to make "step loops" without having to guess the number of iterations.""" from hathor.p2p.states.ready import ReadyState @@ -155,6 +179,9 @@ def run_one_step(self, debug=False, force=False): if debug: self.log.debug('[2->1] delivered', line=line2) + if self.autoreconnect and self._proto1.aborting and self._proto2.aborting: + self.reconnect() + return True def 
run_until_empty(self, max_steps: Optional[int] = None, debug: bool = False, force: bool = False) -> None: @@ -178,6 +205,20 @@ def disconnect(self, reason): self._proto2.connectionLost(reason) self.is_connected = False + def reconnect(self) -> None: + from twisted.python.failure import Failure + if self.is_connected: + self.disconnect(Failure(Exception('forced reconnection'))) + self._buf1.clear() + self._buf2.clear() + self._proto1 = self.manager1.connections.server_factory.buildProtocol(HostnameAddress(b'fake', 0)) + self._proto2 = self.manager2.connections.client_factory.buildProtocol(HostnameAddress(b'fake', 0)) + self.tr1 = HathorStringTransport(self._proto2.my_peer) + self.tr2 = HathorStringTransport(self._proto1.my_peer) + self._proto1.makeConnection(self.tr1) + self._proto2.makeConnection(self.tr2) + self.is_connected = True + def is_empty(self): if self._do_buffering and (self._buf1 or self._buf2): return False diff --git a/hathor/simulator/trigger.py b/hathor/simulator/trigger.py index 2df07a457..2d54831f5 100644 --- a/hathor/simulator/trigger.py +++ b/hathor/simulator/trigger.py @@ -13,9 +13,10 @@ # limitations under the License. 
from abc import ABC, abstractmethod -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Callable if TYPE_CHECKING: + from hathor.simulator.fake_connection import FakeConnection from hathor.simulator.miner import AbstractMiner from hathor.simulator.tx_generator import RandomTransactionGenerator from hathor.wallet import BaseWallet @@ -71,3 +72,33 @@ def reset(self) -> None: def should_stop(self) -> bool: diff = self.tx_generator.transactions_found - self.initial_counter return diff >= self.quantity + + +class StopWhenTrue(Trigger): + """Stop the simulation when a function returns true.""" + def __init__(self, fn: Callable[[], bool]) -> None: + self.fn = fn + + def should_stop(self) -> bool: + return self.fn() + + +class StopWhenSynced(Trigger): + """Stop the simulation when both agents running on a connection report that they have synced.""" + def __init__(self, connection: 'FakeConnection') -> None: + self.connection = connection + + def should_stop(self) -> bool: + return self.connection.is_both_synced() + + +class All(Trigger): + """Aggregator that returns True when all sub-triggers return True.
+ + XXX: note that not all sub-triggers will be called, this will short-circuit, in order, if one sub-trigger returns + False, which follows the same behavior of builtins.all""" + def __init__(self, sub_triggers: list[Trigger]) -> None: + self._sub_triggers = sub_triggers + + def should_stop(self) -> bool: + return all(trigger.should_stop() for trigger in self._sub_triggers) diff --git a/hathor/transaction/base_transaction.py b/hathor/transaction/base_transaction.py index f87f1193e..10c4dfd0c 100644 --- a/hathor/transaction/base_transaction.py +++ b/hathor/transaction/base_transaction.py @@ -534,6 +534,7 @@ def validate_full(self, skip_block_weight_verification: bool = False, sync_check from hathor.transaction.transaction_metadata import ValidationState meta = self.get_metadata() + # skip full validation when it is a checkpoint if meta.validation.is_checkpoint(): self.set_validation(ValidationState.CHECKPOINT_FULL) diff --git a/hathor/transaction/block.py b/hathor/transaction/block.py index 51ffacc55..d5b9950ac 100644 --- a/hathor/transaction/block.py +++ b/hathor/transaction/block.py @@ -291,7 +291,8 @@ def has_basic_block_parent(self) -> bool: if not self.storage.transaction_exists(parent_block_hash): return False metadata = self.storage.get_metadata(parent_block_hash) - assert metadata is not None + if metadata is None: + return False return metadata.validation.is_at_least_basic() def verify_basic(self, skip_block_weight_verification: bool = False) -> None: @@ -314,7 +315,7 @@ def verify_checkpoint(self, checkpoints: list[Checkpoint]) -> None: raise CheckpointError(f'Invalid new block {self.hash_hex}: checkpoint hash does not match') else: # TODO: check whether self is a parent of any checkpoint-valid block, this is left for a future PR - raise NotImplementedError + pass def verify_weight(self) -> None: """Validate minimum block difficulty.""" diff --git a/hathor/transaction/storage/rocksdb_storage.py b/hathor/transaction/storage/rocksdb_storage.py index 
e2bbd7b84..ec74e6227 100644 --- a/hathor/transaction/storage/rocksdb_storage.py +++ b/hathor/transaction/storage/rocksdb_storage.py @@ -49,6 +49,7 @@ def __init__(self, rocksdb_storage: RocksDBStorage, indexes: Optional[IndexesMan self._cf_attr = rocksdb_storage.get_or_create_column_family(_CF_NAME_ATTR) self._cf_migrations = rocksdb_storage.get_or_create_column_family(_CF_NAME_MIGRATIONS) + self._rocksdb_storage = rocksdb_storage self._db = rocksdb_storage.get_db() super().__init__(indexes=indexes) diff --git a/hathor/transaction/storage/transaction_storage.py b/hathor/transaction/storage/transaction_storage.py index 485483732..ea8534c6f 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -590,8 +590,12 @@ def get_metadata(self, hash_bytes: bytes) -> Optional[TransactionMetadata]: def get_all_transactions(self) -> Iterator[BaseTransaction]: """Return all vertices (transactions and blocks) within the allowed scope. """ + # It is necessary to retain a copy of the current scope because this method will yield + # and the scope may undergo changes. By doing so, we ensure the usage of the scope at the + # time of iterator creation. 
+ scope = self.get_allow_scope() for tx in self._get_all_transactions(): - if self.get_allow_scope().is_allowed(tx): + if scope.is_allowed(tx): yield tx @abstractmethod diff --git a/hathor/util.py b/hathor/util.py index 1d57fe16a..248ee465e 100644 --- a/hathor/util.py +++ b/hathor/util.py @@ -363,6 +363,11 @@ def collect_n(it: Iterator[_T], n: int) -> tuple[list[_T], bool]: >>> collect_n(iter(range(10)), 8) ([0, 1, 2, 3, 4, 5, 6, 7], True) + + # This also works for checking (albeit destructively, because it consumes from the iterator), if it is empty + + >>> collect_n(iter(range(10)), 0) + ([], True) """ if n < 0: raise ValueError(f'n must be non-negative, got {n}') diff --git a/tests/p2p/test_sync_rate_limiter.py b/tests/p2p/test_sync_rate_limiter.py index 724448187..2bb53433c 100644 --- a/tests/p2p/test_sync_rate_limiter.py +++ b/tests/p2p/test_sync_rate_limiter.py @@ -8,7 +8,9 @@ from tests.simulation.base import SimulatorTestCase -class BaseRandomSimulatorTestCase(SimulatorTestCase): +class SyncV1RandomSimulatorTestCase(unittest.SyncV1Params, SimulatorTestCase): + __test__ = True + def test_sync_rate_limiter(self): manager1 = self.create_peer()
+72,7 @@ def test_get_ready(self): add_new_blocks(self.manager, 5) # This will make sure the peers are synced - while not self.conn1.is_empty(): + for _ in range(600): self.conn1.run_one_step(debug=True) self.clock.advance(0.1) diff --git a/tests/resources/transaction/test_tx.py b/tests/resources/transaction/test_tx.py index d5cc377e8..c6a2e72d9 100644 --- a/tests/resources/transaction/test_tx.py +++ b/tests/resources/transaction/test_tx.py @@ -63,10 +63,10 @@ def test_get_one(self): @inlineCallbacks def test_get_one_known_tx(self): + # Tx testnet 0033784bc8443ba851fd88d81c6f06774ae529f25c1fa8f026884ad0a0e98011 # We had a bug with this endpoint in this tx because the token_data from inputs # was being copied from the output - # First add needed data on storage tx_hex = ('0001020306001c382847d8440d05da95420bee2ebeb32bc437f82a9ae47b0745c8a29a7b0d007231eee3cb6160d95172' 'a409d634d0866eafc8775f5729fff6a61e7850aba500f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226' @@ -175,10 +175,10 @@ def test_get_one_known_tx(self): @inlineCallbacks def test_get_one_known_tx_with_authority(self): + # Tx testnet 00005f234469407614bf0abedec8f722bb5e534949ad37650f6077c899741ed7 # We had a bug with this endpoint in this tx because the token_data from inputs # was not considering authority mask - # First add needed data on storage tx_hex = ('0001010202000023b318c91dcfd4b967b205dc938f9f5e2fd5114256caacfb8f6dd13db330000023b318c91dcfd4b967b20' '5dc938f9f5e2fd5114256caacfb8f6dd13db33000006946304402200f7de9e866fbc2d600d6a46eb620fa2d72c9bf032250' @@ -492,6 +492,34 @@ def test_negative_timestamp(self): data = response.json_value() self.assertFalse(data['success']) + @inlineCallbacks + def test_partially_validated_not_found(self): + # First add needed data on storage + tx_hex = ('0001020306001c382847d8440d05da95420bee2ebeb32bc437f82a9ae47b0745c8a29a7b0d007231eee3cb6160d95172' + 'a409d634d0866eafc8775f5729fff6a61e7850aba500f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226' + 
'fdff3e09a101006946304402205dcbb7956d95b0e123954160d369e64bca7b176e1eb136e2dae5b95e46741509022072' + '6f99a363e8a4d79963492f4359c7589667eb0f45af7effe0dd4e51fbb5543d210288c10b8b1186b8c5f6bc05855590a6' + '522af35f269ddfdb8df39426a01ca9d2dd003d3c40fb04737e1a2a848cfd2592490a71cd0248b9e7d6a626f45dec8697' + '5b00006a4730450221008741dff52d97ce5f084518e1f4cac6bd98abdc88b98e6b18d6a8666fadac05f0022068951306' + '19eaf5433526e4803187c0aa08a8b1c46d9dc4ffaa89406fb2d4940c2102dd29eaadbb21a4de015d1812d5c0ec63cb8e' + 'e921e28580b6a9f8ff08db168c0e0096fb9b1a9e5fc34a9750bcccc746564c2b73f6defa381e130d9a4ea38cb1d80000' + '6a473045022100cb6b8abfb958d4029b0e6a89c828b65357456d20b8e6a8e42ad6d9a780fcddc4022035a8a46248b9c5' + '20b0205aa99ec5c390b40ae97a0b3ccc6e68e835ce5bde972a210306f7fdc08703152348484768fc7b85af900860a3d6' + 'fa85343524150d0370770b0000000100001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288ac0000000200' + '001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288ac0000000301001f0460b5a2b06f76a914b9987a3866' + 'a7c26225c57a62b14e901377e2f9e288ac0000006001001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288' + 'ac0000000402001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288ac000002b602001976a91479ae26cf2f' + '2dc703120a77192fc16eda9ed22e1b88ac40200000218def416095b08602003d3c40fb04737e1a2a848cfd2592490a71cd' + '0248b9e7d6a626f45dec86975b00f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226fdff3e09a1000002be') + tx = Transaction.create_from_struct(bytes.fromhex(tx_hex), self.manager.tx_storage) + tx.set_validation(ValidationState.BASIC) + with self.manager.tx_storage.allow_partially_validated_context(): + self.manager.tx_storage.save_transaction(tx) + + response = yield self.web.get("transaction", {b'id': bytes(tx.hash_hex, 'utf-8')}) + data = response.json_value() + self.assertFalse(data['success']) + class SyncV1TransactionTest(unittest.SyncV1Params, BaseTransactionTest): __test__ = True diff --git a/tests/simulation/test_simulator.py b/tests/simulation/test_simulator.py index 
c8002b7f2..d39cf81e0 100644 --- a/tests/simulation/test_simulator.py +++ b/tests/simulation/test_simulator.py @@ -1,6 +1,7 @@ import pytest from hathor.simulator import FakeConnection +from hathor.simulator.trigger import All as AllTriggers, StopWhenSynced from tests import unittest from tests.simulation.base import SimulatorTestCase @@ -56,7 +57,7 @@ def test_two_nodes(self): gen_tx1.stop() gen_tx2.stop() - self.simulator.run(5 * 60) + self.assertTrue(self.simulator.run(600, trigger=StopWhenSynced(conn12))) self.assertTrue(conn12.is_connected) self.assertTipsEqual(manager1, manager2) @@ -64,12 +65,16 @@ def test_two_nodes(self): def test_many_miners_since_beginning(self): nodes = [] miners = [] + stop_triggers = [] for hashpower in [10e6, 5e6, 1e6, 1e6, 1e6]: manager = self.create_peer() for node in nodes: - conn = FakeConnection(manager, node, latency=0.085) + # XXX: using autoreconnect is more realistic, but ideally it shouldn't be needed, but the test is + # failing without it for some reason + conn = FakeConnection(manager, node, latency=0.085, autoreconnect=True) self.simulator.add_connection(conn) + stop_triggers.append(StopWhenSynced(conn)) nodes.append(manager) @@ -82,7 +87,7 @@ def test_many_miners_since_beginning(self): for miner in miners: miner.stop() - self.simulator.run(15) + self.assertTrue(self.simulator.run(3600, trigger=AllTriggers(stop_triggers))) for node in nodes[1:]: self.assertTipsEqual(nodes[0], node) @@ -92,6 +97,7 @@ def test_new_syncing_peer(self): nodes = [] miners = [] tx_generators = [] + stop_triggers = [] manager = self.create_peer() nodes.append(manager) @@ -122,8 +128,9 @@ def test_new_syncing_peer(self): self.log.debug('adding late node') late_manager = self.create_peer() for node in nodes: - conn = FakeConnection(late_manager, node, latency=0.300) + conn = FakeConnection(late_manager, node, latency=0.300, autoreconnect=True) self.simulator.add_connection(conn) + stop_triggers.append(StopWhenSynced(conn)) self.simulator.run(600) 
@@ -132,7 +139,7 @@ def test_new_syncing_peer(self): for miner in miners: miner.stop() - self.simulator.run_until_complete(600) + self.assertTrue(self.simulator.run(3600, trigger=AllTriggers(stop_triggers))) for idx, node in enumerate(nodes): self.log.debug(f'checking node {idx}') diff --git a/tests/tx/test_reward_lock.py b/tests/tx/test_reward_lock.py index 374179901..e705d608f 100644 --- a/tests/tx/test_reward_lock.py +++ b/tests/tx/test_reward_lock.py @@ -3,6 +3,7 @@ from hathor.conf import HathorSettings from hathor.crypto.util import get_address_from_public_key from hathor.transaction import Transaction, TxInput, TxOutput +from hathor.transaction.exceptions import RewardLocked from hathor.transaction.scripts import P2PKH from hathor.transaction.storage import TransactionMemoryStorage from hathor.wallet import Wallet @@ -32,6 +33,16 @@ def setUp(self): blocks = add_blocks_unlock_reward(self.manager) self.last_block = blocks[-1] + def _add_reward_block(self): + reward_block = self.manager.generate_mining_block( + address=get_address_from_public_key(self.genesis_public_key) + ) + reward_block.resolve() + self.assertTrue(self.manager.propagate_tx(reward_block)) + # XXX: calculate unlock height AFTER adding the block so the height is correctly calculated + unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 + return reward_block, unlock_height + def _spend_reward_tx(self, manager, reward_block): value = reward_block.outputs[0].value address = get_address_from_public_key(self.genesis_public_key) @@ -54,13 +65,8 @@ def _spend_reward_tx(self, manager, reward_block): return tx def test_classic_reward_lock(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS 
+ 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # reward cannot be spent while not enough blocks are added for _ in range(settings.REWARD_SPEND_MIN_BLOCKS): @@ -76,13 +82,8 @@ def test_classic_reward_lock(self): self.assertTrue(self.manager.propagate_tx(tx, fails_silently=False)) def test_block_with_not_enough_height(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add one less block than needed add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS - 1, advance_clock=1) @@ -100,10 +101,7 @@ def test_block_with_not_enough_height(self): def test_block_with_enough_height(self): # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add just enough blocks add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1) @@ -118,13 +116,9 @@ def test_block_with_enough_height(self): def test_mempool_tx_with_not_enough_height(self): from hathor.exception import InvalidNewTransaction - from hathor.transaction.exceptions import RewardLocked # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = 
reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add one less block than needed add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS - 1, advance_clock=1) @@ -139,10 +133,7 @@ def test_mempool_tx_with_not_enough_height(self): def test_mempool_tx_with_enough_height(self): # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add just enough blocks add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1) @@ -153,13 +144,8 @@ def test_mempool_tx_with_enough_height(self): self.assertTrue(self.manager.on_new_tx(tx, fails_silently=False)) def test_mempool_tx_invalid_after_reorg(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add just enough blocks blocks = add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1) @@ -191,13 +177,8 @@ def test_mempool_tx_invalid_after_reorg(self): @pytest.mark.xfail(reason='this is no longer the case, timestamp will not matter', strict=True) def test_classic_reward_lock_timestamp_expected_to_fail(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = 
self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # we add enough blocks that this output could be spent based on block height blocks = add_blocks_unlock_reward(self.manager)