diff --git a/hathor/builder/builder.py b/hathor/builder/builder.py index 815ed352f..d08550984 100644 --- a/hathor/builder/builder.py +++ b/hathor/builder/builder.py @@ -16,6 +16,7 @@ from typing import Any, Callable, NamedTuple, Optional, TypeAlias from structlog import get_logger +from typing_extensions import assert_never from hathor.checkpoint import Checkpoint from hathor.conf.get_settings import get_global_settings @@ -25,9 +26,11 @@ from hathor.event import EventManager from hathor.event.storage import EventMemoryStorage, EventRocksDBStorage, EventStorage from hathor.event.websocket import EventWebsocketFactory +from hathor.execution_manager import ExecutionManager from hathor.feature_activation.bit_signaling_service import BitSignalingService from hathor.feature_activation.feature import Feature from hathor.feature_activation.feature_service import FeatureService +from hathor.feature_activation.storage.feature_activation_storage import FeatureActivationStorage from hathor.indexes import IndexesManager, MemoryIndexesManager, RocksDBIndexesManager from hathor.manager import HathorManager from hathor.mining.cpu_mining_service import CpuMiningService @@ -44,7 +47,8 @@ TransactionStorage, ) from hathor.util import Random, get_environment_info, not_none -from hathor.verification.verification_service import VerificationService, VertexVerifiers +from hathor.verification.verification_service import VerificationService +from hathor.verification.vertex_verifiers import VertexVerifiers from hathor.wallet import BaseWallet, Wallet logger = get_logger() @@ -67,6 +71,7 @@ class BuildArtifacts(NamedTuple): consensus: ConsensusAlgorithm tx_storage: TransactionStorage feature_service: FeatureService + bit_signaling_service: BitSignalingService indexes: Optional[IndexesManager] wallet: Optional[BaseWallet] rocksdb_storage: Optional[RocksDBStorage] @@ -150,6 +155,8 @@ def __init__(self) -> None: self._soft_voided_tx_ids: Optional[set[bytes]] = None + self._execution_manager: 
ExecutionManager | None = None + def build(self) -> BuildArtifacts: if self.artifacts is not None: raise ValueError('cannot call build twice') @@ -163,8 +170,9 @@ def build(self) -> BuildArtifacts: peer_id = self._get_peer_id() + execution_manager = self._get_or_create_execution_manager() soft_voided_tx_ids = self._get_soft_voided_tx_ids() - consensus_algorithm = ConsensusAlgorithm(soft_voided_tx_ids, pubsub) + consensus_algorithm = ConsensusAlgorithm(soft_voided_tx_ids, pubsub, execution_manager=execution_manager) p2p_manager = self._get_p2p_manager() @@ -215,6 +223,7 @@ def build(self) -> BuildArtifacts: bit_signaling_service=bit_signaling_service, verification_service=verification_service, cpu_mining_service=cpu_mining_service, + execution_manager=execution_manager, **kwargs ) @@ -239,6 +248,7 @@ def build(self) -> BuildArtifacts: rocksdb_storage=self._rocksdb_storage, stratum_factory=stratum_factory, feature_service=feature_service, + bit_signaling_service=bit_signaling_service ) return self.artifacts @@ -306,6 +316,13 @@ def _get_peer_id(self) -> PeerId: return self._peer_id raise ValueError('peer_id not set') + def _get_or_create_execution_manager(self) -> ExecutionManager: + if self._execution_manager is None: + reactor = self._get_reactor() + self._execution_manager = ExecutionManager(reactor) + + return self._execution_manager + def _get_or_create_pubsub(self) -> PubSubManager: if self._pubsub is None: self._pubsub = PubSubManager(self._get_reactor()) @@ -438,7 +455,8 @@ def _get_or_create_event_manager(self) -> EventManager: reactor=reactor, pubsub=self._get_or_create_pubsub(), event_storage=storage, - event_ws_factory=factory + event_ws_factory=factory, + execution_manager=self._get_or_create_execution_manager() ) return self._event_manager @@ -460,12 +478,14 @@ def _get_or_create_bit_signaling_service(self) -> BitSignalingService: settings = self._get_or_create_settings() tx_storage = self._get_or_create_tx_storage() feature_service = 
self._get_or_create_feature_service() + feature_storage = self._get_or_create_feature_storage() self._bit_signaling_service = BitSignalingService( feature_settings=settings.FEATURE_ACTIVATION, feature_service=feature_service, tx_storage=tx_storage, support_features=self._support_features, not_support_features=self._not_support_features, + feature_storage=feature_storage, ) return self._bit_signaling_service @@ -477,6 +497,15 @@ def _get_or_create_verification_service(self) -> VerificationService: return self._verification_service + def _get_or_create_feature_storage(self) -> FeatureActivationStorage | None: + match self._storage_type: + case StorageType.MEMORY: return None + case StorageType.ROCKSDB: return FeatureActivationStorage( + settings=self._get_or_create_settings(), + rocksdb_storage=self._get_or_create_rocksdb_storage() + ) + case _: assert_never(self._storage_type) + def _get_or_create_vertex_verifiers(self) -> VertexVerifiers: if self._vertex_verifiers is None: settings = self._get_or_create_settings() diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index 8c1f41fff..8418b8dc0 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -13,7 +13,6 @@ # limitations under the License. 
import getpass -import json import os import platform import sys @@ -27,8 +26,10 @@ from hathor.daa import DifficultyAdjustmentAlgorithm from hathor.event import EventManager from hathor.exception import BuilderError +from hathor.execution_manager import ExecutionManager from hathor.feature_activation.bit_signaling_service import BitSignalingService from hathor.feature_activation.feature_service import FeatureService +from hathor.feature_activation.storage.feature_activation_storage import FeatureActivationStorage from hathor.indexes import IndexesManager, MemoryIndexesManager, RocksDBIndexesManager from hathor.manager import HathorManager from hathor.mining.cpu_mining_service import CpuMiningService @@ -94,8 +95,7 @@ def create_manager(self, reactor: Reactor) -> HathorManager: self.log = logger.new() self.reactor = reactor - peer_id = self.create_peer_id() - + peer_id = PeerId.create_from_json_path(self._args.peer) if self._args.peer else PeerId() python = f'{platform.python_version()}-{platform.python_implementation()}' self.log.info( @@ -119,6 +119,7 @@ def create_manager(self, reactor: Reactor) -> HathorManager: tx_storage: TransactionStorage event_storage: EventStorage indexes: IndexesManager + feature_storage: FeatureActivationStorage | None = None self.rocksdb_storage: Optional[RocksDBStorage] = None self.event_ws_factory: Optional[EventWebsocketFactory] = None @@ -151,6 +152,7 @@ def create_manager(self, reactor: Reactor) -> HathorManager: kwargs['indexes'] = indexes tx_storage = TransactionRocksDBStorage(self.rocksdb_storage, **kwargs) event_storage = EventRocksDBStorage(self.rocksdb_storage) + feature_storage = FeatureActivationStorage(settings=settings, rocksdb_storage=self.rocksdb_storage) self.log.info('with storage', storage_class=type(tx_storage).__name__, path=self._args.data) if self._args.cache: @@ -212,11 +214,14 @@ def create_manager(self, reactor: Reactor) -> HathorManager: event_storage=event_storage ) + execution_manager = 
ExecutionManager(reactor) + event_manager = EventManager( event_storage=event_storage, event_ws_factory=self.event_ws_factory, pubsub=pubsub, - reactor=reactor + reactor=reactor, + execution_manager=execution_manager, ) if self._args.wallet_index and tx_storage.indexes is not None: @@ -236,7 +241,11 @@ def create_manager(self, reactor: Reactor) -> HathorManager: full_verification = True soft_voided_tx_ids = set(settings.SOFT_VOIDED_TX_IDS) - consensus_algorithm = ConsensusAlgorithm(soft_voided_tx_ids, pubsub=pubsub) + consensus_algorithm = ConsensusAlgorithm( + soft_voided_tx_ids, + pubsub=pubsub, + execution_manager=execution_manager + ) if self._args.x_enable_event_queue: self.log.info('--x-enable-event-queue flag provided. ' @@ -252,7 +261,8 @@ def create_manager(self, reactor: Reactor) -> HathorManager: feature_service=self.feature_service, tx_storage=tx_storage, support_features=self._args.signal_support, - not_support_features=self._args.signal_not_support + not_support_features=self._args.signal_not_support, + feature_storage=feature_storage, ) test_mode = TestMode.DISABLED @@ -308,7 +318,8 @@ def create_manager(self, reactor: Reactor) -> HathorManager: feature_service=self.feature_service, bit_signaling_service=bit_signaling_service, verification_service=verification_service, - cpu_mining_service=cpu_mining_service + cpu_mining_service=cpu_mining_service, + execution_manager=execution_manager, ) if self._args.x_ipython_kernel: @@ -354,7 +365,7 @@ def create_manager(self, reactor: Reactor) -> HathorManager: self.log.warn('--memory-indexes is implied for memory storage or JSON storage') for description in self._args.listen: - p2p_manager.add_listen_address(description) + p2p_manager.add_listen_address_description(description) if self._args.peer_id_blacklist: self.log.info('with peer id blacklist', blacklist=self._args.peer_id_blacklist) @@ -384,14 +395,6 @@ def get_hostname(self) -> Optional[str]: print('Hostname discovered and set to {}'.format(hostname)) 
return hostname - def create_peer_id(self) -> PeerId: - if not self._args.peer: - peer_id = PeerId() - else: - data = json.load(open(self._args.peer, 'r')) - peer_id = PeerId.create_from_json(data) - return peer_id - def create_wallet(self) -> BaseWallet: if self._args.wallet == 'hd': kwargs: dict[str, Any] = { diff --git a/hathor/builder/sysctl_builder.py b/hathor/builder/sysctl_builder.py index e34cd4879..0b2131ad8 100644 --- a/hathor/builder/sysctl_builder.py +++ b/hathor/builder/sysctl_builder.py @@ -13,7 +13,13 @@ # limitations under the License. from hathor.builder import BuildArtifacts -from hathor.sysctl import ConnectionsManagerSysctl, HathorManagerSysctl, Sysctl, WebsocketManagerSysctl +from hathor.sysctl import ( + ConnectionsManagerSysctl, + FeatureActivationSysctl, + HathorManagerSysctl, + Sysctl, + WebsocketManagerSysctl, +) class SysctlBuilder: @@ -25,7 +31,11 @@ def __init__(self, artifacts: BuildArtifacts) -> None: def build(self) -> Sysctl: """Build the sysctl tree.""" root = Sysctl() - root.put_child('core', HathorManagerSysctl(self.artifacts.manager)) + + core = HathorManagerSysctl(self.artifacts.manager) + core.put_child('features', FeatureActivationSysctl(self.artifacts.bit_signaling_service)) + + root.put_child('core', core) root.put_child('p2p', ConnectionsManagerSysctl(self.artifacts.p2p_manager)) ws_factory = self.artifacts.manager.metrics.websocket_factory diff --git a/hathor/cli/main.py b/hathor/cli/main.py index a9c287cbf..a1ab960d2 100644 --- a/hathor/cli/main.py +++ b/hathor/cli/main.py @@ -49,6 +49,7 @@ def __init__(self) -> None: quick_test, replay_logs, reset_event_queue, + reset_feature_settings, run_node, shell, stratum_mining, @@ -81,6 +82,8 @@ def __init__(self) -> None: self.add_cmd('oracle', 'oracle-encode-data', oracle_encode_data, 'Encode data and sign it with a private key') self.add_cmd('events', 'reset-event-queue', reset_event_queue, 'Delete all events and related data from the ' 'database') + self.add_cmd('features', 
'reset-feature-settings', reset_feature_settings, 'Delete existing Feature ' + 'Activation settings from the database') self.add_cmd('dev', 'shell', shell, 'Run a Python shell') self.add_cmd('dev', 'quick_test', quick_test, 'Similar to run_node but will quit after receiving a tx') self.add_cmd('dev', 'generate_nginx_config', nginx_config, 'Generate nginx config from OpenAPI json') diff --git a/hathor/cli/mining.py b/hathor/cli/mining.py index 491aff1e4..38c08adde 100644 --- a/hathor/cli/mining.py +++ b/hathor/cli/mining.py @@ -139,7 +139,8 @@ def execute(args: Namespace) -> None: from hathor.conf.get_settings import get_global_settings from hathor.daa import DifficultyAdjustmentAlgorithm - from hathor.verification.verification_service import VerificationService, VertexVerifiers + from hathor.verification.verification_service import VerificationService + from hathor.verification.vertex_verifiers import VertexVerifiers settings = get_global_settings() daa = DifficultyAdjustmentAlgorithm(settings=settings) verifiers = VertexVerifiers.create_defaults(settings=settings, daa=daa, feature_service=Mock()) diff --git a/hathor/cli/openapi_files/openapi_base.json b/hathor/cli/openapi_files/openapi_base.json index a3401d9a1..ea10d9442 100644 --- a/hathor/cli/openapi_files/openapi_base.json +++ b/hathor/cli/openapi_files/openapi_base.json @@ -7,7 +7,7 @@ ], "info": { "title": "Hathor API", - "version": "0.59.0" + "version": "0.60.0" }, "consumes": [ "application/json" diff --git a/hathor/cli/reset_feature_settings.py b/hathor/cli/reset_feature_settings.py new file mode 100644 index 000000000..a4c8ea9e0 --- /dev/null +++ b/hathor/cli/reset_feature_settings.py @@ -0,0 +1,49 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from argparse import ArgumentParser, Namespace + +from structlog import get_logger + +logger = get_logger() + + +def create_parser() -> ArgumentParser: + from hathor.cli.util import create_parser + + parser = create_parser() + parser.add_argument('--data', help='Data directory') + + return parser + + +def execute(args: Namespace) -> None: + from hathor.conf.get_settings import get_global_settings + from hathor.feature_activation.storage.feature_activation_storage import FeatureActivationStorage + from hathor.storage import RocksDBStorage + + assert args.data is not None, '--data is required' + + rocksdb_storage = RocksDBStorage(path=args.data) + feature_storage = FeatureActivationStorage(settings=get_global_settings(), rocksdb_storage=rocksdb_storage) + + logger.info('removing feature activation settings...') + feature_storage.reset_settings() + logger.info('reset complete') + + +def main(): + parser = create_parser() + args = parser.parse_args() + execute(args) diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py index 89f4e37de..15b610099 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -221,7 +221,8 @@ def prepare(self, *, register_resources: bool = True) -> None: wallet=self.manager.wallet, rocksdb_storage=getattr(builder, 'rocksdb_storage', None), stratum_factory=self.manager.stratum_factory, - feature_service=self.manager._feature_service + feature_service=self.manager._feature_service, + bit_signaling_service=self.manager._bit_signaling_service, ) def start_sentry_if_possible(self) -> None: @@ -264,9 +265,8 @@ def 
register_signal_handlers(self) -> None: def signal_usr1_handler(self, sig: int, frame: Any) -> None: """Called when USR1 signal is received.""" try: - self.log.warn('USR1 received. Killing all connections...') - if self.manager and self.manager.connections: - self.manager.connections.disconnect_all_peers(force=True) + self.log.warn('USR1 received.') + self.manager.connections.reload_entrypoints_and_connections() except Exception: # see: https://docs.python.org/3/library/signal.html#note-on-signal-handlers-and-exceptions self.log.error('prevented exception from escaping the signal handler', exc_info=True) diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 62718bf2a..682279f6c 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -423,6 +423,10 @@ def GENESIS_TX2_TIMESTAMP(self) -> int: OLD_MAX_MERKLE_PATH_LENGTH: int = 12 NEW_MAX_MERKLE_PATH_LENGTH: int = 20 + # Maximum number of tx tips to accept in the initial phase of the mempool sync 1000 is arbitrary, but it should be + # more than enough for the forseeable future + MAX_MEMPOOL_RECEIVING_TIPS: int = 1000 + # Used to enable nano contracts. # # This should NEVER be enabled for mainnet and testnet, since both networks will diff --git a/hathor/consensus/block_consensus.py b/hathor/consensus/block_consensus.py index 515b96a07..644a53238 100644 --- a/hathor/consensus/block_consensus.py +++ b/hathor/consensus/block_consensus.py @@ -13,7 +13,7 @@ # limitations under the License. from itertools import chain -from typing import TYPE_CHECKING, Iterable, Optional, cast +from typing import TYPE_CHECKING, Any, Iterable, Optional, cast from structlog import get_logger @@ -39,7 +39,7 @@ def __init__(self, context: 'ConsensusAlgorithmContext') -> None: self.context = context @classproperty - def log(cls): + def log(cls) -> Any: """ This is a workaround because of a bug on structlog (or abc). 
See: https://github.com/hynek/structlog/issues/229 diff --git a/hathor/consensus/consensus.py b/hathor/consensus/consensus.py index 34167d973..0317c2fab 100644 --- a/hathor/consensus/consensus.py +++ b/hathor/consensus/consensus.py @@ -18,6 +18,7 @@ from hathor.consensus.block_consensus import BlockConsensusAlgorithmFactory from hathor.consensus.context import ConsensusAlgorithmContext from hathor.consensus.transaction_consensus import TransactionConsensusAlgorithmFactory +from hathor.execution_manager import ExecutionManager from hathor.profiler import get_cpu_profiler from hathor.pubsub import HathorEvents, PubSubManager from hathor.transaction import BaseTransaction @@ -55,13 +56,20 @@ class ConsensusAlgorithm: b0 will not be propagated to the voided_by of b1, b2, and b3. """ - def __init__(self, soft_voided_tx_ids: set[bytes], pubsub: PubSubManager) -> None: + def __init__( + self, + soft_voided_tx_ids: set[bytes], + pubsub: PubSubManager, + *, + execution_manager: ExecutionManager + ) -> None: self._settings = get_global_settings() self.log = logger.new() self._pubsub = pubsub self.soft_voided_tx_ids = frozenset(soft_voided_tx_ids) self.block_algorithm_factory = BlockConsensusAlgorithmFactory() self.transaction_algorithm_factory = TransactionConsensusAlgorithmFactory() + self._execution_manager = execution_manager def create_context(self) -> ConsensusAlgorithmContext: """Handy method to create a context that can be used to access block and transaction algorithms.""" @@ -75,11 +83,11 @@ def update(self, base: BaseTransaction) -> None: assert meta.validation.is_valid() try: self._unsafe_update(base) - except Exception: + except BaseException: meta.add_voided_by(self._settings.CONSENSUS_FAIL_ID) assert base.storage is not None base.storage.save_transaction(base, only_metadata=True) - raise + self._execution_manager.crash_and_exit(reason=f'Consensus update failed for tx {base.hash_hex}') def _unsafe_update(self, base: BaseTransaction) -> None: """Run a consensus 
update with its own context, indexes will be updated accordingly.""" diff --git a/hathor/consensus/transaction_consensus.py b/hathor/consensus/transaction_consensus.py index 17a32202d..286a2534d 100644 --- a/hathor/consensus/transaction_consensus.py +++ b/hathor/consensus/transaction_consensus.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Iterable, cast +from typing import TYPE_CHECKING, Any, Iterable, cast from structlog import get_logger @@ -38,7 +38,7 @@ def __init__(self, context: 'ConsensusAlgorithmContext') -> None: self.context = context @classproperty - def log(cls): + def log(cls) -> Any: """ This is a workaround because of a bug on structlog (or abc). See: https://github.com/hynek/structlog/issues/229 diff --git a/hathor/daa.py b/hathor/daa.py index 680ef4dfc..ed4655f65 100644 --- a/hathor/daa.py +++ b/hathor/daa.py @@ -27,10 +27,12 @@ from hathor.conf.settings import HathorSettings from hathor.profiler import get_cpu_profiler +from hathor.types import VertexId from hathor.util import iwindows, not_none if TYPE_CHECKING: from hathor.transaction import Block, Transaction + from hathor.transaction.storage.simple_memory_storage import SimpleMemoryStorage from hathor.transaction.storage.vertex_storage_protocol import VertexStorageProtocol logger = get_logger() @@ -58,15 +60,33 @@ def __init__(self, *, settings: HathorSettings, test_mode: TestMode = TestMode.D DifficultyAdjustmentAlgorithm.singleton = self @cpu.profiler(key=lambda _, block: 'calculate_block_difficulty!{}'.format(block.hash.hex())) - def calculate_block_difficulty(self, block: 'Block') -> float: - """ Calculate block weight according to the ascendents of `block`, using calculate_next_weight.""" + def calculate_block_difficulty(self, block: 'Block', memory_storage: 'SimpleMemoryStorage') -> float: + """ Calculate block weight according to the ascendants of `block`, using 
calculate_next_weight.""" if self.TEST_MODE & TestMode.TEST_BLOCK_WEIGHT: return 1.0 if block.is_genesis: return self.MIN_BLOCK_WEIGHT - return self.calculate_next_weight(block.get_block_parent(), block.timestamp, not_none(block.storage)) + parent_block = memory_storage.get_parent_block(block) + + return self.calculate_next_weight(parent_block, block.timestamp, memory_storage) + + def _calculate_N(self, parent_block: 'Block') -> int: + """Calculate the N value for the `calculate_next_weight` algorithm.""" + return min(2 * self._settings.BLOCK_DIFFICULTY_N_BLOCKS, parent_block.get_height() - 1) + + def get_block_dependencies(self, block: 'Block') -> list[VertexId]: + """Return the ids of the required blocks to call `calculate_block_difficulty` for the provided block.""" + parent_block = block.get_block_parent() + N = self._calculate_N(parent_block) + ids: list[VertexId] = [not_none(parent_block.hash)] + + while len(ids) <= N + 1: + parent_block = parent_block.get_block_parent() + ids.append(not_none(parent_block.hash)) + + return ids def calculate_next_weight(self, parent_block: 'Block', timestamp: int, storage: 'VertexStorageProtocol') -> float: """ Calculate the next block weight, aka DAA/difficulty adjustment algorithm. 
@@ -81,7 +101,7 @@ def calculate_next_weight(self, parent_block: 'Block', timestamp: int, storage: from hathor.transaction import sum_weights root = parent_block - N = min(2 * self._settings.BLOCK_DIFFICULTY_N_BLOCKS, parent_block.get_height() - 1) + N = self._calculate_N(parent_block) K = N // 2 T = self.AVG_TIME_BETWEEN_BLOCKS S = 5 diff --git a/hathor/event/event_manager.py b/hathor/event/event_manager.py index 6306707c6..748abe90a 100644 --- a/hathor/event/event_manager.py +++ b/hathor/event/event_manager.py @@ -22,6 +22,7 @@ from hathor.event.model.node_state import NodeState from hathor.event.storage import EventStorage from hathor.event.websocket import EventWebsocketFactory +from hathor.execution_manager import ExecutionManager from hathor.pubsub import EventArguments, HathorEvents, PubSubManager from hathor.reactor import ReactorProtocol as Reactor from hathor.transaction import BaseTransaction @@ -70,6 +71,7 @@ def __init__( event_storage: EventStorage, pubsub: PubSubManager, reactor: Reactor, + execution_manager: ExecutionManager, event_ws_factory: Optional[EventWebsocketFactory] = None, ) -> None: self.log = logger.new() @@ -78,6 +80,7 @@ def __init__( self._event_storage = event_storage self._event_ws_factory = event_ws_factory self._pubsub = pubsub + self._execution_manager = execution_manager def start(self, peer_id: str) -> None: """Starts the EventManager.""" @@ -85,6 +88,7 @@ def start(self, peer_id: str) -> None: assert self._event_ws_factory is not None, 'Cannot start, EventWebsocketFactory is not set' assert self.get_event_queue_state() is True, 'Cannot start, event queue feature is disabled' + self._execution_manager.register_on_crash_callback(self.on_full_node_crash) self._previous_node_state = self._event_storage.get_node_state() if self._should_reload_events(): @@ -133,7 +137,7 @@ def _subscribe_events(self) -> None: for event in _SUBSCRIBE_EVENTS: self._pubsub.subscribe(event, self._handle_hathor_event) - def load_started(self): + def 
load_started(self) -> None: if not self._is_running: return @@ -143,7 +147,7 @@ def load_started(self): ) self._event_storage.save_node_state(NodeState.LOAD) - def load_finished(self): + def load_finished(self) -> None: if not self._is_running: return @@ -153,6 +157,15 @@ def load_finished(self): ) self._event_storage.save_node_state(NodeState.SYNC) + def on_full_node_crash(self) -> None: + if not self._is_running: + return + + self._handle_event( + event_type=EventType.FULL_NODE_CRASHED, + event_args=EventArguments(), + ) + def _handle_hathor_event(self, hathor_event: HathorEvents, event_args: EventArguments) -> None: """Handles a PubSub 'HathorEvents' event.""" event_type = EventType.from_hathor_event(hathor_event) diff --git a/hathor/event/model/base_event.py b/hathor/event/model/base_event.py index 8f15fca88..e59db1f7c 100644 --- a/hathor/event/model/base_event.py +++ b/hathor/event/model/base_event.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional +from typing import Any, Optional from pydantic import NonNegativeInt, validator -from hathor.event.model.event_data import EventData +from hathor.event.model.event_data import BaseEventData, EventData from hathor.event.model.event_type import EventType from hathor.pubsub import EventArguments from hathor.utils.pydantic import BaseModel @@ -58,7 +58,7 @@ def from_event_arguments( ) @validator('data') - def data_type_must_match_event_type(cls, v, values): + def data_type_must_match_event_type(cls, v: BaseEventData, values: dict[str, Any]) -> BaseEventData: event_type = EventType(values['type']) expected_data_type = event_type.data_type() diff --git a/hathor/event/model/event_data.py b/hathor/event/model/event_data.py index f3003d0cd..cf22fa424 100644 --- a/hathor/event/model/event_data.py +++ b/hathor/event/model/event_data.py @@ -101,6 +101,7 @@ class TxData(BaseEventData, extra=Extra.ignore): hash: str nonce: Optional[int] = None timestamp: int + signal_bits: int version: int weight: float inputs: list['TxInput'] diff --git a/hathor/event/model/event_type.py b/hathor/event/model/event_type.py index 7c697fbc8..617ea74d8 100644 --- a/hathor/event/model/event_type.py +++ b/hathor/event/model/event_type.py @@ -25,6 +25,7 @@ class EventType(Enum): REORG_STARTED = 'REORG_STARTED' REORG_FINISHED = 'REORG_FINISHED' VERTEX_METADATA_CHANGED = 'VERTEX_METADATA_CHANGED' + FULL_NODE_CRASHED = 'FULL_NODE_CRASHED' @classmethod def from_hathor_event(cls, hathor_event: HathorEvents) -> 'EventType': @@ -53,4 +54,5 @@ def data_type(self) -> type[BaseEventData]: EventType.REORG_STARTED: ReorgData, EventType.REORG_FINISHED: EmptyData, EventType.VERTEX_METADATA_CHANGED: TxData, + EventType.FULL_NODE_CRASHED: EmptyData, } diff --git a/hathor/event/resources/event.py b/hathor/event/resources/event.py index febc2bb62..87e7ada9b 100644 --- a/hathor/event/resources/event.py +++ b/hathor/event/resources/event.py @@ -16,6 +16,7 @@ from typing import Optional from 
pydantic import Field, NonNegativeInt +from twisted.web.http import Request from hathor.api_util import Resource, set_cors from hathor.cli.openapi_files.register import register_resource @@ -35,7 +36,7 @@ def __init__(self, event_manager: Optional[EventManager]): super().__init__() self.event_manager = event_manager - def render_GET(self, request): + def render_GET(self, request: Request) -> bytes: request.setHeader(b'content-type', b'application/json; charset=utf-8') set_cors(request, 'GET') diff --git a/hathor/event/websocket/protocol.py b/hathor/event/websocket/protocol.py index 102617546..c8da7e1f6 100644 --- a/hathor/event/websocket/protocol.py +++ b/hathor/event/websocket/protocol.py @@ -12,13 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Callable, Optional +from typing import TYPE_CHECKING, Optional from autobahn.exception import Disconnected from autobahn.twisted.websocket import WebSocketServerProtocol from autobahn.websocket import ConnectionRequest from pydantic import ValidationError from structlog import get_logger +from typing_extensions import assert_never from hathor.event.websocket.request import AckRequest, Request, RequestWrapper, StartStreamRequest, StopStreamRequest from hathor.event.websocket.response import EventResponse, InvalidRequestResponse, InvalidRequestType, Response @@ -50,7 +51,7 @@ class EventWebsocketProtocol(WebSocketServerProtocol): # Whether the stream is enabled or not. 
_stream_is_active: bool = False - def __init__(self): + def __init__(self) -> None: super().__init__() self.log = logger.new() @@ -102,18 +103,11 @@ def onMessage(self, payload: bytes, isBinary: bool) -> None: def _handle_request(self, request: Request) -> None: """Handles a request message according to its type.""" - # This could be a pattern match in Python 3.10 - request_type = type(request) - handlers: dict[type, Callable] = { - StartStreamRequest: self._handle_start_stream_request, - AckRequest: self._handle_ack_request, - StopStreamRequest: lambda _: self._handle_stop_stream_request() - } - handle_fn = handlers.get(request_type) - - assert handle_fn is not None, f'cannot handle request of unknown type "{request_type}"' - - handle_fn(request) + match request: + case StartStreamRequest(): self._handle_start_stream_request(request) + case AckRequest(): self._handle_ack_request(request) + case StopStreamRequest(): self._handle_stop_stream_request() + case _: assert_never(request) def _handle_start_stream_request(self, request: StartStreamRequest) -> None: """ diff --git a/hathor/event/websocket/request.py b/hathor/event/websocket/request.py index 446c62840..64446887d 100644 --- a/hathor/event/websocket/request.py +++ b/hathor/event/websocket/request.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Annotated, Literal, Optional, Union +from typing import Annotated, Literal, Optional from pydantic import Field, NonNegativeInt @@ -54,7 +54,7 @@ class StopStreamRequest(BaseModel): type: Literal['STOP_STREAM'] -Request = Annotated[Union[StartStreamRequest, AckRequest, StopStreamRequest], Field(discriminator='type')] +Request = Annotated[StartStreamRequest | AckRequest | StopStreamRequest, Field(discriminator='type')] class RequestWrapper(BaseModel): diff --git a/hathor/execution_manager.py b/hathor/execution_manager.py new file mode 100644 index 000000000..8d788e8b0 --- /dev/null +++ b/hathor/execution_manager.py @@ -0,0 +1,65 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +from typing import Callable, NoReturn + +from structlog import get_logger + +from hathor.reactor import ReactorProtocol + +logger = get_logger() + + +class ExecutionManager: + """Class to manage actions related to full node execution.""" + __slots__ = ('_log', '_reactor', '_on_crash_callbacks') + + def __init__(self, reactor: ReactorProtocol) -> None: + self._log = logger.new() + self._reactor = reactor + self._on_crash_callbacks: list[tuple[int, Callable[[], None]]] = [] + + def register_on_crash_callback(self, callback: Callable[[], None], *, priority: int = 0) -> None: + """Register a callback to be executed before the full node exits.""" + self._on_crash_callbacks.append((priority, callback)) + + def _run_on_crash_callbacks(self) -> None: + """Run all registered on crash callbacks.""" + callbacks = sorted(self._on_crash_callbacks, reverse=True) + + for _, callback in callbacks: + try: + callback() + except BaseException as e: + self._log.critical(f'Failed execution of on_crash callback "{callback}". Exception: {repr(e)}') + + def crash_and_exit(self, *, reason: str) -> NoReturn: + """ + Calling this function is a very extreme thing to do, so be careful. It should only be called when a + critical, unrecoverable failure happens. It crashes and exits the full node, maybe rendering the database + corrupted, and requiring manual intervention. In other words, a restart with a clean database (from scratch + or a snapshot) may be required. + """ + self._run_on_crash_callbacks() + self._log.critical( + 'Critical failure occurred, causing the full node to halt execution. Manual intervention is required.', + reason=reason, + exc_info=True + ) + # We sequentially call more extreme exit methods, so the full node exits as gracefully as possible, while + # guaranteeing that it will indeed exit. 
+ self._reactor.stop() + self._reactor.crash() + sys.exit(-1) diff --git a/hathor/feature_activation/bit_signaling_service.py b/hathor/feature_activation/bit_signaling_service.py index a8f7f09a4..639eb1a5c 100644 --- a/hathor/feature_activation/bit_signaling_service.py +++ b/hathor/feature_activation/bit_signaling_service.py @@ -19,6 +19,7 @@ from hathor.feature_activation.model.criteria import Criteria from hathor.feature_activation.model.feature_state import FeatureState from hathor.feature_activation.settings import Settings as FeatureSettings +from hathor.feature_activation.storage.feature_activation_storage import FeatureActivationStorage from hathor.transaction import Block from hathor.transaction.storage import TransactionStorage @@ -32,7 +33,8 @@ class BitSignalingService: '_feature_service', '_tx_storage', '_support_features', - '_not_support_features' + '_not_support_features', + '_feature_storage', ) def __init__( @@ -42,7 +44,8 @@ def __init__( feature_service: FeatureService, tx_storage: TransactionStorage, support_features: set[Feature], - not_support_features: set[Feature] + not_support_features: set[Feature], + feature_storage: FeatureActivationStorage | None, ) -> None: self._log = logger.new() self._feature_settings = feature_settings @@ -50,14 +53,19 @@ def __init__( self._tx_storage = tx_storage self._support_features = support_features self._not_support_features = not_support_features + self._feature_storage = feature_storage self._validate_support_intersection() + self._feature_service.bit_signaling_service = self def start(self) -> None: """ Log information related to bit signaling. Must be called after the storage is ready and migrations have been applied. 
""" + if self._feature_storage: + self._feature_storage.validate_settings() + best_block = self._tx_storage.get_best_block() self._warn_non_signaling_features(best_block) @@ -74,21 +82,66 @@ def generate_signal_bits(self, *, block: Block, log: bool = False) -> int: Returns: a number that represents the signal bits in binary. """ - signaling_features = self._get_signaling_features(block) + feature_signals = self._calculate_feature_signals(block=block, log=log) signal_bits = 0 + for feature, (criteria, enable_bit) in feature_signals.items(): + signal_bits |= int(enable_bit) << criteria.bit + + return signal_bits + + def _calculate_feature_signals(self, *, block: Block, log: bool = False) -> dict[Feature, tuple[Criteria, bool]]: + """ + Calculate the signal value for each signaling feature. + + Args: + block: the block that is used to determine signaling features. + log: whether to log the signal for each feature. + + Returns: a dict with each feature paired with its criteria and its signal value. 
+ """ + signaling_features = self._get_signaling_features(block) + signals: dict[Feature, tuple[Criteria, bool]] = {} + for feature, criteria in signaling_features.items(): default_enable_bit = criteria.signal_support_by_default support = feature in self._support_features not_support = feature in self._not_support_features enable_bit = (default_enable_bit or support) and not not_support + signals[feature] = (criteria, enable_bit) if log: self._log_signal_bits(feature, enable_bit, support, not_support) - signal_bits |= int(enable_bit) << criteria.bit + return signals - return signal_bits + def get_support_features(self) -> list[Feature]: + """Get a list of features with enabled support.""" + best_block = self._tx_storage.get_best_block() + feature_signals = self._calculate_feature_signals(block=best_block) + return [feature for feature, (_, enable_bit) in feature_signals.items() if enable_bit] + + def get_not_support_features(self) -> list[Feature]: + """Get a list of features with disabled support.""" + best_block = self._tx_storage.get_best_block() + feature_signals = self._calculate_feature_signals(block=best_block) + return [feature for feature, (_, enable_bit) in feature_signals.items() if not enable_bit] + + def add_feature_support(self, feature: Feature) -> None: + """Add explicit support for a feature by enabling its signaling bit.""" + self._not_support_features.discard(feature) + self._support_features.add(feature) + + def remove_feature_support(self, feature: Feature) -> None: + """Remove explicit support for a feature by disabling its signaling bit.""" + self._support_features.discard(feature) + self._not_support_features.add(feature) + + def on_must_signal(self, feature: Feature) -> None: + """ + When the MUST_SIGNAL phase is reached, feature support is automatically enabled. 
+ """ + self.add_feature_support(feature) def _log_signal_bits(self, feature: Feature, enable_bit: bool, support: bool, not_support: bool) -> None: """Generate info log for a feature's signal.""" @@ -123,6 +176,11 @@ def _get_signaling_features(self, block: Block) -> dict[Feature, Criteria]: return signaling_features + def get_best_block_signaling_features(self) -> dict[Feature, Criteria]: + """Given the current best block, return all features that are in a signaling state.""" + best_block = self._tx_storage.get_best_block() + return self._get_signaling_features(best_block) + def _validate_support_intersection(self) -> None: """Validate that the provided support and not-support arguments do not conflict.""" if intersection := self._support_features.intersection(self._not_support_features): diff --git a/hathor/feature_activation/feature.py b/hathor/feature_activation/feature.py index 56082def8..05b08226e 100644 --- a/hathor/feature_activation/feature.py +++ b/hathor/feature_activation/feature.py @@ -16,7 +16,7 @@ @unique -class Feature(Enum): +class Feature(str, Enum): """ An enum containing all features that participate in the feature activation process, past or future, activated or not, for all networks. Features should NOT be removed from this enum, to preserve history. Their values diff --git a/hathor/feature_activation/feature_service.py b/hathor/feature_activation/feature_service.py index f02195cec..caadb62fb 100644 --- a/hathor/feature_activation/feature_service.py +++ b/hathor/feature_activation/feature_service.py @@ -13,7 +13,7 @@ # limitations under the License. 
from dataclasses import dataclass -from typing import TYPE_CHECKING, TypeAlias +from typing import TYPE_CHECKING, Optional, TypeAlias from hathor.feature_activation.feature import Feature from hathor.feature_activation.model.feature_description import FeatureDescription @@ -21,6 +21,7 @@ from hathor.feature_activation.settings import Settings as FeatureSettings if TYPE_CHECKING: + from hathor.feature_activation.bit_signaling_service import BitSignalingService from hathor.transaction import Block from hathor.transaction.storage import TransactionStorage @@ -41,11 +42,12 @@ class BlockIsMissingSignal: class FeatureService: - __slots__ = ('_feature_settings', '_tx_storage') + __slots__ = ('_feature_settings', '_tx_storage', 'bit_signaling_service') def __init__(self, *, feature_settings: FeatureSettings, tx_storage: 'TransactionStorage') -> None: self._feature_settings = feature_settings self._tx_storage = tx_storage + self.bit_signaling_service: Optional['BitSignalingService'] = None def is_feature_active(self, *, block: 'Block', feature: Feature) -> bool: """Returns whether a Feature is active at a certain block.""" @@ -113,6 +115,10 @@ def get_state(self, *, block: 'Block', feature: Feature) -> FeatureState: previous_state=previous_boundary_state ) + if new_state == FeatureState.MUST_SIGNAL: + assert self.bit_signaling_service is not None + self.bit_signaling_service.on_must_signal(feature) + # We cache the just calculated state of the current block _without saving it_, as it may still be unverified, # so we cannot persist its metadata. That's why we cache and save the previous boundary block above. 
block.set_feature_state(feature=feature, state=new_state) diff --git a/hathor/feature_activation/settings.py b/hathor/feature_activation/settings.py index aa4c119b4..3d36e052b 100644 --- a/hathor/feature_activation/settings.py +++ b/hathor/feature_activation/settings.py @@ -83,7 +83,7 @@ def _validate_conflicting_bits(cls, features: dict[Feature, Criteria]) -> dict[F first, second = overlap raise ValueError( f'At least one pair of Features have the same bit configured for an overlapping interval: ' - f'{first.feature} and {second.feature}' + f'{first.feature.value} and {second.feature.value}' ) return features diff --git a/hathor/feature_activation/storage/__init__.py b/hathor/feature_activation/storage/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/hathor/feature_activation/storage/feature_activation_storage.py b/hathor/feature_activation/storage/feature_activation_storage.py new file mode 100644 index 000000000..101f213dd --- /dev/null +++ b/hathor/feature_activation/storage/feature_activation_storage.py @@ -0,0 +1,101 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from structlog import get_logger + +from hathor.conf.settings import HathorSettings +from hathor.exception import InitializationError +from hathor.feature_activation.feature import Feature +from hathor.feature_activation.model.criteria import Criteria +from hathor.feature_activation.settings import Settings as FeatureActivationSettings +from hathor.storage import RocksDBStorage + +_CF_NAME_META = b'feature-activation-metadata' +_KEY_SETTINGS = b'feature-activation-settings' + +logger = get_logger() + + +class FeatureActivationStorage: + __slots__ = ('_log', '_settings', '_db', '_cf_meta') + + def __init__(self, *, settings: HathorSettings, rocksdb_storage: RocksDBStorage) -> None: + self._log = logger.new() + self._settings = settings + self._db = rocksdb_storage.get_db() + self._cf_meta = rocksdb_storage.get_or_create_column_family(_CF_NAME_META) + + def reset_settings(self) -> None: + """Reset feature settings from the database.""" + self._db.delete((self._cf_meta, _KEY_SETTINGS)) + + def validate_settings(self) -> None: + """Validate new feature settings against the previous configuration from the database.""" + new_settings = self._settings.FEATURE_ACTIVATION + db_settings_bytes: bytes | None = self._db.get((self._cf_meta, _KEY_SETTINGS)) + + if not db_settings_bytes: + self._save_settings(new_settings) + return + + db_settings: FeatureActivationSettings = FeatureActivationSettings.parse_raw(db_settings_bytes) + db_basic_settings = db_settings.copy(deep=True, exclude={'features'}) + new_basic_settings = new_settings.copy(deep=True, exclude={'features'}) + + self._validate_basic_settings(db_basic_settings=db_basic_settings, new_basic_settings=new_basic_settings) + self._validate_features(db_features=db_settings.features, new_features=new_settings.features) + self._save_settings(new_settings) + + def _validate_basic_settings( + self, + *, + db_basic_settings: FeatureActivationSettings, + new_basic_settings: FeatureActivationSettings + ) -> None: + """Validate 
that the basic feature settings are the same.""" + if new_basic_settings != db_basic_settings: + self._log.error( + 'Feature Activation basic settings are incompatible with previous settings.', + previous_settings=db_basic_settings, new_settings=new_basic_settings + ) + raise InitializationError() + + def _validate_features( + self, + *, + db_features: dict[Feature, Criteria], + new_features: dict[Feature, Criteria] + ) -> None: + """Validate that all previous features exist and are the same.""" + for db_feature, db_criteria in db_features.items(): + new_criteria = new_features.get(db_feature) + + if not new_criteria: + self._log.error( + 'Configuration for existing feature missing in new settings.', + feature=db_feature, previous_features=db_features, new_features=new_features + ) + raise InitializationError() + + if new_criteria != db_criteria: + self._log.error( + 'Criteria for feature is different than previous settings.', + feature=db_feature, previous_criteria=db_criteria, new_criteria=new_criteria + ) + raise InitializationError() + + def _save_settings(self, settings: FeatureActivationSettings) -> None: + """Save feature settings to the database.""" + settings_bytes = settings.json_dumpb() + + self._db.put((self._cf_meta, _KEY_SETTINGS), settings_bytes) diff --git a/hathor/indexes/address_index.py b/hathor/indexes/address_index.py index 9711e985f..2d08e4751 100644 --- a/hathor/indexes/address_index.py +++ b/hathor/indexes/address_index.py @@ -92,8 +92,11 @@ def get_from_address(self, address: str) -> list[bytes]: raise NotImplementedError @abstractmethod - def get_sorted_from_address(self, address: str) -> list[bytes]: + def get_sorted_from_address(self, address: str, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: """ Get a sorted list of transaction hashes of an address + + `tx_start` serves as a pagination marker, indicating the starting position for the iteration. + When tx_start is None, the iteration begins from the initial element. 
""" raise NotImplementedError diff --git a/hathor/indexes/memory_address_index.py b/hathor/indexes/memory_address_index.py index 25588e594..4360bda21 100644 --- a/hathor/indexes/memory_address_index.py +++ b/hathor/indexes/memory_address_index.py @@ -49,8 +49,8 @@ def add_tx(self, tx: BaseTransaction) -> None: def get_from_address(self, address: str) -> list[bytes]: return list(self._get_from_key(address)) - def get_sorted_from_address(self, address: str) -> list[bytes]: - return list(self._get_sorted_from_key(address)) + def get_sorted_from_address(self, address: str, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + return self._get_sorted_from_key(address, tx_start) def is_address_empty(self, address: str) -> bool: return self._is_key_empty(address) diff --git a/hathor/indexes/memory_tx_group_index.py b/hathor/indexes/memory_tx_group_index.py index 5b8415905..3d99ecd35 100644 --- a/hathor/indexes/memory_tx_group_index.py +++ b/hathor/indexes/memory_tx_group_index.py @@ -14,7 +14,7 @@ from abc import abstractmethod from collections import defaultdict -from typing import Iterable, Sized, TypeVar +from typing import Iterable, Optional, Sized, TypeVar from structlog import get_logger @@ -31,7 +31,7 @@ class MemoryTxGroupIndex(TxGroupIndex[KT]): """Memory implementation of the TxGroupIndex. This class is abstract and cannot be used directly. 
""" - index: defaultdict[KT, set[bytes]] + index: defaultdict[KT, set[tuple[int, bytes]]] def __init__(self) -> None: self.force_clear() @@ -40,7 +40,7 @@ def force_clear(self) -> None: self.index = defaultdict(set) def _add_tx(self, key: KT, tx: BaseTransaction) -> None: - self.index[key].add(not_none(tx.hash)) + self.index[key].add((tx.timestamp, not_none(tx.hash))) @abstractmethod def _extract_keys(self, tx: BaseTransaction) -> Iterable[KT]: @@ -57,13 +57,21 @@ def remove_tx(self, tx: BaseTransaction) -> None: assert tx.hash is not None for key in self._extract_keys(tx): - self.index[key].discard(tx.hash) + self.index[key].discard((tx.timestamp, tx.hash)) def _get_from_key(self, key: KT) -> Iterable[bytes]: - yield from self.index[key] - - def _get_sorted_from_key(self, key: KT) -> Iterable[bytes]: - return sorted(self.index[key]) + for _, h in self.index[key]: + yield h + + def _get_sorted_from_key(self, key: KT, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + sorted_elements = sorted(self.index[key]) + found = False + for _, h in sorted_elements: + if tx_start and h == tx_start.hash: + found = True + + if found or not tx_start: + yield h def _is_key_empty(self, key: KT) -> bool: return not bool(self.index[key]) diff --git a/hathor/indexes/rocksdb_address_index.py b/hathor/indexes/rocksdb_address_index.py index f9f1c0322..cd7f78096 100644 --- a/hathor/indexes/rocksdb_address_index.py +++ b/hathor/indexes/rocksdb_address_index.py @@ -66,8 +66,8 @@ def add_tx(self, tx: BaseTransaction) -> None: def get_from_address(self, address: str) -> list[bytes]: return list(self._get_from_key(address)) - def get_sorted_from_address(self, address: str) -> list[bytes]: - return list(self._get_sorted_from_key(address)) + def get_sorted_from_address(self, address: str, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + return self._get_sorted_from_key(address, tx_start) def is_address_empty(self, address: str) -> bool: return 
self._is_key_empty(address) diff --git a/hathor/indexes/rocksdb_tx_group_index.py b/hathor/indexes/rocksdb_tx_group_index.py index bbbe19790..cf47e5dc8 100644 --- a/hathor/indexes/rocksdb_tx_group_index.py +++ b/hathor/indexes/rocksdb_tx_group_index.py @@ -108,9 +108,15 @@ def remove_tx(self, tx: BaseTransaction) -> None: self._db.delete((self._cf, self._to_rocksdb_key(key, tx))) def _get_from_key(self, key: KT) -> Iterable[bytes]: + return self._util_get_from_key(key) + + def _get_sorted_from_key(self, key: KT, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + return self._util_get_from_key(key, tx_start) + + def _util_get_from_key(self, key: KT, tx: Optional[BaseTransaction] = None) -> Iterable[bytes]: self.log.debug('seek to', key=key) it = self._db.iterkeys(self._cf) - it.seek(self._to_rocksdb_key(key)) + it.seek(self._to_rocksdb_key(key, tx)) for _cf, rocksdb_key in it: key2, _, tx_hash = self._from_rocksdb_key(rocksdb_key) if key2 != key: @@ -119,9 +125,6 @@ def _get_from_key(self, key: KT) -> Iterable[bytes]: yield tx_hash self.log.debug('seek end') - def _get_sorted_from_key(self, key: KT) -> Iterable[bytes]: - return self._get_from_key(key) - def _is_key_empty(self, key: KT) -> bool: self.log.debug('seek to', key=key) it = self._db.iterkeys(self._cf) diff --git a/hathor/indexes/tx_group_index.py b/hathor/indexes/tx_group_index.py index 4041917f5..139245fe9 100644 --- a/hathor/indexes/tx_group_index.py +++ b/hathor/indexes/tx_group_index.py @@ -13,7 +13,7 @@ # limitations under the License. 
from abc import abstractmethod -from typing import Generic, Iterable, Sized, TypeVar +from typing import Generic, Iterable, Optional, Sized, TypeVar from structlog import get_logger @@ -49,8 +49,12 @@ def _get_from_key(self, key: KT) -> Iterable[bytes]: raise NotImplementedError @abstractmethod - def _get_sorted_from_key(self, key: KT) -> Iterable[bytes]: - """Get all transactions that have a given key, sorted by timestamp.""" + def _get_sorted_from_key(self, key: KT, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + """Get all transactions that have a given key, sorted by timestamp. + + `tx_start` serves as a pagination marker, indicating the starting position for the iteration. + When tx_start is None, the iteration begins from the initial element. + """ raise NotImplementedError @abstractmethod diff --git a/hathor/manager.py b/hathor/manager.py index 566a7c936..928c383ad 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -41,6 +41,7 @@ RewardLockedError, SpendingVoidedError, ) +from hathor.execution_manager import ExecutionManager from hathor.feature_activation.bit_signaling_service import BitSignalingService from hathor.feature_activation.feature import Feature from hathor.feature_activation.feature_service import FeatureService @@ -56,8 +57,8 @@ from hathor.stratum import StratumFactory from hathor.transaction import BaseTransaction, Block, MergeMinedBlock, Transaction, TxVersion, sum_weights from hathor.transaction.exceptions import TxValidationError -from hathor.transaction.storage import TransactionStorage from hathor.transaction.storage.exceptions import TransactionDoesNotExist +from hathor.transaction.storage.transaction_storage import TransactionStorage from hathor.transaction.storage.tx_allow_scope import TxAllowScope from hathor.types import Address, VertexId from hathor.util import EnvironmentInfo, LogDuration, Random, calculate_min_significant_weight, not_none @@ -88,30 +89,33 @@ class UnhealthinessReason(str, Enum): # This is 
the interval to be used by the task to check if the node is synced CHECK_SYNC_STATE_INTERVAL = 30 # seconds - def __init__(self, - reactor: Reactor, - *, - settings: HathorSettings, - pubsub: PubSubManager, - consensus_algorithm: ConsensusAlgorithm, - daa: DifficultyAdjustmentAlgorithm, - peer_id: PeerId, - tx_storage: TransactionStorage, - p2p_manager: ConnectionsManager, - event_manager: EventManager, - feature_service: FeatureService, - bit_signaling_service: BitSignalingService, - verification_service: VerificationService, - cpu_mining_service: CpuMiningService, - network: str, - hostname: Optional[str] = None, - wallet: Optional[BaseWallet] = None, - capabilities: Optional[list[str]] = None, - checkpoints: Optional[list[Checkpoint]] = None, - rng: Optional[Random] = None, - environment_info: Optional[EnvironmentInfo] = None, - full_verification: bool = False, - enable_event_queue: bool = False): + def __init__( + self, + reactor: Reactor, + *, + settings: HathorSettings, + pubsub: PubSubManager, + consensus_algorithm: ConsensusAlgorithm, + daa: DifficultyAdjustmentAlgorithm, + peer_id: PeerId, + tx_storage: TransactionStorage, + p2p_manager: ConnectionsManager, + event_manager: EventManager, + feature_service: FeatureService, + bit_signaling_service: BitSignalingService, + verification_service: VerificationService, + cpu_mining_service: CpuMiningService, + network: str, + execution_manager: ExecutionManager, + hostname: Optional[str] = None, + wallet: Optional[BaseWallet] = None, + capabilities: Optional[list[str]] = None, + checkpoints: Optional[list[Checkpoint]] = None, + rng: Optional[Random] = None, + environment_info: Optional[EnvironmentInfo] = None, + full_verification: bool = False, + enable_event_queue: bool = False, + ) -> None: """ :param reactor: Twisted reactor which handles the mainloop and the events. :param peer_id: Id of this node. 
@@ -129,6 +133,7 @@ def __init__(self, 'Either enable it, or use the reset-event-queue CLI command to remove all event-related data' ) + self._execution_manager = execution_manager self._settings = settings self.daa = daa self._cmd_path: Optional[str] = None @@ -250,6 +255,15 @@ def start(self) -> None: self.is_started = True self.log.info('start manager', network=self.network) + + if self.tx_storage.is_full_node_crashed(): + self.log.error( + 'Error initializing node. The last time you executed your full node it wasn\'t stopped correctly. ' + 'The storage is not reliable anymore and, because of that, you must remove your storage and do a ' + 'full sync (either from scratch or from a snapshot).' + ) + sys.exit(-1) + # If it's a full verification, we save on the storage that we are starting it # this is required because if we stop the initilization in the middle, the metadata # saved on the storage is not reliable anymore, only if we finish it @@ -319,7 +333,7 @@ def start(self) -> None: self.stratum_factory.start() # Start running - self.tx_storage.start_running_manager() + self.tx_storage.start_running_manager(self._execution_manager) def stop(self) -> Deferred: if not self.is_started: @@ -997,13 +1011,7 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non tx.update_initial_metadata(save=False) self.tx_storage.save_transaction(tx) self.tx_storage.add_to_indexes(tx) - try: - self.consensus_algorithm.update(tx) - except HathorError as e: - if not fails_silently: - raise InvalidNewTransaction('consensus update failed') from e - self.log.warn('on_new_tx(): consensus update failed', tx=tx.hash_hex, exc_info=True) - return False + self.consensus_algorithm.update(tx) assert self.verification_service.validate_full( tx, @@ -1167,6 +1175,13 @@ def get_cmd_path(self) -> Optional[str]: """Return the cmd path. 
If no cmd path is set, returns None.""" return self._cmd_path + def set_hostname_and_reset_connections(self, new_hostname: str) -> None: + """Set the hostname and reset all connections.""" + old_hostname = self.hostname + self.hostname = new_hostname + self.connections.update_hostname_entrypoints(old_hostname=old_hostname, new_hostname=self.hostname) + self.connections.disconnect_all_peers(force=True) + class ParentTxs(NamedTuple): """ Tuple where the `must_include` hash, when present (at most 1), must be included in a pair, and a list of hashes diff --git a/hathor/p2p/manager.py b/hathor/p2p/manager.py index d180af7c8..d7e7f422b 100644 --- a/hathor/p2p/manager.py +++ b/hathor/p2p/manager.py @@ -16,8 +16,9 @@ from structlog import get_logger from twisted.internet import endpoints +from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IProtocolFactory, IStreamClientEndpoint, IStreamServerEndpoint +from twisted.internet.interfaces import IListeningPort, IProtocolFactory, IStreamClientEndpoint from twisted.internet.task import LoopingCall from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.python.failure import Failure @@ -108,8 +109,11 @@ def __init__(self, self.network = network - # List of addresses to listen for new connections (eg: [tcp:8000]) - self.listen_addresses: list[str] = [] + # List of address descriptions to listen for new connections (eg: [tcp:8000]) + self.listen_address_descriptions: list[str] = [] + + # List of actual IP address instances to listen for new connections + self._listen_addresses: list[IPv4Address | IPv6Address] = [] # List of peer discovery methods. 
self.peer_discoveries: list[PeerDiscovery] = [] @@ -239,9 +243,9 @@ def set_manager(self, manager: 'HathorManager') -> None: self.log.debug('enable sync-v2 indexes') indexes.enable_mempool_index() - def add_listen_address(self, addr: str) -> None: + def add_listen_address_description(self, addr: str) -> None: """Add address to listen for incoming connections.""" - self.listen_addresses.append(addr) + self.listen_address_descriptions.append(addr) def add_peer_discovery(self, peer_discovery: PeerDiscovery) -> None: """Add a peer discovery method.""" @@ -279,7 +283,7 @@ def start(self) -> None: if self._settings.ENABLE_PEER_WHITELIST: self._start_whitelist_reconnect() - for description in self.listen_addresses: + for description in self.listen_address_descriptions: self.listen(description) self.do_discovery() @@ -635,7 +639,7 @@ def connect_to(self, description: str, peer: Optional[PeerId] = None, use_ssl: O peers_count=self._get_peers_count() ) - def listen(self, description: str, use_ssl: Optional[bool] = None) -> IStreamServerEndpoint: + def listen(self, description: str, use_ssl: Optional[bool] = None) -> None: """ Start to listen for new connection according to the description. If `ssl` is True, then the connection will be wraped by a TLS. 
@@ -661,20 +665,43 @@ def listen(self, description: str, use_ssl: Optional[bool] = None) -> IStreamSer factory = NetfilterFactory(self, factory) - self.log.info('listen on', endpoint=description) - endpoint.listen(factory) + self.log.info('trying to listen on', endpoint=description) + deferred: Deferred[IListeningPort] = endpoint.listen(factory) + deferred.addCallback(self._on_listen_success, description) + + def _on_listen_success(self, listening_port: IListeningPort, description: str) -> None: + """Callback to be called when listening to an endpoint succeeds.""" + self.log.info('success listening on', endpoint=description) + address = listening_port.getHost() + + if not isinstance(address, (IPv4Address, IPv6Address)): + self.log.error(f'unhandled address type for endpoint "{description}": {str(type(address))}') + return + + self._listen_addresses.append(address) - # XXX: endpoint: IStreamServerEndpoint does not intrinsically have a port, but in practice all concrete cases - # that we have will have a _port attribute - port = getattr(endpoint, '_port', None) assert self.manager is not None - if self.manager.hostname and port is not None: - proto, _, _ = description.partition(':') - address = '{}://{}:{}'.format(proto, self.manager.hostname, port) - assert self.manager.my_peer is not None - self.manager.my_peer.entrypoints.append(address) + if self.manager.hostname: + self._add_hostname_entrypoint(self.manager.hostname, address) - return endpoint + def update_hostname_entrypoints(self, *, old_hostname: str | None, new_hostname: str) -> None: + """Add new hostname entrypoints according to the listen addresses, and remove any old entrypoint.""" + assert self.manager is not None + for address in self._listen_addresses: + if old_hostname is not None: + old_address_str = self._get_hostname_address_str(old_hostname, address) + if old_address_str in self.my_peer.entrypoints: + self.my_peer.entrypoints.remove(old_address_str) + + self._add_hostname_entrypoint(new_hostname, 
address) + + def _add_hostname_entrypoint(self, hostname: str, address: IPv4Address | IPv6Address) -> None: + hostname_address_str = self._get_hostname_address_str(hostname, address) + self.my_peer.entrypoints.append(hostname_address_str) + + @staticmethod + def _get_hostname_address_str(hostname: str, address: IPv4Address | IPv6Address) -> str: + return '{}://{}:{}'.format(address.type, hostname, address.port).lower() def get_connection_to_drop(self, protocol: HathorProtocol) -> HathorProtocol: """ When there are duplicate connections, determine which one should be dropped. @@ -796,3 +823,9 @@ def _sync_rotate_if_needed(self, *, force: bool = False) -> None: for peer_id in info.to_enable: self.connected_peers[peer_id].enable_sync() + + def reload_entrypoints_and_connections(self) -> None: + """Kill all connections and reload entrypoints from the original peer config file.""" + self.log.warn('Killing all connections and resetting entrypoints...') + self.disconnect_all_peers(force=True) + self.my_peer.reload_entrypoints_from_source_file() diff --git a/hathor/p2p/peer_id.py b/hathor/p2p/peer_id.py index 678111f1c..785af6a4f 100644 --- a/hathor/p2p/peer_id.py +++ b/hathor/p2p/peer_id.py @@ -14,6 +14,7 @@ import base64 import hashlib +import json from enum import Enum from math import inf from typing import TYPE_CHECKING, Any, Optional, cast @@ -24,6 +25,7 @@ from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import padding, rsa from OpenSSL.crypto import X509, PKey +from structlog import get_logger from twisted.internet.interfaces import ISSLTransport from twisted.internet.ssl import Certificate, CertificateOptions, TLSVersion, trustRootFromCertificates @@ -35,6 +37,8 @@ if TYPE_CHECKING: from hathor.p2p.protocol import HathorProtocol # noqa: F401 +logger = get_logger() + class InvalidPeerIdException(Exception): pass @@ -64,8 +68,10 @@ class PeerId: retry_attempts: int # how many retries were made last_seen: 
float # last time this peer was seen flags: set[str] + source_file: str | None def __init__(self, auto_generate_keys: bool = True) -> None: + self._log = logger.new() self._settings = get_global_settings() self.id = None self.private_key = None @@ -159,9 +165,15 @@ def verify_signature(self, signature: bytes, data: bytes) -> bool: else: return True + @classmethod + def create_from_json_path(cls, path: str) -> 'PeerId': + """Create a new PeerId from a JSON file.""" + data = json.load(open(path, 'r')) + return PeerId.create_from_json(data) + @classmethod def create_from_json(cls, data: dict[str, Any]) -> 'PeerId': - """ Create a new PeerId from a JSON. + """ Create a new PeerId from JSON data. It is used both to load a PeerId from disk and to create a PeerId from a peer connection. @@ -408,3 +420,20 @@ def validate_certificate(self, protocol: 'HathorProtocol') -> bool: return False return True + + def reload_entrypoints_from_source_file(self) -> None: + """Update this PeerId's entrypoints from the json file.""" + if not self.source_file: + raise Exception('Trying to reload entrypoints but no peer config file was provided.') + + new_peer_id = PeerId.create_from_json_path(self.source_file) + + if new_peer_id.id != self.id: + self._log.error( + 'Ignoring peer id file update because the peer_id does not match.', + current_peer_id=self.id, + new_peer_id=new_peer_id.id, + ) + return + + self.entrypoints = new_peer_id.entrypoints diff --git a/hathor/p2p/sync_v1/agent.py b/hathor/p2p/sync_v1/agent.py index 110514a83..a1a03a27b 100644 --- a/hathor/p2p/sync_v1/agent.py +++ b/hathor/p2p/sync_v1/agent.py @@ -19,7 +19,7 @@ from weakref import WeakSet from structlog import get_logger -from twisted.internet.defer import Deferred, inlineCallbacks +from twisted.internet.defer import CancelledError, Deferred, inlineCallbacks from twisted.internet.interfaces import IDelayedCall from hathor.conf.get_settings import get_global_settings @@ -685,12 +685,13 @@ def on_tx_success(self, tx: 
'BaseTransaction') -> 'BaseTransaction': self.update_received_stats(tx, success) return tx - def on_get_data_failed(self, reason: 'Failure', hash_bytes: bytes) -> None: + def on_get_data_failed(self, failure: 'Failure', hash_bytes: bytes) -> None: """ Method called when get_data deferred fails. We need this errback because otherwise the sync crashes when the deferred is canceled. We should just log a warning because it will continue the sync and will try to get this tx again. """ - self.log.warn('failed to download tx', tx=hash_bytes.hex(), reason=reason) + log_func = self.log.debug if isinstance(failure.value, CancelledError) else self.log.warn + log_func('failed to download tx', tx=hash_bytes.hex(), reason=failure) def is_sync_enabled(self) -> bool: """Return True if sync is enabled for this connection.""" diff --git a/hathor/p2p/sync_v1/downloader.py b/hathor/p2p/sync_v1/downloader.py index 670b1133a..2b3b786bc 100644 --- a/hathor/p2p/sync_v1/downloader.py +++ b/hathor/p2p/sync_v1/downloader.py @@ -19,6 +19,7 @@ from structlog import get_logger from twisted.internet import defer from twisted.internet.defer import Deferred +from twisted.python.failure import Failure from hathor.conf.get_settings import get_global_settings from hathor.transaction.storage.exceptions import TransactionDoesNotExist @@ -238,10 +239,10 @@ def on_deferred_timeout(self, result: Any, timeout: int, *, tx_id: bytes) -> Non """ self.retry(tx_id) - def on_error(self, result: Any) -> None: + def on_error(self, failure: Failure) -> None: """ Errback for downloading deferred. """ - self.log.error('failed to download tx', err=result) + self.log.error('failed to download tx', err=failure, traceback=failure.getTraceback()) def on_new_tx(self, tx: 'BaseTransaction') -> None: """ This is called when a new transaction arrives. 
diff --git a/hathor/p2p/sync_v2/agent.py b/hathor/p2p/sync_v2/agent.py index 8382cdefc..b2ee4543b 100644 --- a/hathor/p2p/sync_v2/agent.py +++ b/hathor/p2p/sync_v2/agent.py @@ -142,7 +142,8 @@ def __init__(self, protocol: 'HathorProtocol', reactor: Reactor) -> None: # Saves if I am in the middle of a mempool sync # we don't execute any sync while in the middle of it self.mempool_manager = SyncMempoolManager(self) - self._receiving_tips: Optional[list[bytes]] = None + self._receiving_tips: Optional[list[VertexId]] = None + self.max_receiving_tips: int = self._settings.MAX_MEMPOOL_RECEIVING_TIPS # Cache for get_tx calls self._get_tx_cache: OrderedDict[bytes, BaseTransaction] = OrderedDict() @@ -476,7 +477,13 @@ def handle_tips(self, payload: str) -> None: data = json.loads(payload) data = [bytes.fromhex(x) for x in data] # filter-out txs we already have - self._receiving_tips.extend(tx_id for tx_id in data if not self.partial_vertex_exists(tx_id)) + try: + self._receiving_tips.extend(VertexId(tx_id) for tx_id in data if not self.partial_vertex_exists(tx_id)) + except ValueError: + self.protocol.send_error_and_close_connection('Invalid transaction ID received') + # XXX: it's OK to do this *after* the extend because the payload is limited by the line protocol + if len(self._receiving_tips) > self.max_receiving_tips: + self.protocol.send_error_and_close_connection(f'Too many tips: {len(self._receiving_tips)}') def handle_tips_end(self, _payload: str) -> None: """ Handle a TIPS-END message. diff --git a/hathor/p2p/utils.py b/hathor/p2p/utils.py index 66f1bda37..4e2935a2e 100644 --- a/hathor/p2p/utils.py +++ b/hathor/p2p/utils.py @@ -33,18 +33,18 @@ from hathor.transaction.genesis import get_representation_for_all_genesis -def discover_hostname() -> Optional[str]: - """ Try to discover your hostname. It is a synchonous operation and +def discover_hostname(timeout: float | None = None) -> Optional[str]: + """ Try to discover your hostname. 
It is a synchronous operation and should not be called from twisted main loop. """ - return discover_ip_ipify() + return discover_ip_ipify(timeout) -def discover_ip_ipify() -> Optional[str]: +def discover_ip_ipify(timeout: float | None = None) -> Optional[str]: """ Try to discover your IP address using ipify's api. - It is a synchonous operation and should not be called from twisted main loop. + It is a synchronous operation and should not be called from twisted main loop. """ - response = requests.get('https://api.ipify.org') + response = requests.get('https://api.ipify.org', timeout=timeout) if response.ok: # It may be either an ipv4 or ipv6 in string format. ip = response.text diff --git a/hathor/profiler/resources/cpu_profiler.py b/hathor/profiler/resources/cpu_profiler.py index 88e87329b..f07b2bd8a 100644 --- a/hathor/profiler/resources/cpu_profiler.py +++ b/hathor/profiler/resources/cpu_profiler.py @@ -102,7 +102,7 @@ def render_OPTIONS(self, request: Request) -> int: CPUProfilerResource.openapi = { - '/profiler': { + '/top': { 'x-visibility': 'private', 'get': { 'operationId': 'cpu-profiler', diff --git a/hathor/simulator/tx_generator.py b/hathor/simulator/tx_generator.py index 8c977c870..ead648da5 100644 --- a/hathor/simulator/tx_generator.py +++ b/hathor/simulator/tx_generator.py @@ -13,22 +13,25 @@ # limitations under the License. 
from collections import deque -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Callable, TypeAlias from structlog import get_logger from hathor.conf.get_settings import get_global_settings from hathor.simulator.utils import NoCandidatesError, gen_new_double_spending, gen_new_tx +from hathor.transaction import Transaction from hathor.transaction.exceptions import RewardLocked +from hathor.types import VertexId from hathor.util import Random from hathor.wallet.exceptions import InsufficientFunds if TYPE_CHECKING: from hathor.manager import HathorManager - from hathor.transaction import Transaction logger = get_logger() +GenTxFunction: TypeAlias = Callable[['HathorManager', str, int], Transaction] + class RandomTransactionGenerator: """ Generates random transactions without mining. It is supposed to be used @@ -38,7 +41,8 @@ class RandomTransactionGenerator: MAX_LATEST_TRANSACTIONS_LEN = 10 def __init__(self, manager: 'HathorManager', rng: Random, *, - rate: float, hashpower: float, ignore_no_funds: bool = False): + rate: float, hashpower: float, ignore_no_funds: bool = False, + custom_gen_new_tx: GenTxFunction | None = None): """ :param: rate: Number of transactions per second :param: hashpower: Number of hashes per second @@ -58,11 +62,16 @@ def __init__(self, manager: 'HathorManager', rng: Random, *, self.delayedcall = None self.log = logger.new() self.rng = rng + self.gen_new_tx: GenTxFunction + if custom_gen_new_tx is not None: + self.gen_new_tx = custom_gen_new_tx + else: + self.gen_new_tx = gen_new_tx # Most recent transactions generated here. # The lowest index has the most recent transaction. 
self.transactions_found: int = 0 - self.latest_transactions: deque[Transaction] = deque() + self.latest_transactions: deque[VertexId] = deque() self.double_spending_only = False @@ -115,7 +124,7 @@ def new_tx_step1(self): if not self.double_spending_only: try: - tx = gen_new_tx(self.manager, address, value) + tx = self.gen_new_tx(self.manager, address, value) except (InsufficientFunds, RewardLocked): self.delayedcall = self.clock.callLater(0, self.schedule_next_transaction) return diff --git a/hathor/sysctl/__init__.py b/hathor/sysctl/__init__.py index af9d30e17..a73637650 100644 --- a/hathor/sysctl/__init__.py +++ b/hathor/sysctl/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. from hathor.sysctl.core.manager import HathorManagerSysctl +from hathor.sysctl.feature_activation.manager import FeatureActivationSysctl from hathor.sysctl.p2p.manager import ConnectionsManagerSysctl from hathor.sysctl.sysctl import Sysctl from hathor.sysctl.websocket.manager import WebsocketManagerSysctl @@ -22,4 +23,5 @@ 'ConnectionsManagerSysctl', 'HathorManagerSysctl', 'WebsocketManagerSysctl', + 'FeatureActivationSysctl', ] diff --git a/hathor/sysctl/feature_activation/__init__.py b/hathor/sysctl/feature_activation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/hathor/sysctl/feature_activation/manager.py b/hathor/sysctl/feature_activation/manager.py new file mode 100644 index 000000000..2649d26b8 --- /dev/null +++ b/hathor/sysctl/feature_activation/manager.py @@ -0,0 +1,72 @@ +# Copyright 2024 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from hathor.feature_activation.bit_signaling_service import BitSignalingService +from hathor.feature_activation.feature import Feature +from hathor.sysctl.sysctl import Sysctl + + +class FeatureActivationSysctl(Sysctl): + def __init__(self, bit_signaling_service: BitSignalingService) -> None: + super().__init__() + self._bit_signaling_service = bit_signaling_service + + self.register( + path='supported_features', + getter=self.get_support_features, + setter=None, + ) + self.register( + path='not_supported_features', + getter=self.get_not_support_features, + setter=None, + ) + self.register( + path='signaling_features', + getter=self.get_signaling_features, + setter=None, + ) + self.register( + path='add_support', + getter=None, + setter=self.add_feature_support, + ) + self.register( + path='remove_support', + getter=None, + setter=self.remove_feature_support, + ) + + def get_support_features(self) -> list[str]: + """Get a list of feature names with enabled support.""" + return [feature.value for feature in self._bit_signaling_service.get_support_features()] + + def get_not_support_features(self) -> list[str]: + """Get a list of feature names with disabled support.""" + return [feature.value for feature in self._bit_signaling_service.get_not_support_features()] + + def add_feature_support(self, *features: str) -> None: + """Explicitly add support for a feature by enabling its signaling bit.""" + for feature in features: + self._bit_signaling_service.add_feature_support(Feature[feature]) + + def remove_feature_support(self, *features: str) -> None: + """Explicitly remove support for a feature by disabling its signaling bit.""" + for feature in features: + self._bit_signaling_service.remove_feature_support(Feature[feature]) + + def get_signaling_features(self) -> list[str]: + """Get a list of feature names that are currently in a signaling state.""" + features = 
self._bit_signaling_service.get_best_block_signaling_features().keys() + return [feature.value for feature in features] diff --git a/hathor/sysctl/p2p/manager.py b/hathor/sysctl/p2p/manager.py index ab6ef5902..e821039bd 100644 --- a/hathor/sysctl/p2p/manager.py +++ b/hathor/sysctl/p2p/manager.py @@ -16,9 +16,12 @@ from hathor.p2p.manager import ConnectionsManager from hathor.p2p.sync_version import SyncVersion +from hathor.p2p.utils import discover_hostname from hathor.sysctl.exception import SysctlException from hathor.sysctl.sysctl import Sysctl, signal_handler_safe +AUTO_HOSTNAME_TIMEOUT_SECONDS: float = 5 + def parse_text(text: str) -> list[str]: """Parse text per line skipping empty lines and comments.""" @@ -103,6 +106,21 @@ def __init__(self, connections: ConnectionsManager) -> None: None, self.set_kill_connection, ) + self.register( + 'hostname', + self.get_hostname, + self.set_hostname, + ) + self.register( + 'refresh_auto_hostname', + None, + self.refresh_auto_hostname, + ) + self.register( + 'reload_entrypoints_and_connections', + None, + self.reload_entrypoints_and_connections, + ) def set_force_sync_rotate(self) -> None: """Force a sync rotate.""" @@ -217,3 +235,32 @@ def set_kill_connection(self, peer_id: str, force: bool = False) -> None: self.log.warn('Killing connection', peer_id=peer_id) raise SysctlException('peer-id is not connected') conn.disconnect(force=force) + + def get_hostname(self) -> str | None: + """Return the configured hostname.""" + assert self.connections.manager is not None + return self.connections.manager.hostname + + def set_hostname(self, hostname: str) -> None: + """Set the hostname and reset all connections.""" + assert self.connections.manager is not None + self.connections.manager.set_hostname_and_reset_connections(hostname) + + def refresh_auto_hostname(self) -> None: + """ + Automatically discover the hostname and set it, if it's found. This operation blocks the event loop. + Then, reset all connections. 
+ """ + assert self.connections.manager is not None + try: + hostname = discover_hostname(timeout=AUTO_HOSTNAME_TIMEOUT_SECONDS) + except Exception as e: + self.log.error(f'Could not refresh hostname. Error: {str(e)}') + return + + if hostname: + self.connections.manager.set_hostname_and_reset_connections(hostname) + + def reload_entrypoints_and_connections(self) -> None: + """Kill all connections and reload entrypoints from the peer config file.""" + self.connections.reload_entrypoints_and_connections() diff --git a/hathor/sysctl/sysctl.py b/hathor/sysctl/sysctl.py index 79bf3c5b0..28339365d 100644 --- a/hathor/sysctl/sysctl.py +++ b/hathor/sysctl/sysctl.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Callable, Iterator, NamedTuple, Optional +from typing import Any, Callable, Iterator, NamedTuple, Optional, ParamSpec, TypeVar from pydantic import validate_arguments from structlog import get_logger @@ -21,16 +21,18 @@ Getter = Callable[[], Any] Setter = Callable[..., None] +P = ParamSpec('P') +T = TypeVar('T') logger = get_logger() -def signal_handler_safe(f): +def signal_handler_safe(f: Callable[P, T]) -> Callable[P, T]: """Decorator to mark methods as signal handler safe. It should only be used if that method can be executed during a signal handling. 
Notice that a signal handling can pause the code execution at any point and the execution will resume after.""" - f._signal_handler_safe = True + f._signal_handler_safe = True # type: ignore[attr-defined] return f diff --git a/hathor/transaction/aux_pow.py b/hathor/transaction/aux_pow.py index c6772ac88..103f6997c 100644 --- a/hathor/transaction/aux_pow.py +++ b/hathor/transaction/aux_pow.py @@ -18,6 +18,8 @@ logger = get_logger() +MAX_MERKLE_PATH_COUNT = 100 + class BitcoinAuxPow(NamedTuple): header_head: bytes # 36 bytes @@ -96,8 +98,11 @@ def from_bytes(cls, b: bytes) -> 'BitcoinAuxPow': coinbase_head = read_bytes(a) coinbase_tail = read_bytes(a) c = read_varint(a) + if c > MAX_MERKLE_PATH_COUNT: + raise ValueError(f'invalid merkle path count: {c} > {MAX_MERKLE_PATH_COUNT}') merkle_path = [] for _ in range(c): + assert len(a) >= 32 merkle_path.append(bytes(a[:32])) del a[:32] header_tail = read_nbytes(a, 12) diff --git a/hathor/transaction/base_transaction.py b/hathor/transaction/base_transaction.py index 958c59c05..79104f3d5 100644 --- a/hathor/transaction/base_transaction.py +++ b/hathor/transaction/base_transaction.py @@ -168,7 +168,7 @@ def __init__(self, self.outputs = outputs or [] self.parents = parents or [] self.storage = storage - self.hash = hash # Stored as bytes. + self._hash: VertexId | None = hash # Stored as bytes. 
@classproperty def log(cls): @@ -253,7 +253,7 @@ def __eq__(self, other: object) -> bool: """ if not isinstance(other, BaseTransaction): return NotImplemented - if self.hash and other.hash: + if self._hash and other._hash: return self.hash == other.hash return False @@ -265,7 +265,6 @@ def __bytes__(self) -> bytes: return self.get_struct() def __hash__(self) -> int: - assert self.hash is not None return hash(self.hash) @abstractmethod @@ -276,10 +275,19 @@ def calculate_height(self) -> int: def calculate_min_height(self) -> int: raise NotImplementedError + @property + def hash(self) -> VertexId: + assert self._hash is not None, 'Vertex hash must be initialized.' + return self._hash + + @hash.setter + def hash(self, value: VertexId) -> None: + self._hash = value + @property def hash_hex(self) -> str: """Return the current stored hash in hex string format""" - if self.hash is not None: + if self._hash is not None: return self.hash.hex() else: return '' @@ -332,7 +340,7 @@ def is_genesis(self) -> bool: :rtype: bool """ - if self.hash is None: + if self._hash is None: return False from hathor.transaction.genesis import is_genesis return is_genesis(self.hash, settings=self._settings) @@ -451,7 +459,7 @@ def can_validate_full(self) -> bool: """ Check if this transaction is ready to be fully validated, either all deps are full-valid or one is invalid. 
""" assert self.storage is not None - assert self.hash is not None + assert self._hash is not None if self.is_genesis: return True deps = self.get_all_dependencies() @@ -608,7 +616,6 @@ def get_metadata(self, *, force_reload: bool = False, use_storage: bool = True) else: metadata = getattr(self, '_metadata', None) if not metadata and use_storage and self.storage: - assert self.hash is not None metadata = self.storage.get_metadata(self.hash) self._metadata = metadata if not metadata: @@ -619,7 +626,7 @@ def get_metadata(self, *, force_reload: bool = False, use_storage: bool = True) score = self.weight if self.is_genesis else 0 metadata = TransactionMetadata( - hash=self.hash, + hash=self._hash, accumulated_weight=self.weight, height=height, score=score, @@ -627,7 +634,7 @@ def get_metadata(self, *, force_reload: bool = False, use_storage: bool = True) ) self._metadata = metadata if not metadata.hash: - metadata.hash = self.hash + metadata.hash = self._hash metadata._tx_ref = weakref.ref(self) return metadata @@ -638,7 +645,7 @@ def reset_metadata(self) -> None: from hathor.transaction.transaction_metadata import ValidationState assert self.storage is not None score = self.weight if self.is_genesis else 0 - self._metadata = TransactionMetadata(hash=self.hash, + self._metadata = TransactionMetadata(hash=self._hash, score=score, accumulated_weight=self.weight) if self.is_genesis: @@ -724,7 +731,7 @@ def _update_reward_lock_metadata(self) -> None: def _update_parents_children_metadata(self) -> None: """Update the txs/block parent's children metadata.""" - assert self.hash is not None + assert self._hash is not None assert self.storage is not None for parent in self.get_parents(existing_only=True): @@ -792,7 +799,6 @@ def to_json(self, decode_script: bool = False, include_metadata: bool = False) - return data def to_json_extended(self) -> dict[str, Any]: - assert self.hash is not None assert self.storage is not None def serialize_output(tx: BaseTransaction, tx_out: 
TxOutput) -> dict[str, Any]: @@ -824,7 +830,6 @@ def serialize_output(tx: BaseTransaction, tx_out: TxOutput) -> dict[str, Any]: tx2 = self.storage.get_transaction(tx_in.tx_id) tx2_out = tx2.outputs[tx_in.index] output = serialize_output(tx2, tx2_out) - assert tx2.hash is not None output['tx_id'] = tx2.hash_hex output['index'] = tx_in.index ret['inputs'].append(output) @@ -837,7 +842,7 @@ def serialize_output(tx: BaseTransaction, tx_out: TxOutput) -> dict[str, Any]: return ret - def clone(self, *, include_metadata: bool = True) -> 'BaseTransaction': + def clone(self, *, include_metadata: bool = True, include_storage: bool = True) -> 'BaseTransaction': """Return exact copy without sharing memory, including metadata if loaded. :return: Transaction or Block copy @@ -846,7 +851,8 @@ def clone(self, *, include_metadata: bool = True) -> 'BaseTransaction': if hasattr(self, '_metadata') and include_metadata: assert self._metadata is not None # FIXME: is this actually true or do we have to check if not None new_tx._metadata = self._metadata.clone() - new_tx.storage = self.storage + if include_storage: + new_tx.storage = self.storage return new_tx @abstractmethod diff --git a/hathor/transaction/storage/simple_memory_storage.py b/hathor/transaction/storage/simple_memory_storage.py new file mode 100644 index 000000000..6e521f052 --- /dev/null +++ b/hathor/transaction/storage/simple_memory_storage.py @@ -0,0 +1,99 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from hathor.transaction import Block, Transaction +from hathor.transaction.base_transaction import BaseTransaction +from hathor.transaction.storage import TransactionStorage +from hathor.transaction.storage.exceptions import TransactionDoesNotExist +from hathor.types import VertexId + + +class SimpleMemoryStorage: + """ + Instances of this class simply facilitate storing some data in memory, specifically for pre-fetched verification + dependencies. + """ + __slots__ = ('_blocks', '_transactions',) + + def __init__(self) -> None: + self._blocks: dict[VertexId, BaseTransaction] = {} + self._transactions: dict[VertexId, BaseTransaction] = {} + + @property + def _vertices(self) -> dict[VertexId, BaseTransaction]: + """Blocks and Transactions together.""" + return {**self._blocks, **self._transactions} + + def get_block(self, block_id: VertexId) -> Block: + """Return a block from the storage, throw if it's not found.""" + block = self._get_vertex(self._blocks, block_id) + assert isinstance(block, Block) + return block + + def get_transaction(self, tx_id: VertexId) -> Transaction: + """Return a transaction from the storage, throw if it's not found.""" + tx = self._get_vertex(self._transactions, tx_id) + assert isinstance(tx, Transaction) + return tx + + @staticmethod + def _get_vertex(storage: dict[VertexId, BaseTransaction], vertex_id: VertexId) -> BaseTransaction: + """Return a vertex from a storage, throw if it's not found.""" + if vertex := storage.get(vertex_id): + return vertex + + raise TransactionDoesNotExist(f'Vertex "{vertex_id.hex()}" does not exist in this SimpleMemoryStorage.') + + def get_parent_block(self, block: Block) -> Block: + """Get the parent block of a block.""" + parent_hash = block.get_block_parent_hash() + + return self.get_block(parent_hash) + + def add_vertices_from_storage(self, storage: TransactionStorage, ids: list[VertexId]) -> None: 
+ """ + Add multiple vertices to this storage. It automatically fetches data from the provided TransactionStorage + and a list of ids. + """ + for vertex_id in ids: + self.add_vertex_from_storage(storage, vertex_id) + + def add_vertex_from_storage(self, storage: TransactionStorage, vertex_id: VertexId) -> None: + """ + Add a vertex to this storage. It automatically fetches data from the provided TransactionStorage and a list + of ids. + """ + if vertex_id in self._vertices: + return + + vertex = storage.get_transaction(vertex_id) + clone = vertex.clone(include_metadata=True, include_storage=False) + + if isinstance(vertex, Block): + self._blocks[vertex_id] = clone + return + + if isinstance(vertex, Transaction): + self._transactions[vertex_id] = clone + return + + raise NotImplementedError + + def get_vertex(self, vertex_id: VertexId) -> BaseTransaction: + # TODO: Currently unused, will be implemented in a next PR. + raise NotImplementedError + + def get_best_block_tips(self) -> list[VertexId]: + # TODO: Currently unused, will be implemented in a next PR. 
+ raise NotImplementedError diff --git a/hathor/transaction/storage/transaction_storage.py b/hathor/transaction/storage/transaction_storage.py index 5b56431cb..fd57323a9 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -24,6 +24,7 @@ from structlog import get_logger from hathor.conf.get_settings import get_global_settings +from hathor.execution_manager import ExecutionManager from hathor.indexes import IndexesManager from hathor.indexes.height_index import HeightInfo from hathor.profiler import get_cpu_profiler @@ -72,6 +73,7 @@ class TransactionStorage(ABC): pubsub: Optional[PubSubManager] indexes: Optional[IndexesManager] + _latest_n_height_tips: list[HeightInfo] log = get_logger() @@ -84,6 +86,9 @@ class TransactionStorage(ABC): # Key storage attribute to save if the manager is running _manager_running_attribute: str = 'manager_running' + # Key storage attribute to save if the full node crashed + _full_node_crashed_attribute: str = 'full_node_crashed' + # Ket storage attribute to save the last time the node started _last_start_attribute: str = 'last_start' @@ -968,9 +973,10 @@ def is_running_full_verification(self) -> bool: """ return self.get_value(self._running_full_verification_attribute) == '1' - def start_running_manager(self) -> None: + def start_running_manager(self, execution_manager: ExecutionManager) -> None: """ Save on storage that manager is running """ + execution_manager.register_on_crash_callback(self.on_full_node_crash) self.add_value(self._manager_running_attribute, '1') def stop_running_manager(self) -> None: @@ -983,6 +989,14 @@ def is_running_manager(self) -> bool: """ return self.get_value(self._manager_running_attribute) == '1' + def on_full_node_crash(self) -> None: + """Save on storage that the full node crashed and cannot be recovered.""" + self.add_value(self._full_node_crashed_attribute, '1') + + def is_full_node_crashed(self) -> bool: + """Return whether the 
full node has crashed.""" + return self.get_value(self._full_node_crashed_attribute) == '1' + def get_last_started_at(self) -> int: """ Return the timestamp when the database was last started. """ diff --git a/hathor/transaction/token_creation_tx.py b/hathor/transaction/token_creation_tx.py index 08156ce90..61a676b2a 100644 --- a/hathor/transaction/token_creation_tx.py +++ b/hathor/transaction/token_creation_tx.py @@ -65,7 +65,6 @@ def update_hash(self) -> None: """ When we update the hash, we also have to update the tokens uid list """ super().update_hash() - assert self.hash is not None self.tokens = [self.hash] def get_funds_fields_from_struct(self, buf: bytes, *, verbose: VerboseCallback = None) -> bytes: @@ -221,7 +220,6 @@ def _get_token_info_from_inputs(self) -> dict[TokenUid, TokenInfo]: token_dict = super()._get_token_info_from_inputs() # we add the created token's info to token_dict, as the creation tx allows for mint/melt - assert self.hash is not None token_dict[self.hash] = TokenInfo(0, True, True) return token_dict diff --git a/hathor/transaction/transaction.py b/hathor/transaction/transaction.py index 54189693d..a9d9fec5a 100644 --- a/hathor/transaction/transaction.py +++ b/hathor/transaction/transaction.py @@ -360,7 +360,7 @@ def is_double_spending(self) -> bool: tx = self.storage.get_transaction(tx_in.tx_id) meta = tx.get_metadata() spent_by = meta.get_output_spent_by(tx_in.index) - if spent_by and spent_by != self.hash: + if spent_by and spent_by != self._hash: return True return False diff --git a/hathor/types.py b/hathor/types.py index f035a8c80..40ad6dead 100644 --- a/hathor/types.py +++ b/hathor/types.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import TypeAlias + # XXX There is a lot of refactor to be done before we can use `NewType`. # So, let's skip using NewType until everything is refactored. 
-VertexId = bytes # NewType('TxId', bytes) -Address = bytes # NewType('Address', bytes) -AddressB58 = str -TxOutputScript = bytes # NewType('TxOutputScript', bytes) -Timestamp = int # NewType('Timestamp', int) -TokenUid = VertexId # NewType('TokenUid', VertexId) -Amount = int # NewType('Amount', int) +VertexId: TypeAlias = bytes # NewType('TxId', bytes) +Address: TypeAlias = bytes # NewType('Address', bytes) +AddressB58: TypeAlias = str +TxOutputScript: TypeAlias = bytes # NewType('TxOutputScript', bytes) +Timestamp: TypeAlias = int # NewType('Timestamp', int) +TokenUid: TypeAlias = VertexId # NewType('TokenUid', VertexId) +Amount: TypeAlias = int # NewType('Amount', int) diff --git a/hathor/util.py b/hathor/util.py index 1f409d0f1..b66acee2a 100644 --- a/hathor/util.py +++ b/hathor/util.py @@ -30,7 +30,7 @@ import hathor from hathor.conf.get_settings import get_global_settings -from hathor.types import TokenUid +from hathor.types import TokenUid, VertexId if TYPE_CHECKING: import structlog @@ -495,7 +495,11 @@ def _tx_progress(iter_tx: Iterator['BaseTransaction'], *, log: 'structlog.stdlib if total: progress_ = count / total elapsed_time = t_log - t_start - remaining_time = LogDuration(elapsed_time / progress_ - elapsed_time) + remaining_time: str | LogDuration + if progress_ == 0: + remaining_time = '?' + else: + remaining_time = LogDuration(elapsed_time / progress_ - elapsed_time) log.info( f'loading... {math.floor(progress_ * 100):2.0f}%', progress=progress_, @@ -806,3 +810,23 @@ def calculate_min_significant_weight(score: float, tol: float) -> float: """ This function will return the min significant weight to increase score by tol. """ return score + math.log2(2 ** tol - 1) + + +def bytes_to_vertexid(data: bytes) -> VertexId: + # XXX: using raw string for the docstring so we can more easily write byte literals + r""" Function to validate bytes and return a VertexId, raises ValueError if not valid. 
+ + >>> bytes_to_vertexid(b'\0' * 32) + b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + >>> bytes_to_vertexid(b'\0' * 31) + Traceback (most recent call last): + ... + ValueError: length must be exactly 32 bytes + >>> bytes_to_vertexid(b'\0' * 33) + Traceback (most recent call last): + ... + ValueError: length must be exactly 32 bytes + """ + if len(data) != 32: + raise ValueError('length must be exactly 32 bytes') + return VertexId(data) diff --git a/hathor/verification/block_verifier.py b/hathor/verification/block_verifier.py index b1184aea5..ff0c74a86 100644 --- a/hathor/verification/block_verifier.py +++ b/hathor/verification/block_verifier.py @@ -25,6 +25,8 @@ TransactionDataError, WeightError, ) +from hathor.transaction.storage.simple_memory_storage import SimpleMemoryStorage +from hathor.util import not_none class BlockVerifier: @@ -51,7 +53,11 @@ def verify_height(self, block: Block) -> None: def verify_weight(self, block: Block) -> None: """Validate minimum block difficulty.""" - min_block_weight = self._daa.calculate_block_difficulty(block) + memory_storage = SimpleMemoryStorage() + dependencies = self._daa.get_block_dependencies(block) + memory_storage.add_vertices_from_storage(not_none(block.storage), dependencies) + + min_block_weight = self._daa.calculate_block_difficulty(block, memory_storage) if block.weight < min_block_weight - self._settings.WEIGHT_TOL: raise WeightError(f'Invalid new block {block.hash_hex}: weight ({block.weight}) is ' f'smaller than the minimum weight ({min_block_weight})') diff --git a/hathor/version.py b/hathor/version.py index 895f8bdd7..b1afb04ca 100644 --- a/hathor/version.py +++ b/hathor/version.py @@ -19,7 +19,7 @@ from structlog import get_logger -BASE_VERSION = '0.59.0' +BASE_VERSION = '0.60.0' DEFAULT_VERSION_SUFFIX = "local" BUILD_VERSION_FILE_PATH = "./BUILD_VERSION" diff --git a/hathor/wallet/resources/thin_wallet/address_history.py 
b/hathor/wallet/resources/thin_wallet/address_history.py index 8e0ffc0ca..044494bb0 100644 --- a/hathor/wallet/resources/thin_wallet/address_history.py +++ b/hathor/wallet/resources/thin_wallet/address_history.py @@ -21,6 +21,7 @@ from hathor.cli.openapi_files.register import register_resource from hathor.conf.get_settings import get_global_settings from hathor.crypto.util import decode_address +from hathor.transaction.storage.exceptions import TransactionDoesNotExist from hathor.util import json_dumpb, json_loadb from hathor.wallet.exceptions import InvalidAddress @@ -69,10 +70,6 @@ def render_POST(self, request: Request) -> bytes: def render_GET(self, request: Request) -> bytes: """ GET request for /thin_wallet/address_history/ - If 'paginate' parameter exists, it calls the new resource method - otherwise, it will call the old and deprecated one because it's - a request from a wallet still in an older version - Expects 'addresses[]' as request args, and 'hash' as optional args to be used in pagination @@ -124,24 +121,18 @@ def render_GET(self, request: Request) -> bytes: return json_dumpb({'success': False}) raw_args = get_args(request) - paginate = b'paginate' in raw_args and raw_args[b'paginate'][0].decode('utf-8') == 'true' - if paginate: - # New resource - if b'addresses[]' not in raw_args: - return get_missing_params_msg('addresses[]') + if b'addresses[]' not in raw_args: + return get_missing_params_msg('addresses[]') - addresses = raw_args[b'addresses[]'] + addresses = raw_args[b'addresses[]'] - ref_hash = None - if b'hash' in raw_args: - # If hash parameter is in the request, it must be a valid hex - ref_hash = raw_args[b'hash'][0].decode('utf-8') + ref_hash = None + if b'hash' in raw_args: + # If hash parameter is in the request, it must be a valid hex + ref_hash = raw_args[b'hash'][0].decode('utf-8') - return self.get_address_history([address.decode('utf-8') for address in addresses], ref_hash) - else: - # Old and deprecated resource - return 
self.deprecated_resource(request) + return self.get_address_history([address.decode('utf-8') for address in addresses], ref_hash) def get_address_history(self, addresses: list[str], ref_hash: Optional[str]) -> bytes: ref_hash_bytes = None @@ -166,12 +157,6 @@ def get_address_history(self, addresses: list[str], ref_hash: Optional[str]) -> history = [] seen: set[bytes] = set() - # XXX In this algorithm we need to sort all transactions of an address - # and find one specific (in case of a pagination request) - # so if this address has many txs, this could become slow - # I've done some tests with 10k txs in one address and the request - # returned in less than 50ms, so we will move forward with it for now - # but this could be improved in the future for idx, address in enumerate(addresses): try: decode_address(address) @@ -181,31 +166,28 @@ def get_address_history(self, addresses: list[str], ref_hash: Optional[str]) -> 'message': 'The address {} is invalid'.format(address) }) - hashes = addresses_index.get_sorted_from_address(address) - start_index = 0 - if ref_hash_bytes and idx == 0: - # It's not the first request, so we must continue from the hash - # but we do it only for the first address + tx = None + if ref_hash_bytes: try: - # Find index where the hash is - start_index = hashes.index(ref_hash_bytes) - except ValueError: - # ref_hash is not in the list + tx = self.manager.tx_storage.get_transaction(ref_hash_bytes) + except TransactionDoesNotExist: return json_dumpb({ 'success': False, - 'message': 'Hash {} is not a transaction from the address {}.'.format(ref_hash, address) + 'message': 'Hash {} is not a transaction hash.'.format(ref_hash) }) - # Slice the hashes array from the start_index - to_iterate = hashes[start_index:] + # The address index returns an iterable that starts at `tx`. 
+ hashes = addresses_index.get_sorted_from_address(address, tx) did_break = False - for index, tx_hash in enumerate(to_iterate): + for tx_hash in hashes: if total_added == self._settings.MAX_TX_ADDRESSES_HISTORY: # If already added the max number of elements possible, then break # I need to add this if at the beginning of the loop to handle the case # when the first tx of the address exceeds the limit, so we must return # that the next request should start in the first tx of this address did_break = True + # Saving the first tx hash for the next request + first_hash = tx_hash.hex() break if tx_hash not in seen: @@ -216,6 +198,8 @@ def get_address_history(self, addresses: list[str], ref_hash: Optional[str]) -> # It's important to validate also the maximum number of inputs and outputs because some txs # are really big and the response payload becomes too big did_break = True + # Saving the first tx hash for the next request + first_hash = tx_hash.hex() break seen.add(tx_hash) @@ -226,10 +210,8 @@ def get_address_history(self, addresses: list[str], ref_hash: Optional[str]) -> if did_break: # We stopped in the middle of the txs of this address # So we return that we still have more data to send - break_index = start_index + index has_more = True # The hash to start the search and which address this hash belongs - first_hash = hashes[break_index].hex() first_address = address break @@ -242,38 +224,6 @@ def get_address_history(self, addresses: list[str], ref_hash: Optional[str]) -> } return json_dumpb(data) - def deprecated_resource(self, request: Request) -> bytes: - """ This resource is deprecated. 
It's here only to keep - compatibility with old wallet versions - """ - raw_args = get_args(request) - if b'addresses[]' not in raw_args: - return get_missing_params_msg('addresses[]') - - addresses_index = self.manager.tx_storage.indexes.addresses - - addresses = raw_args[b'addresses[]'] - history = [] - seen: set[bytes] = set() - for address_to_decode in addresses: - address = address_to_decode.decode('utf-8') - try: - decode_address(address) - except InvalidAddress: - return json_dumpb({ - 'success': False, - 'message': 'The address {} is invalid'.format(address) - }) - - for tx_hash in addresses_index.get_from_address(address): - tx = self.manager.tx_storage.get_transaction(tx_hash) - if tx_hash not in seen: - seen.add(tx_hash) - history.append(tx.to_json_extended()) - - data = {'history': history} - return json_dumpb(data) - AddressHistoryResource.openapi = { '/thin_wallet/address_history': { @@ -376,60 +326,6 @@ def deprecated_resource(self, request: Request) -> bytes: ] } }, - 'deprecated_success': { - 'summary': 'Deprecated success', - 'value': { - 'history': [ - { - "hash": "00000299670db5814f69cede8b347f83" - "0f73985eaa4cd1ce87c9a7c793771336", - "timestamp": 1552422415, - "is_voided": False, - 'parents': [ - '00000b8792cb13e8adb51cc7d866541fc29b532e8dec95ae4661cf3da4d42cb5', - '00001417652b9d7bd53eb14267834eab08f27e5cbfaca45a24370e79e0348bb1' - ], - "inputs": [ - { - "value": 42500000044, - "script": "dqkURJPA8tDMJHU8tqv3SiO18ZCLEPaIrA==", - "decoded": { - "type": "P2PKH", - "address": "17Fbx9ouRUD1sd32bp4ptGkmgNzg7p2Krj", - "timelock": None - }, - "token": "00", - "tx": "000002d28696f94f89d639022ae81a1d" - "870d55d189c27b7161d9cb214ad1c90c", - "index": 0 - } - ], - "outputs": [ - { - "value": 42499999255, - "script": "dqkU/B6Jbf5OnslsQrvHXQ4WKDTSEGKIrA==", - "decoded": { - "type": "P2PKH", - "address": "1Pz5s5WVL52MK4EwBy9XVQUzWjF2LWWKiS", - "timelock": None - }, - "token": "00" - }, - { - "value": 789, - "script": 
"dqkUrWoWhiP+qPeI/qwfwb5fgnmtd4CIrA==", - "decoded": { - "type": "P2PKH", - "address": "1GovzJvbzLw6x4H2a1hHb529cpEWzh3YRd", - "timelock": None - }, - "token": "00" - } - ] - } - ] - } - }, 'error': { 'summary': 'Invalid address', 'value': { diff --git a/poetry.lock b/poetry.lock index 475743517..163ae443e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiohttp" @@ -1061,38 +1061,38 @@ files = [ [[package]] name = "mypy" -version = "1.8.0" +version = "1.9.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"}, - {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"}, - {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"}, - {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"}, - {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"}, - {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"}, - {file = 
"mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"}, - {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"}, - {file = "mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"}, - {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"}, - {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"}, - {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"}, - {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"}, - {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"}, - {file = 
"mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"}, - {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"}, - {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"}, - {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"}, - {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, + {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, + {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, + {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, + {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, + {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, + {file = 
"mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, + {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, + {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, + {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, + {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, + {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, + {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, + {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, + {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, + {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, + {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, + {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, ] [package.dependencies] @@ -1119,17 +1119,17 @@ files = [ [[package]] name = "mypy-zope" -version = "1.0.3" +version = "1.0.4" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" files = [ - {file = "mypy-zope-1.0.3.tar.gz", hash = "sha256:149081bd2754d947747baefac569bb1c2bc127b4a2cc1fa505492336946bb3b4"}, - {file = "mypy_zope-1.0.3-py3-none-any.whl", hash = "sha256:7a30ce1a2589173f0be66662c9a9179f75737afc40e4104df4c76fb5a8421c14"}, + {file = "mypy-zope-1.0.4.tar.gz", hash = "sha256:a9569e73ae85a65247787d98590fa6d4290e76f26aabe035d1c3e94a0b9ab6ee"}, + {file = "mypy_zope-1.0.4-py3-none-any.whl", hash = "sha256:c7298f93963a84f2b145c2b5cc98709fc2a5be4adf54bfe23fa7fdd8fd19c975"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.9.0" +mypy = ">=1.0.0,<1.10.0" "zope.interface" = "*" "zope.schema" = "*" @@ -2490,4 +2490,4 @@ sentry = ["sentry-sdk", "structlog-sentry"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<4" -content-hash = "cce7b9832ae2d13cc56fb572af82face7a824307ddd6953387737a27d6e7088a" +content-hash = "1eed0fc6c02c4ddb7b4a6634d6c5ba4873ce5a82c6b3d4197ca88b4644474c53" diff --git a/pyproject.toml b/pyproject.toml index e27cfb609..f5ac12f1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ [tool.poetry] name = "hathor" -version = "0.59.0" +version = "0.60.0" description = "Hathor Network full-node" authors = 
["Hathor Team "] license = "Apache-2.0" @@ -38,8 +38,8 @@ hathor-cli = 'hathor.cli.main:main' [tool.poetry.dev-dependencies] flake8 = "~6.1.0" isort = {version = "~5.12.0", extras = ["colors"]} -mypy = {version = "^1.5.1", markers = "implementation_name == 'cpython'"} -mypy-zope = {version = "^1.0.1", markers = "implementation_name == 'cpython'"} +mypy = {version = "^1.9.0", markers = "implementation_name == 'cpython'"} +mypy-zope = {version = "^1.0.4", markers = "implementation_name == 'cpython'"} pytest = "~7.4.3" pytest-cov = "~4.1.0" flaky = "~3.7.0" @@ -97,6 +97,8 @@ multi_line_output = 3 pretty = true disallow_incomplete_defs = true no_implicit_optional = true +extra_checks = true +disallow_untyped_decorators = true warn_redundant_casts = true warn_unused_configs = true warn_unused_ignores = true @@ -131,6 +133,37 @@ module = [ ] ignore_missing_imports = true +# This override enables stricter rules for some specific modules. +# Currently, we have only two options from strict-mode that are disabled, but we have to opt-in instead of opt-out +# because setting strict=true doesn't work for module-level settings. 
+# Reference: https://mypy.readthedocs.io/en/stable/existing_code.html#introduce-stricter-options +[[tool.mypy.overrides]] +module = [ + "hathor.consensus.*", + "hathor.feature_activation.*", + "hathor.event.*", + "hathor.verification.*", + "tests.consensus.*", + "tests.crypto.*", + "tests.event.*", + "tests.execution_manager.*", + "tests.feature_activation.*", + "tests.p2p.*", + "tests.pubsub.*", + "tests.simulation.*", + "tests.unittest", + "tests.utils", +] +strict_equality = true +strict_concatenate = true +check_untyped_defs = true +disallow_any_generics = true +disallow_untyped_defs = true +no_implicit_reexport = true +warn_return_any = true +# disallow_subclassing_any = true +# disallow_untyped_calls = true + [tool.pydantic-mypy] init_typed = true init_forbid_extra = true diff --git a/tests/consensus/test_consensus.py b/tests/consensus/test_consensus.py index caa455a54..5d9a9db31 100644 --- a/tests/consensus/test_consensus.py +++ b/tests/consensus/test_consensus.py @@ -1,7 +1,9 @@ -from unittest.mock import MagicMock +from unittest.mock import MagicMock, Mock +from hathor.execution_manager import ExecutionManager from hathor.simulator.utils import add_new_block, add_new_blocks, gen_new_tx from hathor.transaction.storage import TransactionMemoryStorage +from hathor.util import not_none from tests import unittest from tests.utils import add_blocks_unlock_reward, add_new_double_spending, add_new_transactions @@ -9,14 +11,14 @@ class BaseConsensusTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.tx_storage = TransactionMemoryStorage() self.genesis = self.tx_storage.get_all_genesis() self.genesis_blocks = [tx for tx in self.genesis if tx.is_block] self.genesis_txs = [tx for tx in self.genesis if not tx.is_block] - def test_unhandled_exception(self): + def test_unhandled_exception(self) -> None: manager = self.create_peer('testnet', tx_storage=self.tx_storage) # Mine a few blocks in a row with no 
transaction but the genesis @@ -30,16 +32,21 @@ def test_unhandled_exception(self): class MyError(Exception): pass + execution_manager_mock = Mock(spec_set=ExecutionManager) + manager.consensus_algorithm._execution_manager = execution_manager_mock manager.consensus_algorithm._unsafe_update = MagicMock(side_effect=MyError) - with self.assertRaises(MyError): - manager.propagate_tx(tx, fails_silently=False) + manager.propagate_tx(tx, fails_silently=False) + + execution_manager_mock.crash_and_exit.assert_called_once_with( + reason=f"Consensus update failed for tx {tx.hash_hex}" + ) tx2 = manager.tx_storage.get_transaction(tx.hash) meta2 = tx2.get_metadata() self.assertEqual({self._settings.CONSENSUS_FAIL_ID}, meta2.voided_by) - def test_revert_block_high_weight(self): + def test_revert_block_high_weight(self) -> None: """ A conflict transaction will be propagated. At first, it will be voided. But, a new block with high weight will verify it, which will flip it to executed. """ @@ -102,7 +109,7 @@ def test_revert_block_high_weight(self): self.assertConsensusValid(manager) - def test_dont_revert_block_low_weight(self): + def test_dont_revert_block_low_weight(self) -> None: """ A conflict transaction will be propagated and voided. A new block with low weight will verify it, which won't be enough to flip to executed. So, it will remain voided. @@ -156,7 +163,7 @@ def test_dont_revert_block_low_weight(self): self.assertConsensusValid(manager) - def test_dont_revert_block_high_weight_transaction_verify_other(self): + def test_dont_revert_block_high_weight_transaction_verify_other(self) -> None: """ A conflict transaction will be propagated and voided. But this transaction verifies its conflicting transaction. So, its accumulated weight will always be smaller than the others and it will never be executed. @@ -174,8 +181,8 @@ def test_dont_revert_block_high_weight_transaction_verify_other(self): # Create a double spending transaction. 
conflicting_tx = add_new_double_spending(manager, tx=txs[-1]) meta = conflicting_tx.get_metadata() - self.assertEqual(len(meta.conflict_with), 1) - self.assertIn(list(meta.conflict_with)[0], conflicting_tx.parents) + self.assertEqual(len(not_none(meta.conflict_with)), 1) + self.assertIn(not_none(meta.conflict_with)[0], conflicting_tx.parents) # Add a few transactions. add_new_transactions(manager, 10, advance_clock=15) @@ -213,7 +220,7 @@ def test_dont_revert_block_high_weight_transaction_verify_other(self): self.assertConsensusValid(manager) - def test_dont_revert_block_high_weight_verify_both(self): + def test_dont_revert_block_high_weight_verify_both(self) -> None: """ A conflicting transaction will be propagated and voided. But the block with high weight verifies both the conflicting transactions, so this block will always be voided. """ diff --git a/tests/consensus/test_consensus2.py b/tests/consensus/test_consensus2.py index d8993c69e..da40a5703 100644 --- a/tests/consensus/test_consensus2.py +++ b/tests/consensus/test_consensus2.py @@ -1,12 +1,15 @@ from hathor.graphviz import GraphvizVisualizer +from hathor.manager import HathorManager from hathor.simulator.utils import gen_new_tx +from hathor.transaction import Transaction +from hathor.util import not_none from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import add_custom_tx class BaseConsensusSimulatorTestCase(SimulatorTestCase): - def checkConflict(self, tx1, tx2): + def checkConflict(self, tx1: Transaction, tx2: Transaction) -> None: meta1 = tx1.get_metadata() meta2 = tx2.get_metadata() self.assertIn(tx1.hash, meta2.conflict_with) @@ -19,7 +22,7 @@ def checkConflict(self, tx1, tx2): cnt += 1 self.assertLessEqual(cnt, 1) - def do_step(self, i, manager1, tx_base): + def do_step(self, i: int, manager1: HathorManager, tx_base: Transaction) -> Transaction: txA = add_custom_tx(manager1, [(tx_base, 0)], n_outputs=2) self.graphviz.labels[txA.hash] = f'txA-{i}' @@ 
-52,7 +55,7 @@ def do_step(self, i, manager1, tx_base): return txH - def test_two_conflicts_intertwined_once(self): + def test_two_conflicts_intertwined_once(self) -> None: manager1 = self.create_peer() manager1.allow_mining_without_peers() @@ -72,6 +75,7 @@ def test_two_conflicts_intertwined_once(self): self.graphviz = GraphvizVisualizer(manager1.tx_storage, include_verifications=True, include_funds=True) + assert manager1.wallet is not None address = manager1.wallet.get_unused_address(mark_as_used=False) value = 10 initial = gen_new_tx(manager1, address, value) @@ -87,7 +91,7 @@ def test_two_conflicts_intertwined_once(self): # dot = self.graphviz.dot() # dot.render('dot0') - def test_two_conflicts_intertwined_multiple_times(self): + def test_two_conflicts_intertwined_multiple_times(self) -> None: manager1 = self.create_peer() manager1.allow_mining_without_peers() @@ -107,13 +111,14 @@ def test_two_conflicts_intertwined_multiple_times(self): self.graphviz = GraphvizVisualizer(manager1.tx_storage, include_verifications=True, include_funds=True) + assert manager1.wallet is not None address = manager1.wallet.get_unused_address(mark_as_used=False) value = 10 initial = gen_new_tx(manager1, address, value) initial.weight = 25 initial.update_hash() manager1.propagate_tx(initial, fails_silently=False) - self.graphviz.labels[initial.hash] = 'initial' + self.graphviz.labels[not_none(initial.hash)] = 'initial' x = initial x = self.do_step(0, manager1, x) diff --git a/tests/consensus/test_consensus3.py b/tests/consensus/test_consensus3.py index 70099975c..dad7ca70b 100644 --- a/tests/consensus/test_consensus3.py +++ b/tests/consensus/test_consensus3.py @@ -6,14 +6,14 @@ class DoubleSpendingTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' self.manager1 = self.create_peer(self.network, unlock_wallet=True, enable_sync_v1=True, enable_sync_v2=False) @pytest.mark.xfail(strict=True) - def 
test_double_spending_attempt_1(self): + def test_double_spending_attempt_1(self) -> None: manager = self.manager1 add_new_blocks(manager, 5, advance_clock=15) @@ -38,7 +38,7 @@ def test_double_spending_attempt_1(self): manager.cpu_mining_service.resolve(tx_fund0) self.assertTrue(manager.propagate_tx(tx_fund0)) - def do_step(tx_fund): + def do_step(tx_fund: Transaction) -> Transaction: inputs = [WalletInputInfo(tx_fund.hash, 0, manager.wallet.get_private_key(addr))] outputs = [WalletOutputInfo(decode_address(addr), 1, None)] tx1 = manager.wallet.prepare_transaction(Transaction, inputs, outputs, tx_fund.timestamp+1) @@ -79,7 +79,7 @@ def do_step(tx_fund): outputs = [] outputs.append(WalletOutputInfo(decode_address(addr), 1, None)) outputs.append(WalletOutputInfo(decode_address(addr), 2*tx_fund.outputs[1].value, None)) - tx5 = manager.wallet.prepare_transaction(Transaction, inputs, outputs, tx2.timestamp+1) + tx5: Transaction = manager.wallet.prepare_transaction(Transaction, inputs, outputs, tx2.timestamp+1) tx5.weight = tx3.weight - tx1.weight + 0.1 tx5.parents = [tx2.hash, tx4.hash] manager.cpu_mining_service.resolve(tx5) @@ -105,7 +105,7 @@ def do_step(tx_fund): self.assertConsensusValid(manager) @pytest.mark.xfail(strict=True) - def test_double_spending_attempt_2(self): + def test_double_spending_attempt_2(self) -> None: manager = self.manager1 add_new_blocks(manager, 5, advance_clock=15) @@ -128,7 +128,7 @@ def test_double_spending_attempt_2(self): manager.cpu_mining_service.resolve(tx_fund0) self.assertTrue(manager.propagate_tx(tx_fund0)) - def do_step(tx_fund): + def do_step(tx_fund: Transaction) -> Transaction: inputs = [WalletInputInfo(tx_fund.hash, 0, manager.wallet.get_private_key(addr))] outputs = [WalletOutputInfo(decode_address(addr), 1, None)] tx1 = manager.wallet.prepare_transaction(Transaction, inputs, outputs, tx_fund.timestamp+1) @@ -174,7 +174,7 @@ def do_step(tx_fund): outputs.append(WalletOutputInfo(decode_address(addr), 1, None)) 
outputs.append(WalletOutputInfo(decode_address(addr), 1, None)) outputs.append(WalletOutputInfo(decode_address(addr), 2*tx_fund.outputs[2].value, None)) - tx5 = manager.wallet.prepare_transaction(Transaction, inputs, outputs, tx4.timestamp+1) + tx5: Transaction = manager.wallet.prepare_transaction(Transaction, inputs, outputs, tx4.timestamp+1) tx5.weight = 1 tx5.parents = manager.get_new_tx_parents(tx5.timestamp) manager.cpu_mining_service.resolve(tx5) diff --git a/tests/consensus/test_consensus4.py b/tests/consensus/test_consensus4.py index bdc3f2047..906a82b10 100644 --- a/tests/consensus/test_consensus4.py +++ b/tests/consensus/test_consensus4.py @@ -1,4 +1,7 @@ from hathor.graphviz import GraphvizVisualizer +from hathor.manager import HathorManager +from hathor.transaction import Block +from hathor.types import VertexId from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import gen_custom_tx @@ -6,7 +9,14 @@ class BaseConsensusSimulatorTestCase(SimulatorTestCase): - def create_chain(self, manager, first_parent_block_hash, length, prefix, tx_parents=None): + def create_chain( + self, + manager: HathorManager, + first_parent_block_hash: VertexId, + length: int, + prefix: str, + tx_parents: list[VertexId] | None = None + ) -> list[Block]: current = first_parent_block_hash v = [] for i in range(length): @@ -23,7 +33,7 @@ def create_chain(self, manager, first_parent_block_hash, length, prefix, tx_pare current = blk.hash return v - def test_conflict_with_parent_tx(self): + def test_conflict_with_parent_tx(self) -> None: manager1 = self.create_peer() manager1.allow_mining_without_peers() diff --git a/tests/consensus/test_consensus5.py b/tests/consensus/test_consensus5.py index c6e4bebc7..19feaded3 100644 --- a/tests/consensus/test_consensus5.py +++ b/tests/consensus/test_consensus5.py @@ -1,4 +1,7 @@ from hathor.graphviz import GraphvizVisualizer +from hathor.manager import HathorManager +from hathor.transaction import Block 
+from hathor.types import VertexId from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import gen_custom_tx @@ -6,7 +9,14 @@ class BaseConsensusSimulatorTestCase(SimulatorTestCase): - def create_chain(self, manager, first_parent_block_hash, length, prefix, tx_parents=None): + def create_chain( + self, + manager: HathorManager, + first_parent_block_hash: VertexId, + length: int, + prefix: str, + tx_parents: list[VertexId] | None = None + ) -> list[Block]: current = first_parent_block_hash v = [] for i in range(length): @@ -23,7 +33,7 @@ def create_chain(self, manager, first_parent_block_hash, length, prefix, tx_pare current = blk.hash return v - def test_conflict_with_parent_tx(self): + def test_conflict_with_parent_tx(self) -> None: manager1 = self.create_peer() manager1.allow_mining_without_peers() diff --git a/tests/consensus/test_soft_voided.py b/tests/consensus/test_soft_voided.py index 97bb0d562..fd43b0f7e 100644 --- a/tests/consensus/test_soft_voided.py +++ b/tests/consensus/test_soft_voided.py @@ -1,7 +1,11 @@ +from typing import Iterator + from hathor.graphviz import GraphvizVisualizer -from hathor.simulator import FakeConnection, Simulator +from hathor.simulator import FakeConnection, RandomTransactionGenerator, Simulator from hathor.simulator.trigger import StopAfterNTransactions from hathor.simulator.utils import gen_new_tx +from hathor.transaction import Block +from hathor.types import VertexId from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import add_custom_tx @@ -10,14 +14,19 @@ class BaseSoftVoidedTestCase(SimulatorTestCase): seed_config = 5988775361793628170 - def assertNoParentsAreSoftVoided(self, tx): + def assertNoParentsAreSoftVoided(self, tx: Block) -> None: + assert tx.storage is not None for h in tx.parents: tx2 = tx.storage.get_transaction(h) tx2_meta = tx2.get_metadata() tx2_voided_by = tx2_meta.voided_by or set() 
self.assertNotIn(self._settings.SOFT_VOIDED_ID, tx2_voided_by) - def _run_test(self, simulator, soft_voided_tx_ids): + def _run_test( + self, + simulator: Simulator, + soft_voided_tx_ids: set[VertexId] + ) -> Iterator[RandomTransactionGenerator]: manager1 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids, simulator=simulator) manager1.allow_mining_without_peers() @@ -30,7 +39,6 @@ def _run_test(self, simulator, soft_voided_tx_ids): simulator.run(300) manager2 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids, simulator=simulator) - manager2.soft_voided_tx_ids = soft_voided_tx_ids graphviz = GraphvizVisualizer(manager2.tx_storage, include_verifications=True, include_funds=True) @@ -74,6 +82,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): metaD1 = txD1.get_metadata() self.assertEqual({txA.hash, txD1.hash}, metaD1.voided_by) + assert manager2.wallet is not None address = manager2.wallet.get_unused_address(mark_as_used=False) value = 1 txC = gen_new_tx(manager2, address, value) @@ -127,7 +136,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): # dot = graphviz.dot() # dot.render('dot0') - def _get_txA_hash(self): + def _get_txA_hash(self) -> VertexId: simulator = Simulator(seed=self.simulator.seed) simulator.start() @@ -140,7 +149,7 @@ def _get_txA_hash(self): return txA_hash - def test_soft_voided(self): + def test_soft_voided(self) -> None: txA_hash = self._get_txA_hash() soft_voided_tx_ids = set([ txA_hash, diff --git a/tests/consensus/test_soft_voided2.py b/tests/consensus/test_soft_voided2.py index 3e03de57a..116761235 100644 --- a/tests/consensus/test_soft_voided2.py +++ b/tests/consensus/test_soft_voided2.py @@ -1,6 +1,12 @@ +from typing import Iterator + from hathor.graphviz import GraphvizVisualizer +from hathor.manager import HathorManager from hathor.simulator import Simulator from hathor.simulator.utils import gen_new_tx +from hathor.transaction import Block, Transaction +from hathor.types import VertexId +from hathor.wallet 
import HDWallet from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import BURN_ADDRESS, add_custom_tx @@ -9,7 +15,7 @@ class BaseConsensusSimulatorTestCase(SimulatorTestCase): seed_config = 5988775361793628169 - def assertValidConflictResolution(self, tx1, tx2): + def assertValidConflictResolution(self, tx1: Transaction, tx2: Transaction) -> None: meta1 = tx1.get_metadata() meta2 = tx2.get_metadata() @@ -28,8 +34,9 @@ def assertValidConflictResolution(self, tx1, tx2): cnt += 1 self.assertLessEqual(cnt, 1) - def do_step(self, i, manager1, tx_base): + def do_step(self, i: int, manager1: HathorManager, tx_base: Transaction) -> Transaction: wallet = manager1.wallet + assert isinstance(wallet, HDWallet) address = wallet.get_address(wallet.get_key_at_index(0)) txA = add_custom_tx(manager1, [(tx_base, 0)], n_outputs=2, address=address) @@ -86,7 +93,7 @@ def do_step(self, i, manager1, tx_base): return txH - def gen_block(self, manager1, tx, parent_block=None): + def gen_block(self, manager1: HathorManager, tx: Transaction, parent_block: Block | None = None) -> Block: parent_block_hash = parent_block.hash if parent_block else None block = manager1.generate_mining_block(parent_block_hash=parent_block_hash, address=BURN_ADDRESS) block.parents[1] = tx.hash @@ -96,8 +103,8 @@ def gen_block(self, manager1, tx, parent_block=None): self.assertTrue(manager1.propagate_tx(block, fails_silently=False)) return block - def _run_test(self, simulator, soft_voided_tx_ids): - self.txF_hashes = [] + def _run_test(self, simulator: Simulator, soft_voided_tx_ids: set[VertexId]) -> Iterator[None]: + self.txF_hashes: list[VertexId] = [] manager1 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids, simulator=simulator) manager1.allow_mining_without_peers() @@ -112,6 +119,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): self.graphviz = GraphvizVisualizer(manager1.tx_storage, include_verifications=True, include_funds=True) + assert 
manager1.wallet is not None address = manager1.wallet.get_unused_address(mark_as_used=False) value = 10 initial = gen_new_tx(manager1, address, value) @@ -180,7 +188,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): # dot = self.graphviz.dot() # dot.render('dot0') - def _get_txF_hashes(self): + def _get_txF_hashes(self) -> list[VertexId]: self.skip_asserts = True simulator = Simulator(seed=self.simulator.seed) simulator.start() @@ -194,7 +202,7 @@ def _get_txF_hashes(self): return list(self.txF_hashes) - def test_soft_voided(self): + def test_soft_voided(self) -> None: txF_hashes = self._get_txF_hashes() self.assertEqual(10, len(txF_hashes)) soft_voided_tx_ids = set(txF_hashes) diff --git a/tests/consensus/test_soft_voided3.py b/tests/consensus/test_soft_voided3.py index 92af7c201..807d6fe8c 100644 --- a/tests/consensus/test_soft_voided3.py +++ b/tests/consensus/test_soft_voided3.py @@ -1,7 +1,11 @@ +from typing import Iterator + from hathor.graphviz import GraphvizVisualizer -from hathor.simulator import FakeConnection, Simulator +from hathor.simulator import FakeConnection, RandomTransactionGenerator, Simulator from hathor.simulator.trigger import StopAfterNTransactions from hathor.simulator.utils import gen_new_tx +from hathor.transaction import BaseTransaction +from hathor.types import VertexId from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import add_custom_tx, gen_custom_tx @@ -10,14 +14,19 @@ class BaseSoftVoidedTestCase(SimulatorTestCase): seed_config = 5988775361793628169 - def assertNoParentsAreSoftVoided(self, tx): + def assertNoParentsAreSoftVoided(self, tx: BaseTransaction) -> None: + assert tx.storage is not None for h in tx.parents: tx2 = tx.storage.get_transaction(h) tx2_meta = tx2.get_metadata() tx2_voided_by = tx2_meta.voided_by or set() self.assertNotIn(self._settings.SOFT_VOIDED_ID, tx2_voided_by) - def _run_test(self, simulator, soft_voided_tx_ids): + def _run_test( + self, + simulator: 
Simulator, + soft_voided_tx_ids: set[VertexId] + ) -> Iterator[RandomTransactionGenerator]: manager1 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids, simulator=simulator) manager1.allow_mining_without_peers() @@ -30,7 +39,6 @@ def _run_test(self, simulator, soft_voided_tx_ids): simulator.run(300) manager2 = self.create_peer(soft_voided_tx_ids=soft_voided_tx_ids, simulator=simulator) - manager2.soft_voided_tx_ids = soft_voided_tx_ids graphviz = GraphvizVisualizer(manager2.tx_storage, include_verifications=True, include_funds=True) @@ -83,6 +91,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): graphviz.labels[blk1.hash] = 'blk1' simulator.run(10) + assert manager2.wallet is not None address = manager2.wallet.get_unused_address(mark_as_used=True) txC = gen_new_tx(manager2, address, 6400) if txD1.hash not in txC.parents: @@ -109,7 +118,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): # dot = graphviz.dot() # dot.render('test_soft_voided3') - def _get_txA_hash(self): + def _get_txA_hash(self) -> VertexId: simulator = Simulator(seed=self.simulator.seed) simulator.start() @@ -122,7 +131,7 @@ def _get_txA_hash(self): return txA_hash - def test_soft_voided(self): + def test_soft_voided(self) -> None: txA_hash = self._get_txA_hash() soft_voided_tx_ids = set([ txA_hash, diff --git a/tests/consensus/test_soft_voided4.py b/tests/consensus/test_soft_voided4.py index bd914b341..cb9e1db7d 100644 --- a/tests/consensus/test_soft_voided4.py +++ b/tests/consensus/test_soft_voided4.py @@ -1,7 +1,11 @@ +from typing import Iterator + from hathor.graphviz import GraphvizVisualizer -from hathor.simulator import FakeConnection, Simulator +from hathor.simulator import FakeConnection, RandomTransactionGenerator, Simulator from hathor.simulator.trigger import StopAfterNTransactions from hathor.simulator.utils import gen_new_double_spending +from hathor.transaction import Transaction +from hathor.types import VertexId from tests import unittest from tests.simulation.base 
import SimulatorTestCase from tests.utils import add_custom_tx @@ -10,7 +14,11 @@ class BaseSoftVoidedTestCase(SimulatorTestCase): seed_config = 5988775361793628169 - def _run_test(self, simulator, soft_voided_tx_ids): + def _run_test( + self, + simulator: Simulator, + soft_voided_tx_ids: list[VertexId] + ) -> Iterator[RandomTransactionGenerator]: manager1 = self.create_peer(soft_voided_tx_ids=set(soft_voided_tx_ids), simulator=simulator) manager1.allow_mining_without_peers() @@ -24,7 +32,6 @@ def _run_test(self, simulator, soft_voided_tx_ids): gen_tx1.stop() manager2 = self.create_peer(soft_voided_tx_ids=set(soft_voided_tx_ids), simulator=simulator) - manager2.soft_voided_tx_ids = set(soft_voided_tx_ids) self.graphviz = GraphvizVisualizer(manager2.tx_storage, include_verifications=True, include_funds=True) @@ -54,6 +61,7 @@ def _run_test(self, simulator, soft_voided_tx_ids): gen_tx2.stop() + assert isinstance(soft_voided_tx_ids, list) self.assertEqual(2, len(soft_voided_tx_ids)) txA_hash = soft_voided_tx_ids[0] txB_hash = soft_voided_tx_ids[1] @@ -61,9 +69,11 @@ def _run_test(self, simulator, soft_voided_tx_ids): self.graphviz.labels[txB_hash] = 'txB' txB = manager2.tx_storage.get_transaction(txB_hash) + assert isinstance(txB, Transaction) # Get the tx confirmed by the soft voided that will be voided tx_base = manager2.tx_storage.get_transaction(txB.parents[0]) + assert isinstance(tx_base, Transaction) txC = gen_new_double_spending(manager2, use_same_parents=False, tx=tx_base) self.graphviz.labels[tx_base.hash] = 'tx_base' txC.weight = 30 @@ -125,12 +135,12 @@ def _run_test(self, simulator, soft_voided_tx_ids): metaD = txD.get_metadata() self.assertEqual(metaD.voided_by, {tx_base.hash}) - def _get_txA_hash(self): + def _get_txA_hash(self) -> VertexId: simulator = Simulator(seed=self.simulator.seed) simulator.start() try: - it = self._run_test(simulator, set()) + it = self._run_test(simulator, []) gen_tx = next(it) txA_hash = gen_tx.latest_transactions[0] finally: 
@@ -138,12 +148,12 @@ def _get_txA_hash(self): return txA_hash - def _get_txB_hash(self, txA_hash): + def _get_txB_hash(self, txA_hash: VertexId) -> VertexId: simulator = Simulator(seed=self.simulator.seed) simulator.start() try: - it = self._run_test(simulator, set([txA_hash])) + it = self._run_test(simulator, [txA_hash]) _ = next(it) _ = next(it) gen_tx = next(it) @@ -153,7 +163,7 @@ def _get_txB_hash(self, txA_hash): return txB_hash - def test_soft_voided(self): + def test_soft_voided(self) -> None: txA_hash = self._get_txA_hash() txB_hash = self._get_txB_hash(txA_hash) self.assertNotEqual(txA_hash, txB_hash) diff --git a/tests/crypto/test_util.py b/tests/crypto/test_util.py index e8ba0353b..17e611037 100644 --- a/tests/crypto/test_util.py +++ b/tests/crypto/test_util.py @@ -13,22 +13,24 @@ class CryptoUtilTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: super().setUp() - self.private_key = ec.generate_private_key(ec.SECP256K1(), default_backend()) + key = ec.generate_private_key(ec.SECP256K1(), default_backend()) + assert isinstance(key, ec.EllipticCurvePrivateKeyWithSerialization) + self.private_key = key self.public_key = self.private_key.public_key() - def test_privkey_serialization(self): + def test_privkey_serialization(self) -> None: private_key_bytes = get_private_key_bytes(self.private_key) self.assertEqual(self.private_key.private_numbers(), get_private_key_from_bytes(private_key_bytes).private_numbers()) - def test_address(self): + def test_address(self) -> None: address = get_address_from_public_key(self.public_key) address_b58 = get_address_b58_from_public_key(self.public_key) self.assertEqual(address, decode_address(address_b58)) - def test_invalid_address(self): + def test_invalid_address(self) -> None: from hathor.wallet.exceptions import InvalidAddress address_b58 = get_address_b58_from_public_key(self.public_key) address_b58 += '0' # 0 is invalid in base58 diff --git a/tests/event/event_simulation_tester.py 
b/tests/event/event_simulation_tester.py index 338a90241..3e2bc4659 100644 --- a/tests/event/event_simulation_tester.py +++ b/tests/event/event_simulation_tester.py @@ -47,6 +47,7 @@ def _create_artifacts(self) -> None: self.settings = artifacts.settings event_ws_factory = self.manager._event_manager._event_ws_factory + assert event_ws_factory is not None event_ws_factory.openHandshakeTimeout = 0 self.protocol = event_ws_factory.buildProtocol(addr=Mock()) diff --git a/tests/event/test_base_event.py b/tests/event/test_base_event.py index 5751ae988..300157944 100644 --- a/tests/event/test_base_event.py +++ b/tests/event/test_base_event.py @@ -23,7 +23,7 @@ @pytest.mark.parametrize('event_id', [0, 1, 1000]) @pytest.mark.parametrize('group_id', [None, 0, 1, 1000]) -def test_create_base_event(event_id, group_id): +def test_create_base_event(event_id: int, group_id: int | None) -> None: event = BaseEvent( id=event_id, timestamp=123.3, @@ -40,6 +40,7 @@ def test_create_base_event(event_id, group_id): hash='abc', nonce=123, timestamp=456, + signal_bits=0, version=1, weight=10.0, inputs=[], @@ -70,8 +71,8 @@ def test_create_base_event(event_id, group_id): assert event.dict() == expected -@pytest.mark.parametrize('event_id', [None, -1, -1000]) -def test_create_base_event_fail_id(event_id): +@pytest.mark.parametrize('event_id', [-1, -1000]) +def test_create_base_event_fail_id(event_id: int) -> None: with pytest.raises(ValidationError): BaseEvent( id=event_id, @@ -82,7 +83,7 @@ def test_create_base_event_fail_id(event_id): @pytest.mark.parametrize('group_id', [-1, -1000]) -def test_create_base_event_fail_group_id(group_id): +def test_create_base_event_fail_group_id(group_id: int) -> None: with pytest.raises(ValidationError): BaseEvent( id=0, @@ -93,7 +94,7 @@ def test_create_base_event_fail_group_id(group_id): ) -def test_create_base_event_fail_data_type(): +def test_create_base_event_fail_data_type() -> None: with pytest.raises(ValidationError): BaseEvent( id=0, diff --git 
a/tests/event/test_event_manager.py b/tests/event/test_event_manager.py index e8a09c83b..289d5b0f5 100644 --- a/tests/event/test_event_manager.py +++ b/tests/event/test_event_manager.py @@ -1,13 +1,14 @@ from hathor.event.model.event_type import EventType from hathor.event.storage.memory_storage import EventMemoryStorage from hathor.pubsub import HathorEvents +from hathor.util import not_none from tests import unittest class BaseEventManagerTest(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' self.event_storage = EventMemoryStorage() @@ -18,37 +19,37 @@ def setUp(self): event_storage=self.event_storage ) - def test_if_event_is_persisted(self): + def test_if_event_is_persisted(self) -> None: block = self.manager.tx_storage.get_best_block() self.manager.pubsub.publish(HathorEvents.NETWORK_NEW_TX_ACCEPTED, tx=block) self.run_to_completion() self.assertIsNotNone(self.event_storage.get_event(0)) - def _fake_reorg_started(self): + def _fake_reorg_started(self) -> None: block = self.manager.tx_storage.get_best_block() # XXX: since we're faking these events, they don't neet to be consistent self.manager.pubsub.publish(HathorEvents.REORG_STARTED, old_best_height=1, old_best_block=block, new_best_height=1, new_best_block=block, reorg_size=1, common_block=block) - def _fake_reorg_finished(self): + def _fake_reorg_finished(self) -> None: self.manager.pubsub.publish(HathorEvents.REORG_FINISHED) - def test_event_group(self): + def test_event_group(self) -> None: self._fake_reorg_started() self._fake_reorg_finished() self._fake_reorg_started() self._fake_reorg_finished() self.run_to_completion() - event0 = self.event_storage.get_event(0) - event1 = self.event_storage.get_event(1) - event2 = self.event_storage.get_event(2) - event3 = self.event_storage.get_event(3) - event4 = self.event_storage.get_event(4) - event5 = self.event_storage.get_event(5) - event6 = self.event_storage.get_event(6) - event7 = 
self.event_storage.get_event(7) - event8 = self.event_storage.get_event(8) + event0 = not_none(self.event_storage.get_event(0)) + event1 = not_none(self.event_storage.get_event(1)) + event2 = not_none(self.event_storage.get_event(2)) + event3 = not_none(self.event_storage.get_event(3)) + event4 = not_none(self.event_storage.get_event(4)) + event5 = not_none(self.event_storage.get_event(5)) + event6 = not_none(self.event_storage.get_event(6)) + event7 = not_none(self.event_storage.get_event(7)) + event8 = not_none(self.event_storage.get_event(8)) self.assertEqual(EventType(event0.type), EventType.LOAD_STARTED) self.assertEqual(EventType(event1.type), EventType.NEW_VERTEX_ACCEPTED) @@ -66,19 +67,19 @@ def test_event_group(self): self.assertIsNotNone(event7.group_id) self.assertEqual(event7.group_id, event8.group_id) - def test_cannot_start_group_twice(self): + def test_cannot_start_group_twice(self) -> None: self._fake_reorg_started() self.run_to_completion() with self.assertRaises(AssertionError): self._fake_reorg_started() self.run_to_completion() - def test_cannot_finish_group_that_was_not_started(self): + def test_cannot_finish_group_that_was_not_started(self) -> None: with self.assertRaises(AssertionError): self._fake_reorg_finished() self.run_to_completion() - def test_cannot_finish_group_twice(self): + def test_cannot_finish_group_twice(self) -> None: self._fake_reorg_started() self._fake_reorg_finished() self.run_to_completion() diff --git a/tests/event/test_event_reorg.py b/tests/event/test_event_reorg.py index 81648f456..97606e8e9 100644 --- a/tests/event/test_event_reorg.py +++ b/tests/event/test_event_reorg.py @@ -8,7 +8,7 @@ class BaseEventReorgTest(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' self.event_storage = EventMemoryStorage() @@ -23,7 +23,7 @@ def setUp(self): self.genesis_private_key = get_genesis_key() self.genesis_public_key = self.genesis_private_key.public_key() 
- def test_reorg_events(self): + def test_reorg_events(self) -> None: assert self._settings.REWARD_SPEND_MIN_BLOCKS == 10, 'this test was made with this hardcoded value in mind' # add some blocks @@ -44,7 +44,7 @@ def test_reorg_events(self): # check events actual_events = list(self.event_storage.iter_from_event(0)) - expected_events = [ + expected_events: list[tuple[EventType, dict[str, str | int]]] = [ (EventType.LOAD_STARTED, {}), (EventType.NEW_VERTEX_ACCEPTED, {'hash': self._settings.GENESIS_BLOCK_HASH.hex()}), (EventType.NEW_VERTEX_ACCEPTED, {'hash': self._settings.GENESIS_TX1_HASH.hex()}), diff --git a/tests/event/test_event_simulation_responses.py b/tests/event/test_event_simulation_responses.py index 2bc628088..50557296c 100644 --- a/tests/event/test_event_simulation_responses.py +++ b/tests/event/test_event_simulation_responses.py @@ -282,7 +282,7 @@ def test_restart_with_ack_too_small(self) -> None: # get response response = self._get_error_response() - assert response.type == InvalidRequestType.ACK_TOO_SMALL.value + assert str(response.type) == InvalidRequestType.ACK_TOO_SMALL.value def test_multiple_interactions(self) -> None: miner = self.simulator.create_miner(self.manager, hashpower=1e6) @@ -333,7 +333,8 @@ def test_multiple_interactions(self) -> None: # get response response = self._get_error_response() - assert response.type == InvalidRequestType.ACK_TOO_SMALL.value # ACK too small because we've already sent it + # ACK too small because we've already sent it + assert str(response.type) == InvalidRequestType.ACK_TOO_SMALL.value # new ack ack = AckRequest(type='ACK', window_size=4, ack_event_id=5) diff --git a/tests/event/test_event_simulation_scenarios.py b/tests/event/test_event_simulation_scenarios.py index 65847491d..d3189093c 100644 --- a/tests/event/test_event_simulation_scenarios.py +++ b/tests/event/test_event_simulation_scenarios.py @@ -44,6 +44,7 @@ class BaseEventSimulationScenariosTest(BaseEventSimulationTester): def test_only_load(self) 
-> None: stream_id = self.manager._event_manager._stream_id + assert stream_id is not None Scenario.ONLY_LOAD.simulate(self.simulator, self.manager) self._start_stream() @@ -53,9 +54,9 @@ def test_only_load(self) -> None: # LOAD_STATED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=0, timestamp=1578878880.0, type=EventType.LOAD_STARTED, data=EmptyData(), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for each genesis (1 block and 2 txs) - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], 
twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, 
data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=4, stream_id=stream_id), # noqa: E501 # LOAD_FINISHED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=4, timestamp=1578878880.0, type=EventType.LOAD_FINISHED, data=EmptyData(), group_id=None), latest_event_id=4, stream_id=stream_id) # noqa: E501 ] @@ -64,8 +65,9 @@ def test_only_load(self) -> None: expected = _remove_timestamp(expected) assert responses == expected, f'expected: {expected}\n\nactual: {responses}' - def test_single_chain_one_block(self): + def test_single_chain_one_block(self) -> None: stream_id = self.manager._event_manager._stream_id + assert stream_id is not None Scenario.SINGLE_CHAIN_ONE_BLOCK.simulate(self.simulator, self.manager) 
self._start_stream() @@ -75,25 +77,26 @@ def test_single_chain_one_block(self): # LOAD_STATED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=0, timestamp=1578878880.0, type=EventType.LOAD_STARTED, data=EmptyData(), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for each genesis (1 block and 2 txs) - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, 
stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, 
inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 # LOAD_FINISHED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=4, timestamp=1578878880.0, type=EventType.LOAD_FINISHED, data=EmptyData(), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', 
decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, 
metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, 
token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', 
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id) # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=8, stream_id=stream_id) # noqa: E501 ] responses = _remove_timestamp(responses) expected = _remove_timestamp(expected) assert responses == expected, f'expected: {expected}\n\nactual: {responses}' - def test_single_chain_blocks_and_transactions(self): + def test_single_chain_blocks_and_transactions(self) -> None: stream_id = self.manager._event_manager._stream_id + assert 
stream_id is not None Scenario.SINGLE_CHAIN_BLOCKS_AND_TRANSACTIONS.simulate(self.simulator, self.manager) self._start_stream() @@ -103,62 +106,63 @@ def test_single_chain_blocks_and_transactions(self): # LOAD_STATED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=0, timestamp=1578878880.0, type=EventType.LOAD_STARTED, data=EmptyData(), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for each genesis (1 block and 2 txs) - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], 
accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, 
data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # LOAD_FINISHED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=4, timestamp=1578878880.0, type=EventType.LOAD_FINISHED, data=EmptyData(), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, 
timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 
'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), 
latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', 
'32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 
'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], 
outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED and one NEW_VERTEX_ACCEPTED for 10 new blocks - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=9, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, 
score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=10, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=11, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=12, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=13, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, 
timestamp=1578878913, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=14, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, 
first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=15, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=16, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=17, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, timestamp=1578878915, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=18, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, 
timestamp=1578878915, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=19, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, 
score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=20, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=21, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=22, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=23, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, 
timestamp=1578878918, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=24, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, 
first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=25, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=26, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=27, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=28, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, version=0, weight=2.0, inputs=[], 
outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=9, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, 
stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=10, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=11, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], 
token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=12, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=13, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, signal_bits=0, version=0, weight=2.0, inputs=[], 
outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=14, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, 
validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=15, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=16, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=17, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, timestamp=1578878915, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=18, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, 
timestamp=1578878915, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=19, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], 
accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=20, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=21, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=22, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=23, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, 
data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=24, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=25, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=26, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], 
parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=27, timestamp=1578878910.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=28, timestamp=1578878910.25, type=EventType.NEW_VERTEX_ACCEPTED, 
data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new tx (below), and one VERTEX_METADATA_CHANGED for a block, adding the new tx as spending their output # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=29, timestamp=1578878970.5, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, 
script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=30, timestamp=1578878970.5, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[SpentOutput(index=0, tx_ids=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650'])], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + 
EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=29, timestamp=1578878970.5, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, signal_bits=0, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=30, timestamp=1578878970.5, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', 
decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[SpentOutput(index=0, tx_ids=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650'])], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new tx - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=31, timestamp=1578878970.5, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=31, timestamp=1578878970.5, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, signal_bits=0, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), 
latest_event_id=38, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new tx (below), and one VERTEX_METADATA_CHANGED for a tx, adding the new tx as spending their output and children # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=32, timestamp=1578879030.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', nonce=0, timestamp=1578879030, version=1, weight=18.4904519466213, inputs=[TxInput(tx_id='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', index=0, spent_output=TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)))], outputs=[TxOutput(value=3400, token_data=0, script='dqkUmkey79Rbhjq4BtHYCm2mT8hDprWIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLatLcoaATFMqECb5fD5rdW2nF9WGyw9os', timelock=None)), TxOutput(value=2000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.4904519466213, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=33, timestamp=1578879030.75, type=EventType.VERTEX_METADATA_CHANGED, 
data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[SpentOutput(index=0, tx_ids=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6'])], conflict_with=[], voided_by=[], received_by=[], children=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6'], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=32, timestamp=1578879030.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', nonce=0, timestamp=1578879030, signal_bits=0, version=1, weight=18.4904519466213, inputs=[TxInput(tx_id='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', index=0, 
spent_output=TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)))], outputs=[TxOutput(value=3400, token_data=0, script='dqkUmkey79Rbhjq4BtHYCm2mT8hDprWIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLatLcoaATFMqECb5fD5rdW2nF9WGyw9os', timelock=None)), TxOutput(value=2000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.4904519466213, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=33, timestamp=1578879030.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, signal_bits=0, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, 
script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[SpentOutput(index=0, tx_ids=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6'])], conflict_with=[], voided_by=[], received_by=[], children=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6'], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new tx - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=34, timestamp=1578879030.75, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', nonce=0, timestamp=1578879030, version=1, weight=18.4904519466213, inputs=[TxInput(tx_id='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', index=0, spent_output=TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)))], outputs=[TxOutput(value=3400, token_data=0, script='dqkUmkey79Rbhjq4BtHYCm2mT8hDprWIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLatLcoaATFMqECb5fD5rdW2nF9WGyw9os', timelock=None)), TxOutput(value=2000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.4904519466213, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=34, timestamp=1578879030.75, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', nonce=0, timestamp=1578879030, signal_bits=0, version=1, weight=18.4904519466213, inputs=[TxInput(tx_id='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', index=0, spent_output=TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)))], outputs=[TxOutput(value=3400, token_data=0, script='dqkUmkey79Rbhjq4BtHYCm2mT8hDprWIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLatLcoaATFMqECb5fD5rdW2nF9WGyw9os', timelock=None)), TxOutput(value=2000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=18.4904519466213, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, 
stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each confirmed transaction (first block changed) # noqa E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=35, timestamp=1578879091.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', nonce=0, timestamp=1578879090, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUTisHvpM4sDeINzxF5auK/8bP6UaIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HDeSe6qKqjSLwtnjLBV84NddtZQyNb9HUU', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', 'd2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', '5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.576585413276128, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=36, timestamp=1578879091.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', nonce=0, timestamp=1578879030, version=1, weight=18.4904519466213, inputs=[TxInput(tx_id='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', index=0, spent_output=TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)))], outputs=[TxOutput(value=3400, token_data=0, 
script='dqkUmkey79Rbhjq4BtHYCm2mT8hDprWIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLatLcoaATFMqECb5fD5rdW2nF9WGyw9os', timelock=None)), TxOutput(value=2000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9'], twins=[], accumulated_weight=18.4904519466213, score=0.0, first_block='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=37, timestamp=1578879091.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', 
timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[SpentOutput(index=0, tx_ids=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6'])], conflict_with=[], voided_by=[], received_by=[], children=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', '7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9'], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=35, timestamp=1578879091.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', nonce=0, timestamp=1578879090, signal_bits=0, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUTisHvpM4sDeINzxF5auK/8bP6UaIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HDeSe6qKqjSLwtnjLBV84NddtZQyNb9HUU', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', 'd2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', '5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.576585413276128, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, 
stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=36, timestamp=1578879091.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', nonce=0, timestamp=1578879030, signal_bits=0, version=1, weight=18.4904519466213, inputs=[TxInput(tx_id='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', index=0, spent_output=TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)))], outputs=[TxOutput(value=3400, token_data=0, script='dqkUmkey79Rbhjq4BtHYCm2mT8hDprWIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLatLcoaATFMqECb5fD5rdW2nF9WGyw9os', timelock=None)), TxOutput(value=2000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9'], twins=[], accumulated_weight=18.4904519466213, score=0.0, first_block='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=37, timestamp=1578879091.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', nonce=0, timestamp=1578878970, 
signal_bits=0, version=1, weight=18.656776158409354, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650', spent_outputs=[SpentOutput(index=0, tx_ids=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6'])], conflict_with=[], voided_by=[], received_by=[], children=['d2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', '7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9'], twins=[], accumulated_weight=18.656776158409354, score=0.0, first_block='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=38, timestamp=1578879091.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', nonce=0, timestamp=1578879090, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, 
script='dqkUTisHvpM4sDeINzxF5auK/8bP6UaIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HDeSe6qKqjSLwtnjLBV84NddtZQyNb9HUU', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', 'd2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', '5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.576585413276128, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id) # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=38, timestamp=1578879091.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', nonce=0, timestamp=1578879090, signal_bits=0, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUTisHvpM4sDeINzxF5auK/8bP6UaIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HDeSe6qKqjSLwtnjLBV84NddtZQyNb9HUU', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', 'd2bd5f83fcbfa5dee2b602ddc18ebd4f7714e1ecf928824f862efb0559dcb4d6', '5453759e15a6413a06390868cbb56509704c6f3f7d25f443556d8d6b2dacc650'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='7c7449a44a6adf26fb9b68f8c2b7751905c788b417946c43b8a999d0b66f76d9', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.576585413276128, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=38, stream_id=stream_id) # noqa: E501 ] responses = _remove_timestamp(responses) expected = _remove_timestamp(expected) 
assert responses == expected, f'expected: {expected}\n\nactual: {responses}' - def test_reorg(self): + def test_reorg(self) -> None: stream_id = self.manager._event_manager._stream_id + assert stream_id is not None Scenario.REORG.simulate(self.simulator, self.manager) self._start_stream() @@ -168,45 +172,46 @@ def test_reorg(self): # LOAD_STATED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=0, timestamp=1578878880.0, type=EventType.LOAD_STARTED, data=EmptyData(), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for each genesis (1 block and 2 txs) - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, 
metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # 
noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, timestamp=1578878880.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # LOAD_FINISHED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=4, timestamp=1578878880.0, type=EventType.LOAD_FINISHED, data=EmptyData(), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', 
event=BaseEvent(id=5, timestamp=1578878940.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', nonce=0, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, timestamp=1578878940.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a'], twins=[], accumulated_weight=2.0, score=2.0, first_block='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, 
timestamp=1578878940.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a'], twins=[], accumulated_weight=2.0, score=2.0, first_block='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, timestamp=1578878940.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', nonce=0, timestamp=1578878940, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, 
timestamp=1578878940.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a'], twins=[], accumulated_weight=2.0, score=2.0, first_block='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, timestamp=1578878940.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a'], twins=[], accumulated_weight=2.0, score=2.0, first_block='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block from manager1 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, timestamp=1578878940.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', nonce=0, timestamp=1578878940, version=0, 
weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, timestamp=1578878940.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', nonce=0, timestamp=1578878940, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block 
(below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa: E501 # Also one VERTEX_METADATA_CHANGED for the previous block, voiding it - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=9, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', nonce=0, timestamp=1578879000, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HHqKa5Y6viZ8fkH2bd1qQBdsZnrtsmruqS', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', spent_outputs=[], conflict_with=[], voided_by=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=10, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', nonce=0, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', 
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', spent_outputs=[], conflict_with=[], voided_by=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=11, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=12, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], 
received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=9, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', nonce=0, timestamp=1578879000, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HHqKa5Y6viZ8fkH2bd1qQBdsZnrtsmruqS', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', spent_outputs=[], conflict_with=[], voided_by=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=10, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', nonce=0, timestamp=1578878940, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', 
address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', spent_outputs=[], conflict_with=[], voided_by=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=11, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=12, timestamp=1578879064.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, 
token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block from manager2 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=13, timestamp=1578879064.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', nonce=0, timestamp=1578879000, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HHqKa5Y6viZ8fkH2bd1qQBdsZnrtsmruqS', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', spent_outputs=[], conflict_with=[], voided_by=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=13, timestamp=1578879064.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', nonce=0, 
timestamp=1578879000, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HHqKa5Y6viZ8fkH2bd1qQBdsZnrtsmruqS', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', spent_outputs=[], conflict_with=[], voided_by=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id), # noqa: E501 # REORG_STARTED caused by a new block from manager2 (below) EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=14, timestamp=1578879064.25, type=EventType.REORG_STARTED, data=ReorgData(reorg_size=1, previous_best_block='82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', new_best_block='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', common_block='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792'), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa: E501 # Also one VERTEX_METADATA_CHANGED for the previous block, un-voiding it as it's now part of the best blockchain # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=15, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, 
data=TxData(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', nonce=0, timestamp=1578879001, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HJHSdTickduA1MF9PTbzBQi6Z7stNAzwAu', timelock=None))], parents=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=16, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', nonce=0, timestamp=1578879000, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HHqKa5Y6viZ8fkH2bd1qQBdsZnrtsmruqS', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1'], twins=[], 
accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=17, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1'], twins=[], accumulated_weight=2.0, score=2.0, first_block='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=18, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1'], twins=[], accumulated_weight=2.0, score=2.0, 
first_block='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=15, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', nonce=0, timestamp=1578879001, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HJHSdTickduA1MF9PTbzBQi6Z7stNAzwAu', timelock=None))], parents=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=16, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', nonce=0, timestamp=1578879000, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HHqKa5Y6viZ8fkH2bd1qQBdsZnrtsmruqS', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=17, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1'], twins=[], accumulated_weight=2.0, score=2.0, first_block='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=18, timestamp=1578879064.25, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, 
metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['82afedcd590f7ad34d09475fc1dfd00e5a0f8ad6b70508ca4659351709c90f9a', '1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1'], twins=[], accumulated_weight=2.0, score=2.0, first_block='1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 # REORG_FINISHED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=19, timestamp=1578879064.25, type=EventType.REORG_FINISHED, data=EmptyData(), group_id=0), latest_event_id=20, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block from manager2 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=20, timestamp=1578879064.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', nonce=0, timestamp=1578879001, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HJHSdTickduA1MF9PTbzBQi6Z7stNAzwAu', timelock=None))], parents=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), 
latest_event_id=20, stream_id=stream_id) # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=20, timestamp=1578879064.25, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', nonce=0, timestamp=1578879001, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HJHSdTickduA1MF9PTbzBQi6Z7stNAzwAu', timelock=None))], parents=['1204b8c30f0236ae6f1841d0c4805a47089c4d5e3ccd0dcab8aa65f0e4991533', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='38e7f91420ae78ae01707f80c29abe692beebf9d5575cc7c9248e9bdc78169c1', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=20, stream_id=stream_id) # noqa: E501 ] responses = _remove_timestamp(responses) expected = _remove_timestamp(expected) assert responses == expected, f'expected: {expected}\n\nactual: {responses}' - def test_unvoided_transaction(self): + def test_unvoided_transaction(self) -> None: stream_id = self.manager._event_manager._stream_id + assert stream_id is not None Scenario.UNVOIDED_TRANSACTION.simulate(self.simulator, self.manager) self._start_stream() @@ -216,56 +221,56 @@ def test_unvoided_transaction(self): # LOAD_STATED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=0, type=EventType.LOAD_STARTED, timestamp=0, data=EmptyData(), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for each genesis (1 block and 2 txs) - 
EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, 
metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=1, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', nonce=0, timestamp=1572636343, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=100000000000, token_data=0, script='dqkU/QUFm2AGJJVDuC82h2oXxz/SJnuIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HVayMofEDh4XGsaQJeRJKhutYxYodYNop6', timelock=None))], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=2, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: 
E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=3, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # LOAD_FINISHED EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=4, type=EventType.LOAD_FINISHED, timestamp=0, data=EmptyData(), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', 
spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, 
data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=5, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', 
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=6, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, 
first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=7, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, signal_bits=0, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', 'fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', 'eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', 'f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=2.0, first_block='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, 
data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=8, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED and one NEW_VERTEX_ACCEPTED for 10 new blocks - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=9, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=10, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', 
timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=11, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=12, 
type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=13, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=14, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=15, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], 
parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=16, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=17, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, 
data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, timestamp=1578878915, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=18, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, timestamp=1578878915, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=19, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=20, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], 
parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=21, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=22, 
type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=23, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=24, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=25, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], 
parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=26, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=27, 
type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=28, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, 
score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=9, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=10, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', nonce=0, timestamp=1578878911, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUXRFxfhIYOXURHjiAlx9XPuMh7E2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HF1E8Aibb17Rha6r1cM1oCp74DRmYqP61V', timelock=None))], parents=['9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', 
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8'], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=11, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=12, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, 
data=TxData(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', nonce=0, timestamp=1578878912, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUu9S/kjy3HbglEu3bA4JargdORiiIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPeHcEFtRZvMBijqFwccicDMkN17hoNq21', timelock=None))], parents=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393'], twins=[], accumulated_weight=2.0, score=4.584962500721156, first_block=None, height=3, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=13, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=14, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', nonce=0, timestamp=1578878913, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUzskI6jayLvTobJDhpVZiuMu7zt+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HRNWR1HpdAiDx7va9VkNUuqqSo2MGW5iE6', timelock=None))], parents=['32fea29451e575e9e001f55878f4df61a2f6cf0212c4b9cbfb8125691d5377a8', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49'], twins=[], accumulated_weight=2.0, score=4.807354922057604, first_block=None, height=4, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=15, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], 
parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=16, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', nonce=0, timestamp=1578878914, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU7B7Cf/pnj2DglfhnqyiRzxNg+K2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HU3chqobPRBt8pjYXt4WahKERjV8UMCWbd', timelock=None))], parents=['896593a8103553e6f54c46901f8c14e62618efe7f18c5afd48cf26e96db9e393', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3'], twins=[], accumulated_weight=2.0, score=5.0, first_block=None, height=5, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=17, type=EventType.VERTEX_METADATA_CHANGED, 
timestamp=0, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, timestamp=1578878915, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=18, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', nonce=0, timestamp=1578878915, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUZmTJ0of2Ce9iuycIVpFCVU08WmKIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFrY3outhFVXGLEvaVKVFkd2nB1ihumXCr', timelock=None))], parents=['0b71c21b8000f05241283a848b99e38f27a94a188def7ef1b93f8b0828caba49', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e'], twins=[], accumulated_weight=2.0, score=5.169925001442312, first_block=None, height=6, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=19, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=20, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', nonce=0, timestamp=1578878916, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPNN8M/qangqd2wYSzu0u+3OmwDmIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC4kH6pnYBofzTSFWRpA71Po7geNURh5p2', timelock=None))], 
parents=['97b711632054189cbeb1ef4707b7d48c84e6af9a0395a4484030fb3202e691e3', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d'], twins=[], accumulated_weight=2.0, score=5.321928094887363, first_block=None, height=7, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=21, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=22, 
type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', nonce=0, timestamp=1578878917, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUxbNqvpWbgNtk9km/VuYhzHHMp76IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HQYUSF8ytNmm92GYMCS8XPYkt3JeKkBDyj', timelock=None))], parents=['6b5e6201d81381a49fa7febe15f46d440360d8e7b1a0ddbe42e59889f32af56e', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6'], twins=[], accumulated_weight=2.0, score=5.459431618637297, first_block=None, height=8, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=23, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], 
voided_by=[], received_by=[], children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=24, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', nonce=0, timestamp=1578878918, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkU48C0XcFpiaWq2gwTICyEVdvJXcCIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HTHNdEhmQeECj5brwUzHK4Sq3fFrFiEvaK', timelock=None))], parents=['fdc65dbd3675a01a39343dd0c4a05eea471c3bd7015bb96cea0bde7143e24c5d', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7'], twins=[], accumulated_weight=2.0, score=5.584962500721156, first_block=None, height=9, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=25, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', 
timelock=None))], parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=26, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', nonce=0, timestamp=1578878919, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUmQRjqRyxq26raJZnhnpRJsrS9n2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HLUD2fi9udkg3ysPKdGvbWDyHFWdXBY1i1', timelock=None))], parents=['eb3c4684dfad95a5b9d1c88f3463b91fe44bbe7b00e4b810648ca9e9ff5685a6', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb'], twins=[], accumulated_weight=2.0, score=5.700439718141092, first_block=None, height=10, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', 
event=BaseEvent(id=27, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=28, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', nonce=0, timestamp=1578878920, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUYFHjcujZZHs0JWZkriEbn5jTv/aIrA==', decoded=DecodedTxOutput(type='P2PKH', address='HFJRMUG7GTjdqG5f6e5tqnrnquBMFCvvs2', timelock=None))], parents=['1eb8f2c848828831c0e50f13b6ea54cac99494031ebad0318c7b142acb5540b7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], 
twins=[], accumulated_weight=2.0, score=5.807354922057604, first_block=None, height=11, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new tx (below), and one VERTEX_METADATA_CHANGED for a block, adding the new tx as spending their output # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=29, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=30, type=EventType.VERTEX_METADATA_CHANGED, 
timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[SpentOutput(index=0, tx_ids=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'])], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=29, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, signal_bits=0, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, 
script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=30, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[SpentOutput(index=0, tx_ids=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'])], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One 
NEW_VERTEX_ACCEPTED for a new tx - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=31, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=31, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, signal_bits=0, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, 
script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new tx (below), one VERTEX_METADATA_CHANGED for a block, adding the new tx as spending their output, and one VERTEX_METADATA_CHANGED adding the new tx as twin/conflict of the previous tx # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=32, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', nonce=0, timestamp=1578879030, version=1, weight=19.0, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', 
decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', spent_outputs=[], conflict_with=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], voided_by=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], received_by=[], children=[], twins=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], accumulated_weight=19.0, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=33, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], 
parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], voided_by=[], received_by=[], children=[], twins=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=34, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[SpentOutput(index=0, tx_ids=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', '0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'])], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, 
stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=32, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', nonce=0, timestamp=1578879030, signal_bits=0, version=1, weight=19.0, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', spent_outputs=[], conflict_with=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], voided_by=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], received_by=[], children=[], twins=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], accumulated_weight=19.0, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=33, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, 
timestamp=1578878970, signal_bits=0, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], voided_by=[], received_by=[], children=[], twins=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=34, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', nonce=0, timestamp=1578878910, signal_bits=0, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], 
parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', spent_outputs=[SpentOutput(index=0, tx_ids=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', '0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'])], conflict_with=[], voided_by=[], received_by=[], children=['8ab45f3b35f8dc437fb4a246d9b7dd3d3d5cfb7270e516076718a7a94598cf2f'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new tx that is a twin of the previous one. It's voided. - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=35, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', nonce=0, timestamp=1578879030, version=1, weight=19.0, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', spent_outputs=[], conflict_with=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], voided_by=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], received_by=[], children=[], twins=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], accumulated_weight=19.0, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=35, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', nonce=0, timestamp=1578879030, signal_bits=0, version=1, weight=19.0, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', spent_outputs=[], 
conflict_with=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], voided_by=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], received_by=[], children=[], twins=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], accumulated_weight=19.0, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each twin tx, inverting the voided state of them. # noqa E501 # The order of events is important, we receive the voided txs first, then reverse topological ordering. - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=36, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], 
conflict_with=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], voided_by=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], received_by=[], children=[], twins=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=37, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', nonce=0, timestamp=1578879090, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUFgE9a6rVMusN303z18sYfjdpYGqIrA==', decoded=DecodedTxOutput(type='P2PKH', address='H8XUjiUx24WLXUN63da34hX6bEs29GJjSs', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.000858282039708, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=38, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', nonce=0, timestamp=1578879030, version=1, weight=19.0, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, 
script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', spent_outputs=[], conflict_with=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], voided_by=[], received_by=[], children=['24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a'], twins=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], accumulated_weight=19.000704269011248, score=0.0, first_block='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=36, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', nonce=0, timestamp=1578878970, signal_bits=0, version=1, weight=19.0005, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, 
script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4', spent_outputs=[], conflict_with=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], voided_by=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], received_by=[], children=[], twins=['0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], accumulated_weight=19.0005, score=0.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=37, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', nonce=0, timestamp=1578879090, signal_bits=0, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUFgE9a6rVMusN303z18sYfjdpYGqIrA==', decoded=DecodedTxOutput(type='P2PKH', address='H8XUjiUx24WLXUN63da34hX6bEs29GJjSs', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', spent_outputs=[], conflict_with=[], voided_by=[], 
received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.000858282039708, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=38, type=EventType.VERTEX_METADATA_CHANGED, timestamp=0, data=TxData(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', nonce=0, timestamp=1578879030, signal_bits=0, version=1, weight=19.0, inputs=[TxInput(tx_id='9b83e5dbc7145a5a161c34da4bec4e1a64dc02a3f2495a2db78457426c9ee6bf', index=0, spent_output=TxOutput(value=6400, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None)))], outputs=[TxOutput(value=5400, token_data=0, script='dqkUutgaVG8W5OnzgAEVUqB4XgmDgm2IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HPZ4x7a2NXdrMa5ksPfeGMZmjhJHTjDZ9Q', timelock=None)), TxOutput(value=1000, token_data=0, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', decoded=DecodedTxOutput(type='P2PKH', address='HC846khX278aM1utqAgPzkKAxBTfftaRDm', timelock=None))], parents=['16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88', spent_outputs=[], conflict_with=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], voided_by=[], received_by=[], children=['24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a'], twins=['cba55aadc9fd8d5bdb6f394d8f5eb00cc775db12c2512c9e37df8e31ca3841f4'], accumulated_weight=19.000704269011248, score=0.0, first_block='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, 
stream_id=stream_id), # noqa: E501 # One NEW_VERTEX_ACCEPTED for a new block - EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=39, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', nonce=0, timestamp=1578879090, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUFgE9a6rVMusN303z18sYfjdpYGqIrA==', decoded=DecodedTxOutput(type='P2PKH', address='H8XUjiUx24WLXUN63da34hX6bEs29GJjSs', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.000858282039708, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 + EventResponse(type='EVENT', peer_id=self.peer_id, network='unittests', event=BaseEvent(id=39, type=EventType.NEW_VERTEX_ACCEPTED, timestamp=0, data=TxData(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', nonce=0, timestamp=1578879090, signal_bits=0, version=0, weight=8.0, inputs=[], outputs=[TxOutput(value=6400, token_data=0, script='dqkUFgE9a6rVMusN303z18sYfjdpYGqIrA==', decoded=DecodedTxOutput(type='P2PKH', address='H8XUjiUx24WLXUN63da34hX6bEs29GJjSs', timelock=None))], parents=['f349fc0f570a636a440ed3853cc533faa2c4616160e1d9eb6f5d656a90da30fb', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '0639e93ff22647ed06af3ac3a3bc7dd2ca8db18c67fdd9a039318b4d6bf51a88'], tokens=[], token_name=None, token_symbol=None, 
metadata=TxMetadata(hash='24707288e7c72c5e74c68241ee32d64239902533e64946de6e6cddb66ef3432a', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=8.0, score=19.000858282039708, first_block=None, height=12, validation='full'), aux_pow=None), group_id=None), latest_event_id=39, stream_id=stream_id), # noqa: E501 ] responses = _remove_timestamp(responses) diff --git a/tests/event/test_event_storage.py b/tests/event/test_event_storage.py index 1013fb25f..f9b2651a4 100644 --- a/tests/event/test_event_storage.py +++ b/tests/event/test_event_storage.py @@ -2,6 +2,7 @@ import pytest +from hathor.event.model.base_event import BaseEvent from hathor.event.model.node_state import NodeState from hathor.event.storage import EventStorage from hathor.event.storage.memory_storage import EventMemoryStorage @@ -16,18 +17,18 @@ class EventStorageBaseTest(unittest.TestCase): event_storage: EventStorage - def setUp(self): + def setUp(self) -> None: super().setUp() self.event_mocker = EventMocker(self.rng) - def test_save_event_and_retrieve(self): + def test_save_event_and_retrieve(self) -> None: event = self.event_mocker.generate_mocked_event() self.event_storage.save_event(event) event_retrieved = self.event_storage.get_event(event.id) assert event_retrieved == event - def test_save_events_and_retrieve(self): + def test_save_events_and_retrieve(self) -> None: event1 = self.event_mocker.generate_mocked_event() event2 = self.event_mocker.generate_mocked_event() self.event_storage.save_events([event1, event2]) @@ -37,7 +38,7 @@ def test_save_events_and_retrieve(self): assert event1_retrieved == event1 assert event2_retrieved == event2 - def test_get_negative_key(self): + def test_get_negative_key(self) -> None: with self.assertRaises(ValueError) as cm: self.event_storage.get_event(-1) @@ -46,20 +47,22 @@ def test_get_negative_key(self): str(cm.exception) ) - def test_get_nonexistent_event(self): + def test_get_nonexistent_event(self) 
-> None: assert self.event_storage.get_event(0) is None assert self.event_storage.get_event(9999) is None - def test_save_events_and_retrieve_the_last(self): - last_event = None + def test_save_events_and_retrieve_the_last(self) -> None: + last_event: BaseEvent | None = None for i in range(10): last_event = self.event_mocker.generate_mocked_event(i) self.event_storage.save_event(last_event) event_retrieved = self.event_storage.get_last_event() + assert event_retrieved is not None + assert last_event is not None assert event_retrieved.id == last_event.id - def test_save_non_sequential(self): + def test_save_non_sequential(self) -> None: for i in range(10): event = self.event_mocker.generate_mocked_event(i) self.event_storage.save_event(event) @@ -74,16 +77,16 @@ def test_save_non_sequential(self): str(cm.exception) ) - def test_iter_from_event_empty(self): + def test_iter_from_event_empty(self) -> None: self._test_iter_from_event(0) - def test_iter_from_event_single(self): + def test_iter_from_event_single(self) -> None: self._test_iter_from_event(1) - def test_iter_from_event_multiple(self): + def test_iter_from_event_multiple(self) -> None: self._test_iter_from_event(20) - def _test_iter_from_event(self, n_events): + def _test_iter_from_event(self, n_events: int) -> None: expected_events = [] for i in range(n_events): event = self.event_mocker.generate_mocked_event(i) @@ -94,7 +97,7 @@ def _test_iter_from_event(self, n_events): self.assertEqual(expected_events, actual_events) - def test_iter_from_event_negative_key(self): + def test_iter_from_event_negative_key(self) -> None: with self.assertRaises(ValueError) as cm: events = self.event_storage.iter_from_event(-10) list(events) @@ -104,7 +107,7 @@ def test_iter_from_event_negative_key(self): str(cm.exception) ) - def test_save_events_and_retrieve_last_group_id(self): + def test_save_events_and_retrieve_last_group_id(self) -> None: expected_group_id = 4 self._populate_events_and_last_group_id(n_events=10, 
last_group_id=expected_group_id) @@ -119,38 +122,38 @@ def _populate_events_and_last_group_id(self, n_events: int, last_group_id: int) event = self.event_mocker.generate_mocked_event(i, group_id) self.event_storage.save_event(event) - def test_get_empty_node_state(self): + def test_get_empty_node_state(self) -> None: node_state = self.event_storage.get_node_state() assert node_state is None - def test_save_node_state_and_retrieve(self): + def test_save_node_state_and_retrieve(self) -> None: self.event_storage.save_node_state(NodeState.SYNC) node_state = self.event_storage.get_node_state() assert node_state == NodeState.SYNC - def test_get_empty_event_queue_state(self): + def test_get_empty_event_queue_state(self) -> None: enabled = self.event_storage.get_event_queue_state() assert enabled is False - def test_save_event_queue_enabled_and_retrieve(self): + def test_save_event_queue_enabled_and_retrieve(self) -> None: self.event_storage.save_event_queue_state(True) enabled = self.event_storage.get_event_queue_state() assert enabled is True - def test_save_event_queue_disabled_and_retrieve(self): + def test_save_event_queue_disabled_and_retrieve(self) -> None: self.event_storage.save_event_queue_state(False) enabled = self.event_storage.get_event_queue_state() assert enabled is False - def test_reset_events_empty_database(self): + def test_reset_events_empty_database(self) -> None: self._test_reset_events() - def test_reset_all_empty_database(self): + def test_reset_all_empty_database(self) -> None: self._test_reset_events() def _test_reset_events(self) -> None: @@ -179,7 +182,7 @@ def _test_reset_all(self) -> None: assert node_state is None assert event_queue_state is False - def test_reset_events_full_database(self): + def test_reset_events_full_database(self) -> None: n_events = 10 expected_last_group_id = 4 expected_node_state = NodeState.SYNC @@ -206,7 +209,7 @@ def test_reset_events_full_database(self): assert node_state == expected_node_state assert 
event_queue_state is True - def test_reset_all_full_database(self): + def test_reset_all_full_database(self) -> None: n_events = 10 expected_last_group_id = 4 expected_node_state = NodeState.SYNC @@ -238,7 +241,7 @@ def test_reset_all_full_database(self): class EventStorageRocksDBTest(EventStorageBaseTest): __test__ = True - def setUp(self): + def setUp(self) -> None: super().setUp() self.directory = tempfile.mkdtemp() self.tmpdirs.append(self.directory) @@ -249,6 +252,6 @@ def setUp(self): class EventStorageMemoryTest(EventStorageBaseTest): __test__ = True - def setUp(self): + def setUp(self) -> None: super().setUp() self.event_storage = EventMemoryStorage() diff --git a/tests/event/websocket/test_factory.py b/tests/event/websocket/test_factory.py index b100fc6d5..6c703a609 100644 --- a/tests/event/websocket/test_factory.py +++ b/tests/event/websocket/test_factory.py @@ -24,7 +24,7 @@ from tests.utils import EventMocker -def test_started_register(): +def test_started_register() -> None: factory = _get_factory() connection = Mock(spec_set=EventWebsocketProtocol) connection.send_invalid_request_response = Mock() @@ -35,7 +35,7 @@ def test_started_register(): connection.send_invalid_request_response.assert_not_called() -def test_non_started_register(): +def test_non_started_register() -> None: factory = _get_factory() connection = Mock(spec_set=EventWebsocketProtocol) connection.send_invalid_request_response = Mock() @@ -45,7 +45,7 @@ def test_non_started_register(): connection.send_invalid_request_response.assert_called_once_with(InvalidRequestType.EVENT_WS_NOT_RUNNING) -def test_stopped_register(): +def test_stopped_register() -> None: factory = _get_factory() connection = Mock(spec_set=EventWebsocketProtocol) connection.send_invalid_request_response = Mock() @@ -84,7 +84,7 @@ def test_broadcast_event(can_receive_event: bool) -> None: connection.send_event_response.assert_called_once_with(response) -def test_broadcast_multiple_events_multiple_connections(): +def 
test_broadcast_multiple_events_multiple_connections() -> None: stream_id = 'stream_id' factory = _get_factory(10) connection1 = Mock(spec_set=EventWebsocketProtocol) diff --git a/tests/event/websocket/test_protocol.py b/tests/event/websocket/test_protocol.py index 2f3a4dcfe..426d74778 100644 --- a/tests/event/websocket/test_protocol.py +++ b/tests/event/websocket/test_protocol.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Optional -from unittest.mock import ANY, Mock +from unittest.mock import ANY, Mock, patch import pytest from autobahn.websocket import ConnectionRequest @@ -27,11 +27,11 @@ @pytest.fixture -def factory(): +def factory() -> Mock: return Mock(spec_set=EventWebsocketFactory) -def test_init(): +def test_init() -> None: protocol = EventWebsocketProtocol() assert protocol.client_peer is None @@ -41,7 +41,7 @@ def test_init(): assert not protocol._stream_is_active -def test_next_expected_event_id(): +def test_next_expected_event_id() -> None: protocol = EventWebsocketProtocol() assert protocol.next_expected_event_id() == 0 @@ -51,7 +51,7 @@ def test_next_expected_event_id(): assert protocol.next_expected_event_id() == 6 -def test_on_connect(): +def test_on_connect() -> None: protocol = EventWebsocketProtocol() request = Mock(spec_set=ConnectionRequest) request.peer = 'some_peer' @@ -61,7 +61,7 @@ def test_on_connect(): assert protocol.client_peer == 'some_peer' -def test_on_open(factory): +def test_on_open(factory: Mock) -> None: protocol = EventWebsocketProtocol() protocol.factory = factory @@ -70,7 +70,7 @@ def test_on_open(factory): factory.register.assert_called_once_with(protocol) -def test_on_close(factory): +def test_on_close(factory: Mock) -> None: protocol = EventWebsocketProtocol() protocol.factory = factory @@ -79,7 +79,7 @@ def test_on_close(factory): factory.unregister.assert_called_once_with(protocol) -def test_send_event_response(): +def test_send_event_response() -> None: protocol = EventWebsocketProtocol() 
protocol.sendMessage = Mock() response = EventResponse( @@ -99,7 +99,8 @@ def test_send_event_response(): expected_payload = (b'{"type":"EVENT","peer_id":"my_peer_id","network":"my_network","event":{"id":10,' b'"timestamp":123.0,"type":"VERTEX_METADATA_CHANGED","data":{"hash":"abc","nonce":123,' - b'"timestamp":456,"version":1,"weight":10.0,"inputs":[],"outputs":[],"parents":[],' + b'"timestamp":456,"signal_bits":0,"version":1,"weight":10.0,"inputs":[],"outputs":[],' + b'"parents":[],' b'"tokens":[],"token_name":null,"token_symbol":null,"metadata":{"hash":"abc",' b'"spent_outputs":[],"conflict_with":[],"voided_by":[],"received_by":[],"children":[],' b'"twins":[],"accumulated_weight":10.0,"score":20.0,"first_block":null,"height":100,' @@ -112,7 +113,11 @@ def test_send_event_response(): @pytest.mark.parametrize('_type', [InvalidRequestType.VALIDATION_ERROR, InvalidRequestType.STREAM_IS_INACTIVE]) @pytest.mark.parametrize('invalid_payload', [None, b'some_payload']) @pytest.mark.parametrize('error_message', [None, 'some error']) -def test_send_invalid_request_response(_type, invalid_payload, error_message): +def test_send_invalid_request_response( + _type: InvalidRequestType, + invalid_payload: bytes | None, + error_message: str | None +) -> None: protocol = EventWebsocketProtocol() protocol.sendMessage = Mock() @@ -173,7 +178,7 @@ def test_can_receive_event( assert result == expected_result -def test_on_valid_stop_message(): +def test_on_valid_stop_message() -> None: protocol = EventWebsocketProtocol() protocol._stream_is_active = True @@ -182,7 +187,7 @@ def test_on_valid_stop_message(): assert not protocol._stream_is_active -def test_stop_message_on_inactive(): +def test_stop_message_on_inactive() -> None: protocol = EventWebsocketProtocol() protocol.sendMessage = Mock() protocol._stream_is_active = False @@ -206,7 +211,7 @@ def test_stop_message_on_inactive(): (10, 0, 10), ] ) -def test_on_valid_ack_message(ack_event_id, window_size, last_sent_event_id): +def 
test_on_valid_ack_message(ack_event_id: int, window_size: int, last_sent_event_id: int) -> None: protocol = EventWebsocketProtocol() protocol._last_sent_event_id = last_sent_event_id protocol.factory = Mock() @@ -235,7 +240,7 @@ def test_on_valid_ack_message(ack_event_id, window_size, last_sent_event_id): (10, 0, 10), ] ) -def test_on_valid_start_message(ack_event_id, window_size, last_sent_event_id): +def test_on_valid_start_message(ack_event_id: int, window_size: int, last_sent_event_id: int | None) -> None: protocol = EventWebsocketProtocol() protocol._last_sent_event_id = last_sent_event_id protocol.factory = Mock() @@ -251,7 +256,7 @@ def test_on_valid_start_message(ack_event_id, window_size, last_sent_event_id): protocol.factory.send_next_event_to_connection.assert_called_once() -def test_ack_message_on_inactive(): +def test_ack_message_on_inactive() -> None: protocol = EventWebsocketProtocol() protocol.sendMessage = Mock() protocol._stream_is_active = False @@ -264,7 +269,7 @@ def test_ack_message_on_inactive(): protocol.sendMessage.assert_called_once_with(response) -def test_start_message_on_active(): +def test_start_message_on_active() -> None: protocol = EventWebsocketProtocol() protocol.sendMessage = Mock() protocol._stream_is_active = True @@ -294,17 +299,21 @@ def test_start_message_on_active(): (5, 1, 10, InvalidRequestType.ACK_TOO_LARGE), ] ) -def test_on_invalid_ack_message(_ack_event_id, last_sent_event_id, ack_event_id, _type): +def test_on_invalid_ack_message( + _ack_event_id: int, + last_sent_event_id: int | None, + ack_event_id: int, + _type: InvalidRequestType, +) -> None: protocol = EventWebsocketProtocol() protocol._ack_event_id = _ack_event_id protocol._last_sent_event_id = last_sent_event_id - protocol.send_invalid_request_response = Mock() protocol._stream_is_active = True payload = f'{{"type": "ACK", "ack_event_id": {ack_event_id}, "window_size": 0}}'.encode('utf8') - protocol.onMessage(payload, False) - - 
protocol.send_invalid_request_response.assert_called_once_with(_type, payload) + with patch.object(protocol, 'send_invalid_request_response') as mock: + protocol.onMessage(payload, False) + mock.assert_called_once_with(_type, payload) @pytest.mark.parametrize( @@ -318,16 +327,15 @@ def test_on_invalid_ack_message(_ack_event_id, last_sent_event_id, ack_event_id, (10, 5), ] ) -def test_on_invalid_start_message(_ack_event_id, ack_event_id): +def test_on_invalid_start_message(_ack_event_id: int, ack_event_id: int | None) -> None: protocol = EventWebsocketProtocol() protocol._ack_event_id = _ack_event_id - protocol.send_invalid_request_response = Mock() - ack_event_id = 'null' if ack_event_id is None else ack_event_id - payload = f'{{"type": "START_STREAM", "last_ack_event_id": {ack_event_id}, "window_size": 0}}'.encode('utf8') + ack_event_id_str: str = 'null' if ack_event_id is None else f'{ack_event_id}' + payload = f'{{"type": "START_STREAM", "last_ack_event_id": {ack_event_id_str}, "window_size": 0}}'.encode('utf8') - protocol.onMessage(payload, False) - - protocol.send_invalid_request_response.assert_called_once_with(InvalidRequestType.ACK_TOO_SMALL, payload) + with patch.object(protocol, 'send_invalid_request_response') as mock: + protocol.onMessage(payload, False) + mock.assert_called_once_with(InvalidRequestType.ACK_TOO_SMALL, payload) @pytest.mark.parametrize( @@ -343,11 +351,10 @@ def test_on_invalid_start_message(_ack_event_id, ack_event_id): b'{"type": "ACK", "ack_event_id": -10, "window_size": 0}', ] ) -def test_validation_error_on_message(payload): +def test_validation_error_on_message(payload: bytes) -> None: protocol = EventWebsocketProtocol() - protocol.send_invalid_request_response = Mock() protocol._stream_is_active = False - protocol.onMessage(payload, False) - - protocol.send_invalid_request_response.assert_called_once_with(InvalidRequestType.VALIDATION_ERROR, payload, ANY) + with patch.object(protocol, 'send_invalid_request_response') as mock: + 
protocol.onMessage(payload, False) + mock.assert_called_once_with(InvalidRequestType.VALIDATION_ERROR, payload, ANY) diff --git a/tests/execution_manager/__init__.py b/tests/execution_manager/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/execution_manager/test_execution_manager.py b/tests/execution_manager/test_execution_manager.py new file mode 100644 index 000000000..9093c64fc --- /dev/null +++ b/tests/execution_manager/test_execution_manager.py @@ -0,0 +1,47 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +from unittest.mock import Mock, patch + +from hathor.execution_manager import ExecutionManager +from hathor.reactor import ReactorProtocol + + +def test_crash_and_exit() -> None: + def callback() -> None: + pass + + callback_wrapped = Mock(wraps=callback) + log_mock = Mock() + reactor_mock = Mock(spec_set=ReactorProtocol) + manager = ExecutionManager(reactor_mock) + manager._log = log_mock + reason = 'some critical failure' + + manager.register_on_crash_callback(callback_wrapped) + + with patch.object(sys, 'exit') as exit_mock: + manager.crash_and_exit(reason=reason) + + callback_wrapped.assert_called_once() + log_mock.critical.assert_called_once_with( + 'Critical failure occurred, causing the full node to halt execution. 
Manual intervention is required.', + reason=reason, + exc_info=True + ) + + reactor_mock.stop.assert_called_once() + reactor_mock.crash.assert_called_once() + exit_mock.assert_called_once_with(-1) diff --git a/tests/feature_activation/test_bit_signaling_service.py b/tests/feature_activation/test_bit_signaling_service.py index f3b24e140..930ca39f2 100644 --- a/tests/feature_activation/test_bit_signaling_service.py +++ b/tests/feature_activation/test_bit_signaling_service.py @@ -173,7 +173,8 @@ def _test_generate_signal_bits( feature_service=feature_service, tx_storage=Mock(), support_features=support_features, - not_support_features=not_support_features + not_support_features=not_support_features, + feature_storage=Mock(), ) return service.generate_signal_bits(block=Mock()) @@ -216,6 +217,7 @@ def test_support_intersection_validation( tx_storage=Mock(), support_features=support_features, not_support_features=not_support_features, + feature_storage=Mock(), ) message = str(e.value) @@ -256,7 +258,7 @@ def test_non_signaling_features_warning( tx_storage = Mock(spec_set=TransactionStorage) tx_storage.get_best_block = lambda: best_block - def get_bits_description_mock(block): + def get_bits_description_mock(block: Block) -> dict[Feature, FeatureDescription]: if block == best_block: return {} raise NotImplementedError @@ -270,6 +272,7 @@ def get_bits_description_mock(block): tx_storage=tx_storage, support_features=support_features, not_support_features=not_support_features, + feature_storage=Mock(), ) logger_mock = Mock() service._log = logger_mock @@ -283,3 +286,35 @@ def get_bits_description_mock(block): best_block_hash='abc', non_signaling_features=non_signaling_features, ) + + +def test_on_must_signal_not_supported() -> None: + service = BitSignalingService( + feature_settings=Mock(), + feature_service=Mock(), + tx_storage=Mock(), + support_features=set(), + not_support_features={Feature.NOP_FEATURE_1}, + feature_storage=Mock(), + ) + + 
service.on_must_signal(feature=Feature.NOP_FEATURE_1) + + assert service._support_features == {Feature.NOP_FEATURE_1} + assert service._not_support_features == set() + + +def test_on_must_signal_supported() -> None: + service = BitSignalingService( + feature_settings=Mock(), + feature_service=Mock(), + tx_storage=Mock(), + support_features=set(), + not_support_features=set(), + feature_storage=Mock(), + ) + + service.on_must_signal(feature=Feature.NOP_FEATURE_1) + + assert service._support_features == {Feature.NOP_FEATURE_1} + assert service._not_support_features == set() diff --git a/tests/feature_activation/test_criteria.py b/tests/feature_activation/test_criteria.py index 2d8e5774a..b8ef70d2e 100644 --- a/tests/feature_activation/test_criteria.py +++ b/tests/feature_activation/test_criteria.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any + import pytest from pydantic import ValidationError @@ -43,7 +45,7 @@ ) ] ) -def test_valid_criteria(criteria): +def test_valid_criteria(criteria: dict[str, Any]) -> None: Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) @@ -56,10 +58,10 @@ def test_valid_criteria(criteria): (10, 'bit must be lower than max_signal_bits: 10 >= 2') ] ) -def test_bit(bit, error): +def test_bit(bit: int, error: str) -> None: criteria = VALID_CRITERIA | dict(bit=bit) with pytest.raises(ValidationError) as e: - Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error @@ -75,10 +77,10 @@ def test_bit(bit, error): (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0') ] ) -def test_start_height(start_height, error): +def test_start_height(start_height: int, error: str) -> None: criteria = VALID_CRITERIA | 
dict(start_height=start_height) with pytest.raises(ValidationError) as e: - Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error @@ -95,10 +97,10 @@ def test_start_height(start_height, error): (3111, 'Should be a multiple of evaluation_interval: 3111 % 1000 != 0') ] ) -def test_timeout_height(timeout_height, error): +def test_timeout_height(timeout_height: int, error: str) -> None: criteria = VALID_CRITERIA | dict(timeout_height=timeout_height) with pytest.raises(ValidationError) as e: - Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error @@ -113,10 +115,10 @@ def test_timeout_height(timeout_height, error): (100000, 'threshold must not be greater than evaluation_interval: 100000 > 1000') ] ) -def test_threshold(threshold, error): +def test_threshold(threshold: int, error: str) -> None: criteria = VALID_CRITERIA | dict(threshold=threshold) with pytest.raises(ValidationError) as e: - Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error @@ -132,10 +134,10 @@ def test_threshold(threshold, error): (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0'), ] ) -def test_minimum_activation_height(minimum_activation_height, error): +def test_minimum_activation_height(minimum_activation_height: int, error: str) -> None: criteria = VALID_CRITERIA | dict(minimum_activation_height=minimum_activation_height) with pytest.raises(ValidationError) as e: - 
Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error @@ -149,10 +151,10 @@ def test_minimum_activation_height(minimum_activation_height, error): ('0.0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"') ] ) -def test_version(version, error): +def test_version(version: str, error: str) -> None: criteria = VALID_CRITERIA | dict(version=version) with pytest.raises(ValidationError) as e: - Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error diff --git a/tests/feature_activation/test_feature_service.py b/tests/feature_activation/test_feature_service.py index a66af95dc..60c76d8bc 100644 --- a/tests/feature_activation/test_feature_service.py +++ b/tests/feature_activation/test_feature_service.py @@ -119,6 +119,7 @@ def service(feature_settings: FeatureSettings, tx_storage: TransactionStorage) - feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() return service @@ -169,6 +170,7 @@ def test_get_state_from_defined( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -200,6 +202,7 @@ def test_get_state_from_started_to_failed( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -231,11 +234,13 @@ def test_get_state_from_started_to_must_signal_on_timeout( feature_settings=feature_settings, 
tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) assert result == FeatureState.MUST_SIGNAL + service.bit_signaling_service.on_must_signal.assert_called_once_with(Feature.NOP_FEATURE_1) @pytest.mark.parametrize('block_height', [8, 9, 10, 11]) @@ -263,6 +268,7 @@ def test_get_state_from_started_to_locked_in_on_default_threshold( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -294,6 +300,7 @@ def test_get_state_from_started_to_locked_in_on_custom_threshold( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -333,6 +340,7 @@ def test_get_state_from_started_to_started( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -362,6 +370,7 @@ def test_get_state_from_must_signal_to_locked_in( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -394,6 +403,7 @@ def test_get_state_from_locked_in_to_active( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -426,6 +436,7 @@ def test_get_state_from_locked_in_to_locked_in( feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = 
service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -451,6 +462,7 @@ def test_get_state_from_active(block_mocks: list[Block], tx_storage: Transaction feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -473,6 +485,7 @@ def test_caching_mechanism(block_mocks: list[Block], tx_storage: TransactionStor } ) service = FeatureService(feature_settings=feature_settings, tx_storage=tx_storage) + service.bit_signaling_service = Mock() block = block_mocks[block_height] calculate_new_state_mock = Mock(wraps=service._calculate_new_state) @@ -507,6 +520,7 @@ def test_is_feature_active(block_mocks: list[Block], tx_storage: TransactionStor feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.is_feature_active(block=block, feature=Feature.NOP_FEATURE_1) @@ -531,6 +545,7 @@ def test_get_state_from_failed(block_mocks: list[Block], tx_storage: Transaction feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.get_state(block=block, feature=Feature.NOP_FEATURE_1) @@ -559,6 +574,7 @@ def test_get_bits_description(tx_storage: TransactionStorage) -> None: feature_settings=feature_settings, tx_storage=tx_storage ) + service.bit_signaling_service = Mock() def get_state(self: FeatureService, *, block: Block, feature: Feature) -> FeatureState: states = { @@ -596,6 +612,7 @@ def test_get_ancestor_at_height_invalid( ancestor_height: int ) -> None: service = FeatureService(feature_settings=feature_settings, tx_storage=tx_storage) + service.bit_signaling_service = Mock() block = block_mocks[block_height] with pytest.raises(AssertionError) as e: @@ -625,6 +642,7 @@ def test_get_ancestor_at_height( ancestor_height: int ) -> None: service = 
FeatureService(feature_settings=feature_settings, tx_storage=tx_storage) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service._get_ancestor_at_height(block=block, ancestor_height=ancestor_height) @@ -653,6 +671,7 @@ def test_get_ancestor_at_height_voided( ancestor_height: int ) -> None: service = FeatureService(feature_settings=feature_settings, tx_storage=tx_storage) + service.bit_signaling_service = Mock() block = block_mocks[block_height] parent_block = block_mocks[block_height - 1] parent_block.get_metadata().voided_by = {b'some'} @@ -711,6 +730,7 @@ def test_check_must_signal( } ) service = FeatureService(feature_settings=feature_settings, tx_storage=tx_storage) + service.bit_signaling_service = Mock() block = block_mocks[block_height] result = service.is_signaling_mandatory_features(block) diff --git a/tests/feature_activation/test_feature_simulation.py b/tests/feature_activation/test_feature_simulation.py index c7e8cf253..53e02f1b9 100644 --- a/tests/feature_activation/test_feature_simulation.py +++ b/tests/feature_activation/test_feature_simulation.py @@ -25,11 +25,13 @@ from hathor.feature_activation.resources.feature import FeatureResource from hathor.feature_activation.settings import Settings as FeatureSettings from hathor.simulator import FakeConnection +from hathor.simulator.utils import add_new_blocks from hathor.transaction.exceptions import BlockMustSignalError +from hathor.util import not_none from tests import unittest from tests.resources.base_resource import StubSite from tests.simulation.base import SimulatorTestCase -from tests.utils import HAS_ROCKSDB, add_new_blocks +from tests.utils import HAS_ROCKSDB class BaseFeatureSimulationTest(SimulatorTestCase): @@ -41,7 +43,7 @@ def get_simulator_builder(self) -> Builder: def _get_result(web_client: StubSite) -> dict[str, Any]: """Returns the feature activation api response.""" response = web_client.get('feature') - result = response.result.json_value() + 
result: dict[str, Any] = response.result.json_value() del result['block_hash'] # we don't assert the block hash because it's not always the same @@ -615,7 +617,7 @@ def test_feature_from_existing_storage(self) -> None: calculate_new_state_mock.reset_mock() manager1.stop() - artifacts1.rocksdb_storage.close() + not_none(artifacts1.rocksdb_storage).close() # new builder is created with the same storage from the previous manager builder2 = self.get_simulator_builder_from_dir(rocksdb_dir).set_settings(settings) diff --git a/tests/feature_activation/test_mining_simulation.py b/tests/feature_activation/test_mining_simulation.py index cb306a693..f65056ff1 100644 --- a/tests/feature_activation/test_mining_simulation.py +++ b/tests/feature_activation/test_mining_simulation.py @@ -143,7 +143,8 @@ def test_signal_bits_in_mining(self) -> None: def _get_signal_bits_from_get_block_template(self, web_client: StubSite) -> int: result = self._get_result(web_client) - return result['signal_bits'] + signal_bits: int = result['signal_bits'] + return signal_bits def _get_signal_bits_from_mining(self, web_client: StubSite) -> int: result = self._get_result(web_client) @@ -153,13 +154,14 @@ def _get_signal_bits_from_mining(self, web_client: StubSite) -> int: @staticmethod def _get_result(web_client: StubSite) -> dict[str, Any]: response = web_client.get('') - return response.result.json_value() + result: dict[str, Any] = response.result.json_value() + return result def _get_last_ws_signal_bits(self, transport: StringTransport) -> int: messages = self._get_transport_messages(transport) assert len(messages) > 0 last_message = messages[-1] - signal_bits = last_message['params'][0]['signal_bits'] + signal_bits: int = last_message['params'][0]['signal_bits'] return signal_bits diff --git a/tests/feature_activation/test_settings.py b/tests/feature_activation/test_settings.py index 04af34229..b2c7eac9a 100644 --- a/tests/feature_activation/test_settings.py +++ 
b/tests/feature_activation/test_settings.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any import pytest from pydantic import ValidationError @@ -56,9 +57,9 @@ ) ] ) -def test_valid_settings(features): +def test_valid_settings(features: dict[str, Any]) -> None: data = dict(features=features) - FeatureSettings(**data) + FeatureSettings(**data) # type: ignore[arg-type] @pytest.mark.parametrize( @@ -114,14 +115,14 @@ def test_valid_settings(features): ) ] ) -def test_conflicting_bits(features): +def test_conflicting_bits(features: list[dict[str, Any]]) -> None: with pytest.raises(ValidationError) as e: data = dict(features=features) - FeatureSettings(**data) + FeatureSettings(**data) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == 'At least one pair of Features have the same bit configured for an overlapping ' \ - 'interval: Feature.NOP_FEATURE_1 and Feature.NOP_FEATURE_2' + 'interval: NOP_FEATURE_1 and NOP_FEATURE_2' @pytest.mark.parametrize( @@ -131,10 +132,10 @@ def test_conflicting_bits(features): (100, 101, 'default_threshold must not be greater than evaluation_interval: 101 > 100') ] ) -def test_default_threshold(evaluation_interval, default_threshold, error): +def test_default_threshold(evaluation_interval: int, default_threshold: int, error: str) -> None: with pytest.raises(ValidationError) as e: data = dict(evaluation_interval=evaluation_interval, default_threshold=default_threshold) - FeatureSettings(**data) + FeatureSettings(**data) # type: ignore[arg-type] errors = e.value.errors() assert errors[0]['msg'] == error @@ -160,5 +161,5 @@ def test_default_threshold(evaluation_interval, default_threshold, error): ) ] ) -def test_find_overlap(intervals, expected): +def test_find_overlap(intervals: list[FeatureInterval], expected: tuple[FeatureInterval, 
FeatureInterval]) -> None: assert expected == _find_overlap(intervals) diff --git a/tests/others/test_metrics.py b/tests/others/test_metrics.py index 41c4ddb25..f799fc961 100644 --- a/tests/others/test_metrics.py +++ b/tests/others/test_metrics.py @@ -109,6 +109,7 @@ def _init_manager(): b'migrations': 0.0, b'event': 0.0, b'event-metadata': 0.0, + b'feature-activation-metadata': 0.0, }) manager.tx_storage.pre_init() @@ -161,6 +162,7 @@ def _init_manager(): b'migrations': 0.0, b'event': 0.0, b'event-metadata': 0.0, + b'feature-activation-metadata': 0.0, }) manager.tx_storage.pre_init() diff --git a/tests/p2p/netfilter/test_factory.py b/tests/p2p/netfilter/test_factory.py index 2dc4d5cde..53ca409c8 100644 --- a/tests/p2p/netfilter/test_factory.py +++ b/tests/p2p/netfilter/test_factory.py @@ -1,3 +1,5 @@ +from unittest.mock import Mock + from twisted.internet.address import IPv4Address from hathor.p2p.netfilter import get_table @@ -10,7 +12,7 @@ class NetfilterFactoryTest(unittest.TestCase): - def test_factory(self): + def test_factory(self) -> None: pre_conn = get_table('filter').get_chain('pre_conn') match = NetfilterMatchIPAddress('192.168.0.1/32') @@ -20,7 +22,7 @@ def test_factory(self): builder = TestBuilder() artifacts = builder.build() wrapped_factory = artifacts.p2p_manager.server_factory - factory = NetfilterFactory(connections=None, wrappedFactory=wrapped_factory) + factory = NetfilterFactory(connections=Mock(), wrappedFactory=wrapped_factory) ret = factory.buildProtocol(IPv4Address('TCP', '192.168.0.1', 1234)) self.assertIsNone(ret) diff --git a/tests/p2p/netfilter/test_match.py b/tests/p2p/netfilter/test_match.py index ce59c28dd..39bb844fe 100644 --- a/tests/p2p/netfilter/test_match.py +++ b/tests/p2p/netfilter/test_match.py @@ -22,7 +22,7 @@ def match(self, context: 'NetfilterContext') -> bool: class NetfilterMatchTest(unittest.TestCase): - def test_match_all(self): + def test_match_all(self) -> None: matcher = NetfilterMatchAll() context = 
NetfilterContext() self.assertTrue(matcher.match(context)) @@ -31,7 +31,7 @@ def test_match_all(self): json = matcher.to_json() self.assertEqual(json['type'], 'NetfilterMatchAll') - def test_never_match(self): + def test_never_match(self) -> None: matcher = NetfilterNeverMatch() context = NetfilterContext() self.assertFalse(matcher.match(context)) @@ -40,14 +40,14 @@ def test_never_match(self): json = matcher.to_json() self.assertEqual(json['type'], 'NetfilterNeverMatch') - def test_match_and_success(self): + def test_match_and_success(self) -> None: m1 = NetfilterMatchAll() m2 = NetfilterMatchAll() matcher = NetfilterMatchAnd(m1, m2) context = NetfilterContext() self.assertTrue(matcher.match(context)) - def test_match_and_fail_01(self): + def test_match_and_fail_01(self) -> None: m1 = NetfilterNeverMatch() m2 = NetfilterMatchAll() matcher = NetfilterMatchAnd(m1, m2) @@ -60,28 +60,28 @@ def test_match_and_fail_01(self): self.assertEqual(json['match_params']['a']['type'], 'NetfilterNeverMatch') self.assertEqual(json['match_params']['b']['type'], 'NetfilterMatchAll') - def test_match_and_fail_10(self): + def test_match_and_fail_10(self) -> None: m1 = NetfilterMatchAll() m2 = NetfilterNeverMatch() matcher = NetfilterMatchAnd(m1, m2) context = NetfilterContext() self.assertFalse(matcher.match(context)) - def test_match_and_fail_00(self): + def test_match_and_fail_00(self) -> None: m1 = NetfilterNeverMatch() m2 = NetfilterNeverMatch() matcher = NetfilterMatchAnd(m1, m2) context = NetfilterContext() self.assertFalse(matcher.match(context)) - def test_match_or_success_11(self): + def test_match_or_success_11(self) -> None: m1 = NetfilterMatchAll() m2 = NetfilterMatchAll() matcher = NetfilterMatchOr(m1, m2) context = NetfilterContext() self.assertTrue(matcher.match(context)) - def test_match_or_success_10(self): + def test_match_or_success_10(self) -> None: m1 = NetfilterMatchAll() m2 = NetfilterNeverMatch() matcher = NetfilterMatchOr(m1, m2) @@ -94,21 +94,21 @@ def 
test_match_or_success_10(self): self.assertEqual(json['match_params']['a']['type'], 'NetfilterMatchAll') self.assertEqual(json['match_params']['b']['type'], 'NetfilterNeverMatch') - def test_match_or_success_01(self): + def test_match_or_success_01(self) -> None: m1 = NetfilterNeverMatch() m2 = NetfilterMatchAll() matcher = NetfilterMatchOr(m1, m2) context = NetfilterContext() self.assertTrue(matcher.match(context)) - def test_match_or_fail_00(self): + def test_match_or_fail_00(self) -> None: m1 = NetfilterNeverMatch() m2 = NetfilterNeverMatch() matcher = NetfilterMatchOr(m1, m2) context = NetfilterContext() self.assertFalse(matcher.match(context)) - def test_match_ip_address_empty_context(self): + def test_match_ip_address_empty_context(self) -> None: matcher = NetfilterMatchIPAddress('192.168.0.0/24') context = NetfilterContext() self.assertFalse(matcher.match(context)) @@ -118,7 +118,7 @@ def test_match_ip_address_empty_context(self): self.assertEqual(json['type'], 'NetfilterMatchIPAddress') self.assertEqual(json['match_params']['host'], '192.168.0.0/24') - def test_match_ip_address_ipv4_net(self): + def test_match_ip_address_ipv4_net(self) -> None: matcher = NetfilterMatchIPAddress('192.168.0.0/24') context = NetfilterContext(addr=IPv4Address('TCP', '192.168.0.10', 1234)) self.assertTrue(matcher.match(context)) @@ -129,7 +129,7 @@ def test_match_ip_address_ipv4_net(self): context = NetfilterContext(addr=IPv4Address('TCP', '', 1234)) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv4_ip(self): + def test_match_ip_address_ipv4_ip(self) -> None: matcher = NetfilterMatchIPAddress('192.168.0.1/32') context = NetfilterContext(addr=IPv4Address('TCP', '192.168.0.1', 1234)) self.assertTrue(matcher.match(context)) @@ -138,24 +138,24 @@ def test_match_ip_address_ipv4_ip(self): context = NetfilterContext(addr=IPv4Address('TCP', '', 1234)) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv4_hostname(self): + def 
test_match_ip_address_ipv4_hostname(self) -> None: matcher = NetfilterMatchIPAddress('192.168.0.1/32') - context = NetfilterContext(addr=HostnameAddress('hathor.network', 80)) + context = NetfilterContext(addr=HostnameAddress(b'hathor.network', 80)) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv4_unix(self): + def test_match_ip_address_ipv4_unix(self) -> None: matcher = NetfilterMatchIPAddress('192.168.0.1/32') context = NetfilterContext(addr=UNIXAddress('/unix.sock')) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv4_ipv6(self): + def test_match_ip_address_ipv4_ipv6(self) -> None: matcher = NetfilterMatchIPAddress('192.168.0.1/32') context = NetfilterContext(addr=IPv6Address('TCP', '2001:db8::', 80)) self.assertFalse(matcher.match(context)) context = NetfilterContext(addr=IPv6Address('TCP', '', 80)) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv6_net(self): + def test_match_ip_address_ipv6_net(self) -> None: matcher = NetfilterMatchIPAddress('2001:0db8:0:f101::/64') context = NetfilterContext(addr=IPv6Address('TCP', '2001:db8::8a2e:370:7334', 1234)) self.assertFalse(matcher.match(context)) @@ -167,7 +167,7 @@ def test_match_ip_address_ipv6_net(self): self.assertEqual(json['type'], 'NetfilterMatchIPAddress') self.assertEqual(json['match_params']['host'], str(ip_network('2001:0db8:0:f101::/64'))) - def test_match_ip_address_ipv6_ip(self): + def test_match_ip_address_ipv6_ip(self) -> None: matcher = NetfilterMatchIPAddress('2001:0db8:0:f101::1/128') context = NetfilterContext(addr=IPv6Address('TCP', '2001:db8:0:f101::1', 1234)) self.assertTrue(matcher.match(context)) @@ -176,22 +176,22 @@ def test_match_ip_address_ipv6_ip(self): context = NetfilterContext(addr=IPv6Address('TCP', '2001:db8:0:f101:2::7334', 1234)) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv6_hostname(self): + def test_match_ip_address_ipv6_hostname(self) -> None: matcher = 
NetfilterMatchIPAddress('2001:0db8:0:f101::1/128') - context = NetfilterContext(addr=HostnameAddress('hathor.network', 80)) + context = NetfilterContext(addr=HostnameAddress(b'hathor.network', 80)) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv6_unix(self): + def test_match_ip_address_ipv6_unix(self) -> None: matcher = NetfilterMatchIPAddress('2001:0db8:0:f101::1/128') context = NetfilterContext(addr=UNIXAddress('/unix.sock')) self.assertFalse(matcher.match(context)) - def test_match_ip_address_ipv6_ipv4(self): + def test_match_ip_address_ipv6_ipv4(self) -> None: matcher = NetfilterMatchIPAddress('2001:0db8:0:f101::1/128') context = NetfilterContext(addr=IPv4Address('TCP', '192.168.0.1', 1234)) self.assertFalse(matcher.match(context)) - def test_match_peer_id_empty_context(self): + def test_match_peer_id_empty_context(self) -> None: matcher = NetfilterMatchPeerId('123') context = NetfilterContext() self.assertFalse(matcher.match(context)) @@ -200,7 +200,7 @@ def test_match_peer_id_empty_context(self): class BaseNetfilterMatchTest(unittest.TestCase): __test__ = False - def test_match_peer_id(self): + def test_match_peer_id(self) -> None: network = 'testnet' peer_id1 = PeerId() peer_id2 = PeerId() diff --git a/tests/p2p/netfilter/test_match_remote.py b/tests/p2p/netfilter/test_match_remote.py index 89df4acf7..1947f39be 100644 --- a/tests/p2p/netfilter/test_match_remote.py +++ b/tests/p2p/netfilter/test_match_remote.py @@ -6,7 +6,7 @@ class NetfilterMatchRemoteTest(unittest.TestCase): - def test_match_ip(self): + def test_match_ip(self) -> None: matcher = NetfilterMatchIPAddressRemoteURL('test', self.clock, 'http://localhost:8080') context = NetfilterContext(addr=IPv4Address('TCP', '192.168.0.1', 1234)) self.assertFalse(matcher.match(context)) diff --git a/tests/p2p/netfilter/test_tables.py b/tests/p2p/netfilter/test_tables.py index 6c845ec5e..a3505aa01 100644 --- a/tests/p2p/netfilter/test_tables.py +++ b/tests/p2p/netfilter/test_tables.py 
@@ -6,17 +6,17 @@ class NetfilterTableTest(unittest.TestCase): - def test_default_table_filter(self): + def test_default_table_filter(self) -> None: tb_filter = get_table('filter') tb_filter.get_chain('pre_conn') tb_filter.get_chain('post_hello') tb_filter.get_chain('post_peerid') - def test_default_table_not_exists(self): + def test_default_table_not_exists(self) -> None: with self.assertRaises(KeyError): get_table('do-not-exists') - def test_add_get_chain(self): + def test_add_get_chain(self) -> None: mytable = NetfilterTable('mytable') mychain = NetfilterChain('mychain', NetfilterAccept()) mytable.add_chain(mychain) diff --git a/tests/p2p/netfilter/test_utils.py b/tests/p2p/netfilter/test_utils.py index cde078af0..127cf9b0d 100644 --- a/tests/p2p/netfilter/test_utils.py +++ b/tests/p2p/netfilter/test_utils.py @@ -4,7 +4,7 @@ class NetfilterUtilsTest(unittest.TestCase): - def test_peer_id_blacklist(self): + def test_peer_id_blacklist(self) -> None: post_peerid = get_table('filter').get_chain('post_peerid') # Chain starts empty diff --git a/tests/p2p/test_capabilities.py b/tests/p2p/test_capabilities.py index 0380abaf4..022fb1fc6 100644 --- a/tests/p2p/test_capabilities.py +++ b/tests/p2p/test_capabilities.py @@ -1,3 +1,4 @@ +from hathor.p2p.states import ReadyState from hathor.p2p.sync_v1.agent import NodeSyncTimestamp from hathor.p2p.sync_v2.agent import NodeBlockSync from hathor.simulator import FakeConnection @@ -5,7 +6,7 @@ class SyncV1HathorCapabilitiesTestCase(unittest.SyncV1Params, unittest.TestCase): - def test_capabilities(self): + def test_capabilities(self) -> None: network = 'testnet' manager1 = self.create_peer(network, capabilities=[self._settings.CAPABILITY_WHITELIST]) manager2 = self.create_peer(network, capabilities=[]) @@ -18,6 +19,8 @@ def test_capabilities(self): self.clock.advance(0.1) # Even if we don't have the capability we must connect because the whitelist url conf is None + assert isinstance(conn._proto1.state, ReadyState) + assert 
isinstance(conn._proto2.state, ReadyState) self.assertEqual(conn._proto1.state.state_name, 'READY') self.assertEqual(conn._proto2.state.state_name, 'READY') self.assertIsInstance(conn._proto1.state.sync_agent, NodeSyncTimestamp) @@ -33,6 +36,8 @@ def test_capabilities(self): conn2.run_one_step(debug=True) self.clock.advance(0.1) + assert isinstance(conn2._proto1.state, ReadyState) + assert isinstance(conn2._proto2.state, ReadyState) self.assertEqual(conn2._proto1.state.state_name, 'READY') self.assertEqual(conn2._proto2.state.state_name, 'READY') self.assertIsInstance(conn2._proto1.state.sync_agent, NodeSyncTimestamp) @@ -40,7 +45,7 @@ def test_capabilities(self): class SyncV2HathorCapabilitiesTestCase(unittest.SyncV2Params, unittest.TestCase): - def test_capabilities(self): + def test_capabilities(self) -> None: network = 'testnet' manager1 = self.create_peer(network, capabilities=[self._settings.CAPABILITY_WHITELIST, self._settings.CAPABILITY_SYNC_VERSION]) @@ -54,6 +59,8 @@ def test_capabilities(self): self.clock.advance(0.1) # Even if we don't have the capability we must connect because the whitelist url conf is None + assert isinstance(conn._proto1.state, ReadyState) + assert isinstance(conn._proto2.state, ReadyState) self.assertEqual(conn._proto1.state.state_name, 'READY') self.assertEqual(conn._proto2.state.state_name, 'READY') self.assertIsInstance(conn._proto1.state.sync_agent, NodeBlockSync) @@ -71,6 +78,8 @@ def test_capabilities(self): conn2.run_one_step(debug=True) self.clock.advance(0.1) + assert isinstance(conn2._proto1.state, ReadyState) + assert isinstance(conn2._proto2.state, ReadyState) self.assertEqual(conn2._proto1.state.state_name, 'READY') self.assertEqual(conn2._proto2.state.state_name, 'READY') self.assertIsInstance(conn2._proto1.state.sync_agent, NodeBlockSync) diff --git a/tests/p2p/test_connections.py b/tests/p2p/test_connections.py index 03f56358f..c75abea7e 100644 --- a/tests/p2p/test_connections.py +++ b/tests/p2p/test_connections.py 
@@ -8,7 +8,7 @@ class ConnectionsTest(unittest.TestCase): @pytest.mark.skipif(sys.platform == 'win32', reason='run_server is very finicky on Windows') - def test_connections(self): + def test_connections(self) -> None: process = run_server() process2 = run_server(listen=8006, status=8086, bootstrap='tcp://127.0.0.1:8005') process3 = run_server(listen=8007, status=8087, bootstrap='tcp://127.0.0.1:8005') @@ -17,7 +17,7 @@ def test_connections(self): process2.terminate() process3.terminate() - def test_manager_connections(self): + def test_manager_connections(self) -> None: manager = self.create_peer('testnet', enable_sync_v1=True, enable_sync_v2=False) endpoint = 'tcp://127.0.0.1:8005' diff --git a/tests/p2p/test_double_spending.py b/tests/p2p/test_double_spending.py index 9eb408ee2..21b74d620 100644 --- a/tests/p2p/test_double_spending.py +++ b/tests/p2p/test_double_spending.py @@ -1,5 +1,10 @@ +from unittest.mock import Mock + from hathor.crypto.util import decode_address +from hathor.manager import HathorManager from hathor.simulator.utils import add_new_blocks +from hathor.transaction import Transaction +from hathor.util import not_none from tests import unittest from tests.utils import add_blocks_unlock_reward, add_new_tx @@ -7,7 +12,7 @@ class BaseHathorSyncMethodsTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' @@ -16,16 +21,16 @@ def setUp(self): self.genesis = self.manager1.tx_storage.get_all_genesis() self.genesis_blocks = [tx for tx in self.genesis if tx.is_block] - def _add_new_transactions(self, manager, num_txs): + def _add_new_transactions(self, manager: HathorManager, num_txs: int) -> list[Transaction]: txs = [] for _ in range(num_txs): - address = self.get_address(0) + address = not_none(self.get_address(0)) value = self.rng.choice([5, 10, 15, 20]) tx = add_new_tx(manager, address, value) txs.append(tx) return txs - def test_simple_double_spending(self): + def 
test_simple_double_spending(self) -> None: add_new_blocks(self.manager1, 5, advance_clock=15) add_blocks_unlock_reward(self.manager1) @@ -33,6 +38,7 @@ def test_simple_double_spending(self): from hathor.wallet.base_wallet import WalletOutputInfo address = self.get_address(0) + assert address is not None value = 500 outputs = [] @@ -125,7 +131,7 @@ def test_simple_double_spending(self): self.assertConsensusValid(self.manager1) - def test_double_spending_propagation(self): + def test_double_spending_propagation(self) -> None: blocks = add_new_blocks(self.manager1, 4, advance_clock=15) add_blocks_unlock_reward(self.manager1) @@ -165,7 +171,7 @@ def test_double_spending_propagation(self): outputs = [WalletOutputInfo(address=address, value=value, timelock=None), WalletOutputInfo(address=address, value=tx_total_value - 500, timelock=None)] self.clock.advance(1) - inputs = [WalletInputInfo(i.tx_id, i.index, b'') for i in tx1.inputs] + inputs = [WalletInputInfo(i.tx_id, i.index, Mock()) for i in tx1.inputs] tx4 = self.manager1.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs, self.manager1.tx_storage) tx4.weight = 5 @@ -186,7 +192,7 @@ def test_double_spending_propagation(self): address = self.manager1.wallet.get_unused_address_bytes() value = 100 - inputs = [WalletInputInfo(tx_id=tx1.hash, index=1, private_key=None)] + inputs = [WalletInputInfo(tx_id=tx1.hash, index=1, private_key=Mock())] outputs = [WalletOutputInfo(address=address, value=int(value), timelock=None)] self.clock.advance(1) tx2 = self.manager1.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs, @@ -236,7 +242,7 @@ def test_double_spending_propagation(self): address = self.manager1.wallet.get_unused_address_bytes() value = 500 - inputs = [WalletInputInfo(tx_id=tx4.hash, index=0, private_key=None)] + inputs = [WalletInputInfo(tx_id=tx4.hash, index=0, private_key=Mock())] outputs = [WalletOutputInfo(address=address, value=int(value), timelock=None)] 
self.clock.advance(1) tx5 = self.manager1.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs, force=True, @@ -273,7 +279,7 @@ def test_double_spending_propagation(self): address = self.manager1.wallet.get_unused_address_bytes() value = blocks[3].outputs[0].value - inputs = [WalletInputInfo(tx_id=blocks[3].hash, index=0, private_key=None)] + inputs = [WalletInputInfo(tx_id=blocks[3].hash, index=0, private_key=Mock())] outputs = [WalletOutputInfo(address=address, value=value, timelock=None)] self.clock.advance(1) tx7 = self.manager1.wallet.prepare_transaction_incomplete_inputs(Transaction, inputs, outputs, diff --git a/tests/p2p/test_get_best_blockchain.py b/tests/p2p/test_get_best_blockchain.py index 4d00ea55b..ff0d95149 100644 --- a/tests/p2p/test_get_best_blockchain.py +++ b/tests/p2p/test_get_best_blockchain.py @@ -1,4 +1,4 @@ -from twisted.internet.defer import inlineCallbacks +from twisted.internet.protocol import Protocol from hathor.indexes.height_index import HeightInfo from hathor.p2p.messages import ProtocolMessages @@ -17,18 +17,15 @@ class BaseGetBestBlockchainTestCase(SimulatorTestCase): seed_config = 6 - def _send_cmd(self, proto, cmd, payload=None): + def _send_cmd(self, proto: Protocol, cmd: str, payload: str | None = None) -> None: if not payload: line = '{}\r\n'.format(cmd) else: line = '{} {}\r\n'.format(cmd, payload) - if isinstance(line, str): - line = line.encode('utf-8') + proto.dataReceived(line.encode('utf-8')) - return proto.dataReceived(line) - - def test_get_best_blockchain(self): + def test_get_best_blockchain(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() conn12 = FakeConnection(manager1, manager2, latency=0.05) @@ -54,8 +51,8 @@ def test_get_best_blockchain(self): # assert the protocol is in ReadyState state1 = protocol1.state state2 = protocol2.state - self.assertIsInstance(state1, ReadyState) - self.assertIsInstance(state2, ReadyState) + assert isinstance(state1, ReadyState) + assert 
isinstance(state2, ReadyState) # assert ReadyState commands self.assertIn(ProtocolMessages.GET_BEST_BLOCKCHAIN, state1.cmd_map) @@ -81,10 +78,10 @@ def test_get_best_blockchain(self): self.assertEqual(self._settings.DEFAULT_BEST_BLOCKCHAIN_BLOCKS, len(state1.peer_best_blockchain)) self.assertEqual(self._settings.DEFAULT_BEST_BLOCKCHAIN_BLOCKS, len(state2.peer_best_blockchain)) - self.assertIsInstance(state1.peer_best_blockchain[0], HeightInfo) - self.assertIsInstance(state2.peer_best_blockchain[0], HeightInfo) + assert isinstance(state1.peer_best_blockchain[0], HeightInfo) + assert isinstance(state2.peer_best_blockchain[0], HeightInfo) - def test_handle_get_best_blockchain(self): + def test_handle_get_best_blockchain(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() conn12 = FakeConnection(manager1, manager2, latency=0.05) @@ -101,13 +98,13 @@ def test_handle_get_best_blockchain(self): self.assertEqual(1, len(connected_peers1)) protocol2 = connected_peers1[0] state2 = protocol2.state - self.assertIsInstance(state2, ReadyState) + assert isinstance(state2, ReadyState) connected_peers2 = list(manager2.connections.connected_peers.values()) self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] state1 = protocol1.state - self.assertIsInstance(state1, ReadyState) + assert isinstance(state1, ReadyState) # assert compliance with N blocks inside the boundaries state1.send_get_best_blockchain(n_blocks=1) @@ -141,7 +138,7 @@ def test_handle_get_best_blockchain(self): self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] state1 = protocol1.state - self.assertIsInstance(state1, ReadyState) + assert isinstance(state1, ReadyState) # assert param validation exception closes connection state1.handle_get_best_blockchain('invalid single value') @@ -149,7 +146,7 @@ def test_handle_get_best_blockchain(self): # state1 is managed by manager2 self.assertTrue(conn12.tr2.disconnecting) - def test_handle_best_blockchain(self): 
+ def test_handle_best_blockchain(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() conn12 = FakeConnection(manager1, manager2, latency=0.05) @@ -160,19 +157,19 @@ def test_handle_best_blockchain(self): self.assertEqual(1, len(connected_peers1)) protocol2 = connected_peers1[0] state2 = protocol2.state - self.assertIsInstance(state2, ReadyState) + assert isinstance(state2, ReadyState) connected_peers2 = list(manager2.connections.connected_peers.values()) self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] state1 = protocol1.state - self.assertIsInstance(state1, ReadyState) + assert isinstance(state1, ReadyState) self.assertFalse(conn12.tr1.disconnecting) self.simulator.run(60) # assert a valid blockchain keeps connections open - fake_blockchain = [ + fake_blockchain: list[tuple[float, str]] = [ (1, '0000000000000002eccfbca9bc06c449c01f37afb3cb49c04ee62921d9bcf9dc'), (2, '00000000000000006c846e182462a2cc437070288a486dfa21aa64bb373b8507'), ] @@ -203,7 +200,7 @@ def test_handle_best_blockchain(self): self.simulator.run(60) self.assertTrue(conn12.tr2.disconnecting) - def test_node_without_get_best_blockchain_capability(self): + def test_node_without_get_best_blockchain_capability(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() @@ -232,10 +229,10 @@ def test_node_without_get_best_blockchain_capability(self): # assert the peers don't engage in get_best_blockchain messages state2 = protocol2.state - self.assertIsInstance(state2, ReadyState) + assert isinstance(state2, ReadyState) self.assertIsNone(state2.lc_get_best_blockchain) state1 = protocol1.state - self.assertIsInstance(state1, ReadyState) + assert isinstance(state1, ReadyState) self.assertIsNone(state1.lc_get_best_blockchain) # assert the connections remains open @@ -261,7 +258,7 @@ def test_node_without_get_best_blockchain_capability(self): self.simulator.run(60) self.assertTrue(conn12.tr2.disconnecting) - def 
test_best_blockchain_from_storage(self): + def test_best_blockchain_from_storage(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() conn12 = FakeConnection(manager1, manager2, latency=0.05) @@ -281,8 +278,8 @@ def test_best_blockchain_from_storage(self): self.assertTrue(block is memo_block) # cache miss if best block doesn't match - fake_block = HeightInfo(1, 'fake hash') - manager1._latest_n_height_tips = [fake_block] + fake_block = HeightInfo(1, b'fake hash') + # manager1._latest_n_height_tips = [fake_block] # FIXME: This property is not defined. Fix this test. best_blockchain = manager1.tx_storage.get_n_height_tips(1) # there is only the genesis block block = best_blockchain[0] # the memoized best_blockchain is skiped @@ -309,7 +306,7 @@ def test_best_blockchain_from_storage(self): block = best_blockchain[0] self.assertTrue(block is memo_block) - def test_stop_looping_on_exit(self): + def test_stop_looping_on_exit(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() conn12 = FakeConnection(manager1, manager2, latency=0.05) @@ -320,18 +317,18 @@ def test_stop_looping_on_exit(self): self.assertEqual(1, len(connected_peers1)) protocol2 = connected_peers1[0] state2 = protocol2.state - self.assertIsInstance(state2, ReadyState) + assert isinstance(state2, ReadyState) connected_peers2 = list(manager2.connections.connected_peers.values()) self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] state1 = protocol1.state - self.assertIsInstance(state1, ReadyState) + assert isinstance(state1, ReadyState) - self.assertIsNotNone(state1.lc_get_best_blockchain) + assert state1.lc_get_best_blockchain is not None self.assertTrue(state1.lc_get_best_blockchain.running) - self.assertIsNotNone(state2.lc_get_best_blockchain) + assert state2.lc_get_best_blockchain is not None self.assertTrue(state2.lc_get_best_blockchain.running) state1.on_exit() @@ -343,8 +340,7 @@ def test_stop_looping_on_exit(self): 
self.assertIsNotNone(state2.lc_get_best_blockchain) self.assertFalse(state2.lc_get_best_blockchain.running) - @inlineCallbacks - def test_best_blockchain_from_status_resource(self): + async def test_best_blockchain_from_status_resource(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() conn12 = FakeConnection(manager1, manager2, latency=0.05) @@ -353,7 +349,7 @@ def test_best_blockchain_from_status_resource(self): # check /status before generate blocks self.web = StubSite(StatusResource(manager1)) - response = yield self.web.get("status") + response = await self.web.get("status") data = response.json_value() connections = data.get('connections') self.assertEqual(len(connections['connected_peers']), 1) @@ -385,7 +381,7 @@ def test_best_blockchain_from_status_resource(self): self.simulator.run(60) # check /status after mine blocks - response = yield self.web.get("status") + response = await self.web.get("status") data = response.json_value() connections = data.get('connections') self.assertEqual(len(connections['connected_peers']), 1) diff --git a/tests/p2p/test_peer_id.py b/tests/p2p/test_peer_id.py index c3e8be202..bccb9bcb2 100644 --- a/tests/p2p/test_peer_id.py +++ b/tests/p2p/test_peer_id.py @@ -1,37 +1,42 @@ import os import shutil import tempfile +from typing import cast +from unittest.mock import Mock + +from twisted.internet.interfaces import ITransport from hathor.p2p.peer_id import InvalidPeerIdException, PeerId from hathor.p2p.peer_storage import PeerStorage +from hathor.util import not_none from tests import unittest from tests.unittest import TestBuilder class PeerIdTest(unittest.TestCase): - def test_invalid_id(self): + def test_invalid_id(self) -> None: p1 = PeerId() - p1.id = p1.id[::-1] + p1.id = not_none(p1.id)[::-1] self.assertRaises(InvalidPeerIdException, p1.validate) - def test_invalid_public_key(self): + def test_invalid_public_key(self) -> None: p1 = PeerId() p2 = PeerId() p1.public_key = p2.public_key 
self.assertRaises(InvalidPeerIdException, p1.validate) - def test_invalid_private_key(self): + def test_invalid_private_key(self) -> None: p1 = PeerId() p2 = PeerId() p1.private_key = p2.private_key self.assertRaises(InvalidPeerIdException, p1.validate) - def test_no_private_key(self): + def test_no_private_key(self) -> None: p1 = PeerId() p1.private_key = None p1.validate() - def test_create_from_json(self): + def test_create_from_json(self) -> None: p1 = PeerId() data1 = p1.to_json(include_private_key=True) p2 = PeerId.create_from_json(data1) @@ -39,7 +44,7 @@ def test_create_from_json(self): self.assertEqual(data1, data2) p2.validate() - def test_create_from_json_without_private_key(self): + def test_create_from_json_without_private_key(self) -> None: p1 = PeerId() data1 = p1.to_json() # Just to test a part of the code @@ -51,20 +56,20 @@ def test_create_from_json_without_private_key(self): self.assertEqual(data1, data2) p2.validate() - def test_sign_verify(self): + def test_sign_verify(self) -> None: data = b'abacate' p1 = PeerId() signature = p1.sign(data) self.assertTrue(p1.verify_signature(signature, data)) - def test_sign_verify_fail(self): + def test_sign_verify_fail(self) -> None: data = b'abacate' p1 = PeerId() signature = p1.sign(data) signature = signature[::-1] self.assertFalse(p1.verify_signature(signature, data)) - def test_merge_peer(self): + def test_merge_peer(self) -> None: # Testing peer storage with merge of peers peer_storage = PeerStorage() @@ -72,14 +77,14 @@ def test_merge_peer(self): p2 = PeerId() p2.id = p1.id p2.public_key = p1.public_key - p1.public_key = '' + p1.public_key = None peer_storage.add_or_merge(p1) self.assertEqual(len(peer_storage), 1) peer_storage.add_or_merge(p2) - peer = peer_storage[p1.id] + peer = peer_storage[not_none(p1.id)] self.assertEqual(peer.id, p1.id) self.assertEqual(peer.private_key, p1.private_key) self.assertEqual(peer.public_key, p1.public_key) @@ -88,11 +93,11 @@ def test_merge_peer(self): p3 = PeerId() 
p3.entrypoints.append('1') p3.entrypoints.append('3') - p3.public_key = '' + p3.public_key = None p4 = PeerId() - p4.public_key = '' - p4.private_key = '' + p4.public_key = None + p4.private_key = None p4.id = p3.id p4.entrypoints.append('2') p4.entrypoints.append('3') @@ -103,7 +108,7 @@ def test_merge_peer(self): peer_storage.add_or_merge(p3) self.assertEqual(len(peer_storage), 2) - peer = peer_storage[p3.id] + peer = peer_storage[not_none(p3.id)] self.assertEqual(peer.id, p3.id) self.assertEqual(peer.private_key, p3.private_key) self.assertEqual(peer.entrypoints, ['2', '3', '1']) @@ -111,7 +116,7 @@ def test_merge_peer(self): with self.assertRaises(ValueError): peer_storage.add(p1) - def test_save_peer_file(self): + def test_save_peer_file(self) -> None: import json p = PeerId() @@ -127,7 +132,7 @@ def test_save_peer_file(self): # Removing tmpdir shutil.rmtree(tmpdir) - def test_retry_connection(self): + def test_retry_connection(self) -> None: p = PeerId() interval = p.retry_interval p.increment_retry_attempt(0) @@ -144,26 +149,27 @@ def test_retry_connection(self): self.assertEqual(p.retry_interval, 5) self.assertEqual(p.retry_timestamp, 0) - def test_validate_certificate(self): + def test_validate_certificate(self) -> None: builder = TestBuilder() artifacts = builder.build() - protocol = artifacts.p2p_manager.server_factory.buildProtocol('127.0.0.1') + protocol = artifacts.p2p_manager.server_factory.buildProtocol(Mock()) + + peer = PeerId() - peer = PeerId('testnet') + from OpenSSL import crypto class FakeTransport: - def getPeerCertificate(self): - from OpenSSL import crypto + def getPeerCertificate(self) -> crypto.X509: # we use a new peer here just to save the trouble of manually creating a certificate - random_peer = PeerId('testnet') + random_peer = PeerId() return crypto.X509.from_cryptography(random_peer.get_certificate()) - protocol.transport = FakeTransport() + protocol.transport = cast(ITransport, FakeTransport()) result = 
peer.validate_certificate(protocol) self.assertFalse(result) - def test_retry_logic(self): - peer = PeerId('testnet') + def test_retry_logic(self) -> None: + peer = PeerId() self.assertTrue(peer.can_retry(0)) retry_interval = peer.retry_interval @@ -207,7 +213,7 @@ def test_retry_logic(self): class BasePeerIdTest(unittest.TestCase): __test__ = False - async def test_validate_entrypoint(self): + async def test_validate_entrypoint(self) -> None: manager = self.create_peer('testnet', unlock_wallet=False) peer_id = manager.my_peer peer_id.entrypoints = ['tcp://127.0.0.1:40403'] @@ -230,10 +236,11 @@ async def test_validate_entrypoint(self): protocol.connection_string = None peer_id.entrypoints = ['tcp://127.0.0.1:40403'] + from collections import namedtuple + Peer = namedtuple('Peer', 'host') + class FakeTransport: - def getPeer(self): - from collections import namedtuple - Peer = namedtuple('Peer', 'host') + def getPeer(self) -> Peer: return Peer(host='127.0.0.1') protocol.transport = FakeTransport() result = await peer_id.validate_entrypoint(protocol) diff --git a/tests/p2p/test_protocol.py b/tests/p2p/test_protocol.py index 1aadea540..a834f9e20 100644 --- a/tests/p2p/test_protocol.py +++ b/tests/p2p/test_protocol.py @@ -2,7 +2,7 @@ from typing import Optional from unittest.mock import Mock, patch -from twisted.internet.defer import inlineCallbacks +from twisted.internet.protocol import Protocol from twisted.python.failure import Failure from hathor.p2p.peer_id import PeerId @@ -15,7 +15,7 @@ class BaseHathorProtocolTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' self.peer_id1 = PeerId() @@ -32,52 +32,49 @@ def assertAndStepConn(self, conn: FakeConnection, regex1: bytes, regex2: Optiona self.assertRegex(conn.peek_tr2_value(), regex2) conn.run_one_step() - def assertIsConnected(self, conn=None): + def assertIsConnected(self, conn: FakeConnection | None = None) -> None: if conn is 
None: conn = self.conn self.assertFalse(conn.tr1.disconnecting) self.assertFalse(conn.tr2.disconnecting) - def assertIsNotConnected(self, conn=None): + def assertIsNotConnected(self, conn: FakeConnection | None = None) -> None: if conn is None: conn = self.conn self.assertTrue(conn.tr1.disconnecting) self.assertTrue(conn.tr2.disconnecting) - def _send_cmd(self, proto, cmd, payload=None): + def _send_cmd(self, proto: Protocol, cmd: str, payload: str | None = None) -> None: if not payload: line = '{}\r\n'.format(cmd) else: line = '{} {}\r\n'.format(cmd, payload) - if isinstance(line, str): - line = line.encode('utf-8') + proto.dataReceived(line.encode('utf-8')) - return proto.dataReceived(line) - - def _check_result_only_cmd(self, result, expected_cmd): + def _check_result_only_cmd(self, result: bytes, expected_cmd: bytes) -> None: cmd_list = [] for line in result.split(b'\r\n'): cmd, _, _ = line.partition(b' ') cmd_list.append(cmd) self.assertIn(expected_cmd, cmd_list) - def _check_cmd_and_value(self, result, expected): + def _check_cmd_and_value(self, result: bytes, expected: tuple[bytes, bytes]) -> None: result_list = [] for line in result.split(b'\r\n'): cmd, _, data = line.partition(b' ') result_list.append((cmd, data)) self.assertIn(expected, result_list) - def test_on_connect(self): + def test_on_connect(self) -> None: self._check_result_only_cmd(self.conn.peek_tr1_value(), b'HELLO') - def test_invalid_command(self): + def test_invalid_command(self) -> None: self._send_cmd(self.conn.proto1, 'INVALID-CMD') self.conn.proto1.state.handle_error('') self.assertTrue(self.conn.tr1.disconnecting) - def test_rate_limit(self): + def test_rate_limit(self) -> None: hits = 1 window = 60 @@ -99,7 +96,7 @@ def test_rate_limit(self): self.conn.proto1.connections = None self.conn.proto1.on_disconnect(Failure(Exception())) - def test_invalid_size(self): + def test_invalid_size(self) -> None: self.conn.tr1.clear() cmd = b'HELLO ' max_payload_bytes = HathorLineReceiver.MAX_LENGTH 
- len(cmd) @@ -123,32 +120,32 @@ def test_invalid_size(self): line_length_exceeded_wrapped.assert_called_once() self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_payload(self): + def test_invalid_payload(self) -> None: self.conn.run_one_step() # HELLO self.conn.run_one_step() # PEER-ID self.conn.run_one_step() # READY with self.assertRaises(JSONDecodeError): self._send_cmd(self.conn.proto1, 'PEERS', 'abc') - def test_invalid_hello1(self): + def test_invalid_hello1(self) -> None: self.conn.tr1.clear() self._send_cmd(self.conn.proto1, 'HELLO') self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_hello2(self): + def test_invalid_hello2(self) -> None: self.conn.tr1.clear() self._send_cmd(self.conn.proto1, 'HELLO', 'invalid_payload') self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_hello3(self): + def test_invalid_hello3(self) -> None: self.conn.tr1.clear() self._send_cmd(self.conn.proto1, 'HELLO', '{}') self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_hello4(self): + def test_invalid_hello4(self) -> None: self.conn.tr1.clear() self._send_cmd( self.conn.proto1, @@ -158,7 +155,7 @@ def test_invalid_hello4(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_hello5(self): + def test_invalid_hello5(self) -> None: # hello with clocks too far apart self.conn.tr1.clear() data = self.conn.proto2.state._get_hello_data() @@ -171,14 +168,14 @@ def test_invalid_hello5(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') self.assertTrue(self.conn.tr1.disconnecting) - def test_valid_hello(self): + def test_valid_hello(self) -> None: self.conn.run_one_step() # HELLO self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEER-ID') 
self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEER-ID') self.assertFalse(self.conn.tr1.disconnecting) self.assertFalse(self.conn.tr2.disconnecting) - def test_invalid_same_peer_id(self): + def test_invalid_same_peer_id(self) -> None: manager3 = self.create_peer(self.network, peer_id=self.peer_id1) conn = FakeConnection(self.manager1, manager3) conn.run_one_step() # HELLO @@ -186,7 +183,7 @@ def test_invalid_same_peer_id(self): self._check_result_only_cmd(conn.peek_tr1_value(), b'ERROR') self.assertTrue(conn.tr1.disconnecting) - def test_invalid_same_peer_id2(self): + def test_invalid_same_peer_id2(self) -> None: """ We connect nodes 1-2 and 1-3. Nodes 2 and 3 have the same peer_id. The connections are established simultaneously, so we do not detect a peer id duplication in PEER_ID @@ -246,7 +243,7 @@ def test_invalid_same_peer_id2(self): # connection is still up self.assertIsConnected(conn_alive) - def test_invalid_different_network(self): + def test_invalid_different_network(self) -> None: manager3 = self.create_peer(network='mainnet') conn = FakeConnection(self.manager1, manager3) conn.run_one_step() # HELLO @@ -254,23 +251,23 @@ def test_invalid_different_network(self): self.assertTrue(conn.tr1.disconnecting) conn.run_one_step() # ERROR - def test_send_invalid_unicode(self): + def test_send_invalid_unicode(self) -> None: # \xff is an invalid unicode. 
self.conn.proto1.dataReceived(b'\xff\r\n') self.assertTrue(self.conn.tr1.disconnecting) - def test_on_disconnect(self): + def test_on_disconnect(self) -> None: self.assertIn(self.conn.proto1, self.manager1.connections.handshaking_peers) self.conn.disconnect(Failure(Exception('testing'))) self.assertNotIn(self.conn.proto1, self.manager1.connections.handshaking_peers) - def test_on_disconnect_after_hello(self): + def test_on_disconnect_after_hello(self) -> None: self.conn.run_one_step() # HELLO self.assertIn(self.conn.proto1, self.manager1.connections.handshaking_peers) self.conn.disconnect(Failure(Exception('testing'))) self.assertNotIn(self.conn.proto1, self.manager1.connections.handshaking_peers) - def test_on_disconnect_after_peer_id(self): + def test_on_disconnect_after_peer_id(self) -> None: self.conn.run_one_step() # HELLO self.assertIn(self.conn.proto1, self.manager1.connections.handshaking_peers) # No peer id in the peer_storage (known_peers) @@ -291,7 +288,7 @@ def test_on_disconnect_after_peer_id(self): # Peer id 2 removed from peer_storage (known_peers) after disconnection and after looping call self.assertNotIn(self.peer_id2.id, self.manager1.connections.peer_storage) - def test_idle_connection(self): + def test_idle_connection(self) -> None: self.clock.advance(self._settings.PEER_IDLE_TIMEOUT - 10) self.assertIsConnected(self.conn) self.clock.advance(15) @@ -301,7 +298,7 @@ def test_idle_connection(self): class SyncV1HathorProtocolTestCase(unittest.SyncV1Params, BaseHathorProtocolTestCase): __test__ = True - def test_two_connections(self): + def test_two_connections(self) -> None: self.conn.run_one_step() # HELLO self.conn.run_one_step() # PEER-ID self.conn.run_one_step() # READY @@ -318,8 +315,7 @@ def test_two_connections(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') self.conn.run_one_step() - @inlineCallbacks - def test_get_data(self): + def test_get_data(self) -> None: self.conn.run_one_step() # HELLO 
self.conn.run_one_step() # PEER-ID self.conn.run_one_step() # READY @@ -329,11 +325,11 @@ def test_get_data(self): self.conn.run_one_step() # TIPS self.assertIsConnected() missing_tx = '00000000228dfcd5dec1c9c6263f6430a5b4316bb9e3decb9441a6414bfd8697' - yield self._send_cmd(self.conn.proto1, 'GET-DATA', missing_tx) + self._send_cmd(self.conn.proto1, 'GET-DATA', missing_tx) self._check_result_only_cmd(self.conn.peek_tr1_value(), b'NOT-FOUND') self.conn.run_one_step() - def test_valid_hello_and_peer_id(self): + def test_valid_hello_and_peer_id(self) -> None: self._check_result_only_cmd(self.conn.peek_tr1_value(), b'HELLO') self._check_result_only_cmd(self.conn.peek_tr2_value(), b'HELLO') self.conn.run_one_step() # HELLO @@ -358,7 +354,7 @@ def test_valid_hello_and_peer_id(self): self.conn.run_one_step() # TIPS self.assertIsConnected() - def test_send_ping(self): + def test_send_ping(self) -> None: self.conn.run_one_step() # HELLO self.conn.run_one_step() # PEER-ID self.conn.run_one_step() # READY @@ -379,8 +375,7 @@ def test_send_ping(self): self.conn.run_one_step() self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) - @inlineCallbacks - def test_invalid_peer_id(self): + def test_invalid_peer_id(self) -> None: self.conn.run_one_step() # HELLO self.conn.run_one_step() # PEER-ID self.conn.run_one_step() # READY @@ -389,7 +384,7 @@ def test_invalid_peer_id(self): self.conn.run_one_step() # PEERS self.conn.run_one_step() # TIPS invalid_payload = {'id': '123', 'entrypoints': ['tcp://localhost:1234']} - yield self._send_cmd(self.conn.proto1, 'PEER-ID', json_dumps(invalid_payload)) + self._send_cmd(self.conn.proto1, 'PEER-ID', json_dumps(invalid_payload)) self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') self.assertTrue(self.conn.tr1.disconnecting) @@ -397,7 +392,7 @@ def test_invalid_peer_id(self): class SyncV2HathorProtocolTestCase(unittest.SyncV2Params, BaseHathorProtocolTestCase): __test__ = True - def test_two_connections(self): + def 
test_two_connections(self) -> None: self.assertAndStepConn(self.conn, b'^HELLO') self.assertAndStepConn(self.conn, b'^PEER-ID') self.assertAndStepConn(self.conn, b'^READY') @@ -425,8 +420,7 @@ def test_two_connections(self): self.assertIsConnected() - @inlineCallbacks - def test_get_data(self): + def test_get_data(self) -> None: self.assertAndStepConn(self.conn, b'^HELLO') self.assertAndStepConn(self.conn, b'^PEER-ID') self.assertAndStepConn(self.conn, b'^READY') @@ -442,11 +436,11 @@ def test_get_data(self): 'last_block_hash': missing_tx, 'start_from': [self._settings.GENESIS_BLOCK_HASH.hex()] } - yield self._send_cmd(self.conn.proto1, 'GET-TRANSACTIONS-BFS', json_dumps(payload)) + self._send_cmd(self.conn.proto1, 'GET-TRANSACTIONS-BFS', json_dumps(payload)) self._check_result_only_cmd(self.conn.peek_tr1_value(), b'NOT-FOUND') self.conn.run_one_step() - def test_valid_hello_and_peer_id(self): + def test_valid_hello_and_peer_id(self) -> None: self.assertAndStepConn(self.conn, b'^HELLO') self.assertAndStepConn(self.conn, b'^PEER-ID') self.assertAndStepConn(self.conn, b'^READY') @@ -477,7 +471,7 @@ def test_valid_hello_and_peer_id(self): self.assertAndStepConn(self.conn, b'^BEST-BLOCK') self.assertIsConnected() - def test_send_ping(self): + def test_send_ping(self) -> None: self.assertAndStepConn(self.conn, b'^HELLO') self.assertAndStepConn(self.conn, b'^PEER-ID') self.assertAndStepConn(self.conn, b'^READY') diff --git a/tests/p2p/test_rate_limiter.py b/tests/p2p/test_rate_limiter.py index 83e7b6e56..3eace5471 100644 --- a/tests/p2p/test_rate_limiter.py +++ b/tests/p2p/test_rate_limiter.py @@ -1,13 +1,14 @@ from hathor.p2p.rate_limiter import RateLimiter +from hathor.util import not_none from tests import unittest class RateLimiterTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: super().setUp() self.rate_limiter = RateLimiter(reactor=self.clock) - def test_limiter(self): + def test_limiter(self) -> None: key = 'test' 
self.rate_limiter.set_limit(key, 2, 2) @@ -31,7 +32,7 @@ def test_limiter(self): self.assertTrue(self.rate_limiter.add_hit(key)) # Get limit - self.assertEqual(self.rate_limiter.get_limit(key).max_hits, 2) + self.assertEqual(not_none(self.rate_limiter.get_limit(key)).max_hits, 2) # Unset limit self.rate_limiter.unset_limit(key) diff --git a/tests/p2p/test_split_brain.py b/tests/p2p/test_split_brain.py index 68ee24609..3a2352853 100644 --- a/tests/p2p/test_split_brain.py +++ b/tests/p2p/test_split_brain.py @@ -3,8 +3,10 @@ from hathor.daa import TestMode from hathor.graphviz import GraphvizVisualizer +from hathor.manager import HathorManager from hathor.simulator import FakeConnection from hathor.simulator.utils import add_new_block +from hathor.util import not_none from hathor.wallet import HDWallet from tests import unittest from tests.utils import add_blocks_unlock_reward, add_new_double_spending, add_new_transactions @@ -13,7 +15,7 @@ class BaseHathorSyncMethodsTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() first_timestamp = self._settings.GENESIS_BLOCK_TIMESTAMP @@ -21,13 +23,13 @@ def setUp(self): self.network = 'testnet' - def create_peer(self, network, unlock_wallet=True): + def create_peer(self, network: str, unlock_wallet: bool = True) -> HathorManager: # type: ignore[override] wallet = HDWallet(gap_limit=2) wallet._manually_initialize() - manager = super().create_peer(network, wallet=wallet) + manager: HathorManager = super().create_peer(network, wallet=wallet) manager.daa.TEST_MODE = TestMode.TEST_ALL_WEIGHT - manager.avg_time_between_blocks = 64 + # manager.avg_time_between_blocks = 64 # FIXME: This property is not defined. Fix this test. # Don't use it anywhere else. It is unsafe to generate mnemonic words like this. # It should be used only for testing purposes. 
@@ -37,14 +39,14 @@ def create_peer(self, network, unlock_wallet=True): return manager @pytest.mark.slow - def test_split_brain_plain(self): + def test_split_brain_plain(self) -> None: debug_pdf = False manager1 = self.create_peer(self.network, unlock_wallet=True) - manager1.avg_time_between_blocks = 3 + # manager1.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. manager2 = self.create_peer(self.network, unlock_wallet=True) - manager2.avg_time_between_blocks = 3 + # manager2.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. for _ in range(10): add_new_block(manager1, advance_clock=1) @@ -100,12 +102,12 @@ def test_split_brain_plain(self): self.assertConsensusValid(manager2) @pytest.mark.slow - def test_split_brain_only_blocks_different_height(self): + def test_split_brain_only_blocks_different_height(self) -> None: manager1 = self.create_peer(self.network, unlock_wallet=True) - manager1.avg_time_between_blocks = 3 + # manager1.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. manager2 = self.create_peer(self.network, unlock_wallet=True) - manager2.avg_time_between_blocks = 3 + # manager2.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. 
for _ in range(10): add_new_block(manager1, advance_clock=1) @@ -117,7 +119,7 @@ def test_split_brain_only_blocks_different_height(self): # Add one more block to manager1, so it's the winner chain add_new_block(manager1, advance_clock=1) - block_tip1 = manager1.tx_storage.indexes.height.get_tip() + block_tip1 = not_none(manager1.tx_storage.indexes).height.get_tip() self.assertConsensusValid(manager1) self.assertConsensusValid(manager2) @@ -140,17 +142,17 @@ def test_split_brain_only_blocks_different_height(self): self.assertConsensusValid(manager2) self.assertConsensusEqual(manager1, manager2) - self.assertEqual(block_tip1, manager1.tx_storage.indexes.height.get_tip()) - self.assertEqual(block_tip1, manager2.tx_storage.indexes.height.get_tip()) + self.assertEqual(block_tip1, not_none(manager1.tx_storage.indexes).height.get_tip()) + self.assertEqual(block_tip1, not_none(manager2.tx_storage.indexes).height.get_tip()) # XXX We must decide what to do when different chains have the same score # For now we are voiding everyone until the first common block - def test_split_brain_only_blocks_same_height(self): + def test_split_brain_only_blocks_same_height(self) -> None: manager1 = self.create_peer(self.network, unlock_wallet=True) - manager1.avg_time_between_blocks = 3 + # manager1.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. manager2 = self.create_peer(self.network, unlock_wallet=True) - manager2.avg_time_between_blocks = 3 + # manager2.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. 
for _ in range(10): add_new_block(manager1, advance_clock=1) @@ -268,12 +270,12 @@ def test_split_brain_only_blocks_same_height(self): self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 1) self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {new_block.hash}) - def test_split_brain_only_blocks_bigger_score(self): + def test_split_brain_only_blocks_bigger_score(self) -> None: manager1 = self.create_peer(self.network, unlock_wallet=True) - manager1.avg_time_between_blocks = 3 + # manager1.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. manager2 = self.create_peer(self.network, unlock_wallet=True) - manager2.avg_time_between_blocks = 3 + # manager2.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. # Start with 1 because of the genesis block manager2_blocks = 1 @@ -328,13 +330,13 @@ def test_split_brain_only_blocks_bigger_score(self): # Assert that the consensus had the manager2 chain self.assertEqual(winners2_blocks, manager2_blocks) - def test_split_brain_no_double_spending(self): + def test_split_brain_no_double_spending(self) -> None: manager1 = self.create_peer(self.network, unlock_wallet=True) - manager1.avg_time_between_blocks = 3 + # manager1.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. manager1.connections.disable_rate_limiter() manager2 = self.create_peer(self.network, unlock_wallet=True) - manager2.avg_time_between_blocks = 3 + # manager2.avg_time_between_blocks = 3 # FIXME: This property is not defined. Fix this test. 
manager2.connections.disable_rate_limiter() winner_blocks = 1 diff --git a/tests/p2p/test_split_brain2.py b/tests/p2p/test_split_brain2.py index fc4601898..e1622fb8a 100644 --- a/tests/p2p/test_split_brain2.py +++ b/tests/p2p/test_split_brain2.py @@ -10,7 +10,7 @@ class BaseHathorSyncMethodsTestCase(SimulatorTestCase): __test__ = False @pytest.mark.flaky(max_runs=3, min_passes=1) - def test_split_brain(self): + def test_split_brain(self) -> None: debug_pdf = False manager1 = self.create_peer() diff --git a/tests/p2p/test_sync.py b/tests/p2p/test_sync.py index e387bba89..69bd417e4 100644 --- a/tests/p2p/test_sync.py +++ b/tests/p2p/test_sync.py @@ -5,7 +5,9 @@ from hathor.p2p.protocol import PeerIdState from hathor.p2p.sync_version import SyncVersion from hathor.simulator import FakeConnection +from hathor.transaction import Block, Transaction from hathor.transaction.storage.exceptions import TransactionIsNotABlock +from hathor.util import not_none from tests import unittest from tests.utils import add_blocks_unlock_reward @@ -13,7 +15,7 @@ class BaseHathorSyncMethodsTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() # import sys @@ -27,15 +29,16 @@ def setUp(self): self.genesis = self.manager1.tx_storage.get_all_genesis() self.genesis_blocks = [tx for tx in self.genesis if tx.is_block] - def _add_new_tx(self, address, value): - from hathor.transaction import Transaction + def _add_new_tx(self, address: str, value: int) -> Transaction: from hathor.wallet.base_wallet import WalletOutputInfo outputs = [] outputs.append( WalletOutputInfo(address=decode_address(address), value=int(value), timelock=None)) - tx = self.manager1.wallet.prepare_transaction_compute_inputs(Transaction, outputs, self.manager1.tx_storage) + tx: Transaction = self.manager1.wallet.prepare_transaction_compute_inputs( + Transaction, outputs, self.manager1.tx_storage + ) tx.timestamp = int(self.clock.seconds()) tx.storage = 
self.manager1.tx_storage tx.weight = 10 @@ -46,30 +49,30 @@ def _add_new_tx(self, address, value): self.clock.advance(10) return tx - def _add_new_transactions(self, num_txs): + def _add_new_transactions(self, num_txs: int) -> list[Transaction]: txs = [] for _ in range(num_txs): - address = self.get_address(0) + address = not_none(self.get_address(0)) value = self.rng.choice([5, 10, 50, 100, 120]) tx = self._add_new_tx(address, value) txs.append(tx) return txs - def _add_new_block(self, propagate=True): - block = self.manager1.generate_mining_block() + def _add_new_block(self, propagate: bool = True) -> Block: + block: Block = self.manager1.generate_mining_block() self.assertTrue(self.manager1.cpu_mining_service.resolve(block)) self.manager1.verification_service.verify(block) self.manager1.on_new_tx(block, propagate_to_peers=propagate) self.clock.advance(10) return block - def _add_new_blocks(self, num_blocks, propagate=True): + def _add_new_blocks(self, num_blocks: int, propagate: bool = True) -> list[Block]: blocks = [] for _ in range(num_blocks): blocks.append(self._add_new_block(propagate=propagate)) return blocks - def test_get_blocks_before(self): + def test_get_blocks_before(self) -> None: genesis_block = self.genesis_blocks[0] result = self.manager1.tx_storage.get_blocks_before(genesis_block.hash) self.assertEqual(0, len(result)) @@ -88,7 +91,7 @@ def test_get_blocks_before(self): expected_result = expected_result[::-1] self.assertEqual(result, expected_result) - def test_block_sync_only_genesis(self): + def test_block_sync_only_genesis(self) -> None: manager2 = self.create_peer(self.network) self.assertEqual(manager2.state, manager2.NodeState.READY) @@ -102,7 +105,7 @@ def test_block_sync_only_genesis(self): self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) self.assertTipsEqual(self.manager1, manager2) - def test_block_sync_new_blocks(self): + def test_block_sync_new_blocks(self) -> None: self._add_new_blocks(15) manager2 = 
self.create_peer(self.network) @@ -123,7 +126,7 @@ def test_block_sync_new_blocks(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_block_sync_many_new_blocks(self): + def test_block_sync_many_new_blocks(self) -> None: self._add_new_blocks(150) manager2 = self.create_peer(self.network) @@ -143,7 +146,7 @@ def test_block_sync_many_new_blocks(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_block_sync_new_blocks_and_txs(self): + def test_block_sync_new_blocks_and_txs(self) -> None: self._add_new_blocks(25) self._add_new_transactions(3) self._add_new_blocks(4) @@ -172,7 +175,7 @@ def test_block_sync_new_blocks_and_txs(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_tx_propagation_nat_peers(self): + def test_tx_propagation_nat_peers(self) -> None: """ manager1 <- manager2 <- manager3 """ self._add_new_blocks(25) @@ -229,7 +232,7 @@ def test_tx_propagation_nat_peers(self): self.assertConsensusValid(self.manager2) self.assertConsensusValid(self.manager3) - def test_check_sync_state(self): + def test_check_sync_state(self) -> None: """Tests if the LoopingCall to check the sync state works""" # Initially it should do nothing, since there is no recent activity self.manager1.check_sync_state() @@ -249,7 +252,7 @@ def test_check_sync_state(self): class SyncV1HathorSyncMethodsTestCase(unittest.SyncV1Params, BaseHathorSyncMethodsTestCase): __test__ = True - def test_downloader(self): + def test_downloader(self) -> None: from hathor.p2p.sync_v1.agent import NodeSyncTimestamp blocks = self._add_new_blocks(3) @@ -326,7 +329,7 @@ def test_downloader(self): downloader.check_downloading_queue() self.assertEqual(len(downloader.downloading_deque), 0) - def _downloader_bug_setup(self): + def _downloader_bug_setup(self) -> None: """ This is an auxiliary method to setup a bug scenario.""" from hathor.p2p.sync_version import SyncVersion @@ -390,7 +393,7 
@@ def _downloader_bug_setup(self): # by this point everything should be set to so we can trigger the bug, any issues that happen before this # comment are an issue in setting up the scenario, not related to the problem itself - def test_downloader_retry_reorder(self): + def test_downloader_retry_reorder(self) -> None: """ Reproduce the bug that causes a reorder in the downloader queue. The tracking issue for this bug is #465 @@ -454,7 +457,7 @@ def test_downloader_retry_reorder(self): # if the fix is applied, we would see tx_A in storage by this point self.assertTrue(self.manager_bug.tx_storage.transaction_exists(self.tx_A.hash)) - def test_downloader_disconnect(self): + def test_downloader_disconnect(self) -> None: """ This is related to test_downloader_retry_reorder, but it basically tests the change in behavior instead. When a peer disconnects it should be immediately removed from the tx-detail's connections list. @@ -474,7 +477,7 @@ def test_downloader_disconnect(self): class SyncV2HathorSyncMethodsTestCase(unittest.SyncV2Params, BaseHathorSyncMethodsTestCase): __test__ = True - def test_sync_metadata(self): + def test_sync_metadata(self) -> None: # test if the synced peer will build all tx metadata correctly height = 0 @@ -519,7 +522,7 @@ def test_sync_metadata(self): self.assertCountEqual(meta1.conflict_with or [], meta2.conflict_with or []) self.assertCountEqual(meta1.twins or [], meta2.twins or []) - def test_tx_propagation_nat_peers(self): + def test_tx_propagation_nat_peers(self) -> None: super().test_tx_propagation_nat_peers() node_sync1 = self.conn1.proto1.state.sync_agent @@ -534,7 +537,7 @@ def test_tx_propagation_nat_peers(self): self.assertEqual(node_sync2.peer_best_block.height, self.manager2.tx_storage.get_height_best_block()) self.assertConsensusEqual(self.manager2, self.manager3) - def test_block_sync_new_blocks_and_txs(self): + def test_block_sync_new_blocks_and_txs(self) -> None: self._add_new_blocks(25) self._add_new_transactions(3) 
self._add_new_blocks(4) @@ -563,7 +566,7 @@ def test_block_sync_new_blocks_and_txs(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_block_sync_many_new_blocks(self): + def test_block_sync_many_new_blocks(self) -> None: self._add_new_blocks(150) manager2 = self.create_peer(self.network) @@ -584,7 +587,7 @@ def test_block_sync_many_new_blocks(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_block_sync_new_blocks(self): + def test_block_sync_new_blocks(self) -> None: self._add_new_blocks(15) manager2 = self.create_peer(self.network) @@ -605,7 +608,7 @@ def test_block_sync_new_blocks(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_full_sync(self): + def test_full_sync(self) -> None: # 10 blocks blocks = self._add_new_blocks(10) # N blocks to unlock the reward @@ -677,7 +680,7 @@ def test_full_sync(self): self.assertEqual(len(manager2.tx_storage.indexes.mempool_tips.get()), 1) self.assertEqual(len(self.manager1.tx_storage.indexes.mempool_tips.get()), 1) - def test_block_sync_checkpoints(self): + def test_block_sync_checkpoints(self) -> None: TOTAL_BLOCKS = 30 LAST_CHECKPOINT = 15 FIRST_CHECKPOINT = LAST_CHECKPOINT // 2 @@ -718,7 +721,7 @@ def test_block_sync_checkpoints(self): self.assertConsensusValid(self.manager1) self.assertConsensusValid(manager2) - def test_block_sync_only_genesis(self): + def test_block_sync_only_genesis(self) -> None: manager2 = self.create_peer(self.network) self.assertEqual(manager2.state, manager2.NodeState.READY) diff --git a/tests/p2p/test_sync_bridge.py b/tests/p2p/test_sync_bridge.py index cdf000627..9c9024be0 100644 --- a/tests/p2p/test_sync_bridge.py +++ b/tests/p2p/test_sync_bridge.py @@ -5,7 +5,7 @@ class MixedSyncRandomSimulatorTestCase(SimulatorTestCase): __test__ = True - def test_the_three_transacting_miners(self): + def test_the_three_transacting_miners(self) -> None: manager1 = 
self.create_peer(enable_sync_v1=True, enable_sync_v2=False) manager2 = self.create_peer(enable_sync_v1=True, enable_sync_v2=True) manager3 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) @@ -44,7 +44,7 @@ def test_the_three_transacting_miners(self): # sync-v2 consensus test is more lenient (if sync-v1 assert passes sync-v2 assert will pass too) self.assertConsensusEqualSyncV2(manager_a, manager_b, strict_sync_v2_indexes=False) - def test_bridge_with_late_v2(self): + def test_bridge_with_late_v2(self) -> None: manager1 = self.create_peer(enable_sync_v1=True, enable_sync_v2=False) manager2 = self.create_peer(enable_sync_v1=True, enable_sync_v2=True) manager3 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) diff --git a/tests/p2p/test_sync_enabled.py b/tests/p2p/test_sync_enabled.py index a352c08a0..f681f90a0 100644 --- a/tests/p2p/test_sync_enabled.py +++ b/tests/p2p/test_sync_enabled.py @@ -5,7 +5,7 @@ class BaseRandomSimulatorTestCase(SimulatorTestCase): - def test_new_node_disabled(self): + def test_new_node_disabled(self) -> None: manager1 = self.create_peer() manager1.allow_mining_without_peers() @@ -39,7 +39,7 @@ def test_new_node_disabled(self): v2 = list(manager2.tx_storage.get_all_transactions()) self.assertEqual(3, len(v2)) - def test_sync_rotate(self): + def test_sync_rotate(self) -> None: manager1 = self.create_peer() manager1.connections.MAX_ENABLED_SYNC = 3 other_managers = [self.create_peer() for _ in range(15)] diff --git a/tests/p2p/test_sync_mempool.py b/tests/p2p/test_sync_mempool.py index f2a0219b3..dff3c27bf 100644 --- a/tests/p2p/test_sync_mempool.py +++ b/tests/p2p/test_sync_mempool.py @@ -1,6 +1,8 @@ from hathor.crypto.util import decode_address from hathor.graphviz import GraphvizVisualizer from hathor.simulator import FakeConnection +from hathor.transaction import Block, Transaction +from hathor.util import not_none from tests import unittest from tests.utils import add_blocks_unlock_reward @@ -8,7 +10,7 @@ class 
BaseHathorSyncMempoolTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' @@ -18,7 +20,7 @@ def setUp(self): self.genesis = self.manager1.tx_storage.get_all_genesis() self.genesis_blocks = [tx for tx in self.genesis if tx.is_block] - def _add_new_tx(self, address, value): + def _add_new_tx(self, address: str, value: int) -> Transaction: from hathor.transaction import Transaction from hathor.wallet.base_wallet import WalletOutputInfo @@ -26,7 +28,9 @@ def _add_new_tx(self, address, value): outputs.append( WalletOutputInfo(address=decode_address(address), value=int(value), timelock=None)) - tx = self.manager1.wallet.prepare_transaction_compute_inputs(Transaction, outputs, self.manager1.tx_storage) + tx: Transaction = self.manager1.wallet.prepare_transaction_compute_inputs( + Transaction, outputs, self.manager1.tx_storage + ) tx.timestamp = int(self.clock.seconds()) tx.storage = self.manager1.tx_storage tx.weight = 10 @@ -37,30 +41,30 @@ def _add_new_tx(self, address, value): self.clock.advance(10) return tx - def _add_new_transactions(self, num_txs): + def _add_new_transactions(self, num_txs: int) -> list[Transaction]: txs = [] for _ in range(num_txs): - address = self.get_address(0) + address = not_none(self.get_address(0)) value = self.rng.choice([5, 10, 50, 100, 120]) tx = self._add_new_tx(address, value) txs.append(tx) return txs - def _add_new_block(self, propagate=True): - block = self.manager1.generate_mining_block() + def _add_new_block(self, propagate: bool = True) -> Block: + block: Block = self.manager1.generate_mining_block() self.assertTrue(self.manager1.cpu_mining_service.resolve(block)) self.manager1.verification_service.verify(block) self.manager1.on_new_tx(block, propagate_to_peers=propagate) self.clock.advance(10) return block - def _add_new_blocks(self, num_blocks, propagate=True): + def _add_new_blocks(self, num_blocks: int, propagate: bool = True) -> list[Block]: 
blocks = [] for _ in range(num_blocks): blocks.append(self._add_new_block(propagate=propagate)) return blocks - def test_mempool_basic(self): + def test_mempool_basic(self) -> None: # 10 blocks self._add_new_blocks(2) # N blocks to unlock the reward @@ -100,7 +104,7 @@ class SyncV1HathorSyncMempoolTestCase(unittest.SyncV1Params, BaseHathorSyncMempo class SyncV2HathorSyncMempoolTestCase(unittest.SyncV2Params, BaseHathorSyncMempoolTestCase): __test__ = True - def test_mempool_basic(self): + def test_mempool_basic(self) -> None: super().test_mempool_basic() # 3 genesis diff --git a/tests/p2p/test_sync_rate_limiter.py b/tests/p2p/test_sync_rate_limiter.py index 9433c7ade..04d091c27 100644 --- a/tests/p2p/test_sync_rate_limiter.py +++ b/tests/p2p/test_sync_rate_limiter.py @@ -1,7 +1,9 @@ -from unittest.mock import MagicMock, Mock +from unittest.mock import Mock, patch from twisted.python.failure import Failure +from hathor.p2p.states import ReadyState +from hathor.p2p.sync_v1.agent import NodeSyncTimestamp from hathor.simulator import FakeConnection from hathor.simulator.trigger import StopAfterNMinedBlocks from tests import unittest @@ -11,7 +13,7 @@ class SyncV1RandomSimulatorTestCase(unittest.SyncV1Params, SimulatorTestCase): __test__ = True - def test_sync_rate_limiter(self): + def test_sync_rate_limiter(self) -> None: manager1 = self.create_peer() miner1 = self.simulator.create_miner(manager1, hashpower=10e6) @@ -32,21 +34,23 @@ def test_sync_rate_limiter(self): connected_peers2 = list(manager2.connections.connected_peers.values()) self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] + assert isinstance(protocol1.state, ReadyState) sync2 = protocol1.state.sync_agent - sync2._send_tips = MagicMock() + assert isinstance(sync2, NodeSyncTimestamp) - for i in range(100): - sync2.send_tips() - self.assertEqual(sync2._send_tips.call_count, min(i + 1, 8)) - self.assertEqual(sync2._send_tips.call_count, 8) + with patch.object(sync2, '_send_tips') as 
mock: + for i in range(100): + sync2.send_tips() + self.assertEqual(mock.call_count, min(i + 1, 8)) + self.assertEqual(mock.call_count, 8) - sync2.send_tips() - self.assertEqual(sync2._send_tips.call_count, 8) + sync2.send_tips() + self.assertEqual(mock.call_count, 8) - self.simulator._clock.advance(2000) - self.assertTrue(sync2._send_tips.call_count, 16) + self.simulator._clock.advance(2000) + self.assertTrue(mock.call_count, 16) - def test_sync_rate_limiter_disconnect(self): + def test_sync_rate_limiter_disconnect(self) -> None: # Test send_tips delayed calls cancelation with disconnection manager1 = self.create_peer() manager2 = self.create_peer() @@ -64,36 +68,39 @@ def test_sync_rate_limiter_disconnect(self): self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] + assert isinstance(protocol1.state, ReadyState) sync1 = protocol1.state.sync_agent - sync1._send_tips = Mock(wraps=sync1._send_tips) + assert isinstance(sync1, NodeSyncTimestamp) + mock = Mock(wraps=sync1._send_tips) - sync1.send_tips() - self.assertEqual(sync1._send_tips.call_count, 1) - self.assertEqual(len(sync1._send_tips_call_later), 0) + with patch.object(sync1, '_send_tips', new=mock): + sync1.send_tips() + self.assertEqual(mock.call_count, 1) + self.assertEqual(len(sync1._send_tips_call_later), 0) - sync1.send_tips() - self.assertEqual(sync1._send_tips.call_count, 1) - self.assertEqual(len(sync1._send_tips_call_later), 1) + sync1.send_tips() + self.assertEqual(mock.call_count, 1) + self.assertEqual(len(sync1._send_tips_call_later), 1) - sync1.send_tips() - self.assertEqual(sync1._send_tips.call_count, 1) - self.assertEqual(len(sync1._send_tips_call_later), 2) + sync1.send_tips() + self.assertEqual(mock.call_count, 1) + self.assertEqual(len(sync1._send_tips_call_later), 2) - # Close the connection. - conn12.disconnect(Failure(Exception('testing'))) - self.simulator.remove_connection(conn12) + # Close the connection. 
+ conn12.disconnect(Failure(Exception('testing'))) + self.simulator.remove_connection(conn12) - self.simulator.run(30) + self.simulator.run(30) - # Send tips should not be called any further since the connection has already been closed. - self.assertEqual(sync1._send_tips.call_count, 1) - # Residual delayed calls - self.assertEqual(len(sync1._send_tips_call_later), 2) - # The residual delayed calls should have been canceled - for call_later in sync1._send_tips_call_later: - self.assertFalse(call_later.active()) + # Send tips should not be called any further since the connection has already been closed. + self.assertEqual(mock.call_count, 1) + # Residual delayed calls + self.assertEqual(len(sync1._send_tips_call_later), 2) + # The residual delayed calls should have been canceled + for call_later in sync1._send_tips_call_later: + self.assertFalse(call_later.active()) - def test_sync_rate_limiter_delayed_calls_draining(self): + def test_sync_rate_limiter_delayed_calls_draining(self) -> None: # Test the draining of delayed calls from _send_tips_call_later list manager1 = self.create_peer() manager2 = self.create_peer() @@ -111,7 +118,9 @@ def test_sync_rate_limiter_delayed_calls_draining(self): self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] + assert isinstance(protocol1.state, ReadyState) sync1 = protocol1.state.sync_agent + assert isinstance(sync1, NodeSyncTimestamp) sync1.send_tips() self.assertEqual(len(sync1._send_tips_call_later), 0) @@ -131,7 +140,7 @@ def test_sync_rate_limiter_delayed_calls_draining(self): # should have been executed self.assertEqual(len(sync1._send_tips_call_later), 0) - def test_sync_rate_limiter_delayed_calls_stop(self): + def test_sync_rate_limiter_delayed_calls_stop(self) -> None: # Test the draining of delayed calls from _send_tips_call_later list manager1 = self.create_peer() manager2 = self.create_peer() @@ -149,7 +158,9 @@ def test_sync_rate_limiter_delayed_calls_stop(self): self.assertEqual(1, 
len(connected_peers2)) protocol1 = connected_peers2[0] + assert isinstance(protocol1.state, ReadyState) sync1 = protocol1.state.sync_agent + assert isinstance(sync1, NodeSyncTimestamp) sync1.send_tips() self.assertEqual(len(sync1._send_tips_call_later), 0) diff --git a/tests/p2p/test_sync_v2.py b/tests/p2p/test_sync_v2.py index 68be619de..f072134c0 100644 --- a/tests/p2p/test_sync_v2.py +++ b/tests/p2p/test_sync_v2.py @@ -1,13 +1,15 @@ import base64 import re +from unittest.mock import patch import pytest -from twisted.internet.defer import inlineCallbacks, succeed +from twisted.internet.defer import Deferred, succeed from twisted.python.failure import Failure from hathor.p2p.messages import ProtocolMessages from hathor.p2p.peer_id import PeerId -from hathor.p2p.sync_v2.agent import _HeightInfo +from hathor.p2p.states import ReadyState +from hathor.p2p.sync_v2.agent import NodeBlockSync, _HeightInfo from hathor.simulator import FakeConnection from hathor.simulator.trigger import ( StopAfterNMinedBlocks, @@ -16,7 +18,11 @@ StopWhenTrue, Trigger, ) +from hathor.transaction.storage import TransactionRocksDBStorage +from hathor.transaction.storage.transaction_storage import TransactionStorage from hathor.transaction.storage.traversal import DFSWalk +from hathor.types import VertexId +from hathor.util import not_none from tests.simulation.base import SimulatorTestCase from tests.utils import HAS_ROCKSDB @@ -26,7 +32,7 @@ class BaseRandomSimulatorTestCase(SimulatorTestCase): seed_config = 2 - def _get_partial_blocks(self, tx_storage): + def _get_partial_blocks(self, tx_storage: TransactionStorage) -> set[VertexId]: with tx_storage.allow_partially_validated_context(): partial_blocks = set() for tx in tx_storage.get_all_transactions(): @@ -89,6 +95,7 @@ def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bo conn12.disconnect(Failure(Exception('testing'))) self.simulator.remove_connection(conn12) manager2.stop() + assert 
isinstance(manager2.tx_storage, TransactionRocksDBStorage) manager2.tx_storage._rocksdb_storage.close() del manager2 @@ -146,19 +153,19 @@ def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bo self.assertConsensusEqualSyncV2(manager1, manager3) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') - def test_restart_fullnode_full_verification(self): + def test_restart_fullnode_full_verification(self) -> None: self._run_restart_test(full_verification=True, use_tx_storage_cache=False) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') - def test_restart_fullnode_quick(self): + def test_restart_fullnode_quick(self) -> None: self._run_restart_test(full_verification=False, use_tx_storage_cache=False) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') - def test_restart_fullnode_quick_with_cache(self): + def test_restart_fullnode_quick_with_cache(self) -> None: self._run_restart_test(full_verification=False, use_tx_storage_cache=True) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') - def test_restart_fullnode_full_verification_with_cache(self): + def test_restart_fullnode_full_verification_with_cache(self) -> None: self._run_restart_test(full_verification=True, use_tx_storage_cache=True) def test_exceeds_streaming_and_mempool_limits(self) -> None: @@ -250,7 +257,95 @@ def test_exceeds_streaming_and_mempool_limits(self) -> None: self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count()) self.assertConsensusEqualSyncV2(manager1, manager2) - def _prepare_sync_v2_find_best_common_block_reorg(self): + def test_receiving_tips_limit(self) -> None: + from hathor.manager import HathorManager + from hathor.transaction import Transaction + from hathor.wallet.base_wallet import WalletOutputInfo + from tests.utils import BURN_ADDRESS + + manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) + 
manager1.allow_mining_without_peers() + + # Find 100 blocks. + miner1 = self.simulator.create_miner(manager1, hashpower=10e6) + miner1.start() + trigger: Trigger = StopAfterNMinedBlocks(miner1, quantity=100) + self.assertTrue(self.simulator.run(3 * 3600, trigger=trigger)) + miner1.stop() + + # Custom tx generator that generates tips + parents = manager1.get_new_tx_parents(manager1.tx_storage.latest_timestamp) + + def custom_gen_new_tx(manager: HathorManager, _address: str, value: int, verify: bool = True) -> Transaction: + outputs = [] + # XXX: burn address guarantees that this output will not be used as input for any following transactions + # XXX: reduce value to make sure we can generate more transactions, otherwise it will spend a linear random + # percent from 1 to 100 of the available balance, this way it spends from 0.1% to 10% + outputs.append(WalletOutputInfo(address=BURN_ADDRESS, value=max(1, int(value / 10)), timelock=None)) + + assert manager.wallet is not None + tx = manager.wallet.prepare_transaction_compute_inputs(Transaction, outputs, manager.tx_storage) + tx.storage = manager.tx_storage + + max_ts_spent_tx = max(tx.get_spent_tx(txin).timestamp for txin in tx.inputs) + tx.timestamp = max(max_ts_spent_tx + 1, int(manager.reactor.seconds())) + + tx.weight = 1 + # XXX: fixed parents is the final requirement to make all the generated new tips + tx.parents = parents + manager.cpu_mining_service.resolve(tx) + if verify: + manager.verification_service.verify(tx) + return tx + + # Generate 100 tx-tips in mempool. 
+ gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3., hashpower=10e9, ignore_no_funds=True) + gen_tx1.gen_new_tx = custom_gen_new_tx + gen_tx1.start() + trigger = StopAfterNTransactions(gen_tx1, quantity=100) + self.simulator.run(3600, trigger=trigger) + self.assertGreater(manager1.tx_storage.get_vertices_count(), 100) + gen_tx1.stop() + assert manager1.tx_storage.indexes is not None + assert manager1.tx_storage.indexes.mempool_tips is not None + mempool_tips_count = len(manager1.tx_storage.indexes.mempool_tips.get()) + # we should expect at the very least 30 tips + self.assertGreater(mempool_tips_count, 30) + + # Create a new peer and run sync for a while (but stop before getting synced). + peer_id = PeerId() + builder2 = self.simulator.get_default_builder() \ + .set_peer_id(peer_id) \ + .disable_sync_v1() \ + .enable_sync_v2() \ + + manager2 = self.simulator.create_peer(builder2) + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + + # Let the connection start to sync. 
+ self.simulator.run(1) + + # Run until blocks are synced + sync2 = conn12.proto2.state.sync_agent + trigger = StopWhenTrue(sync2.is_synced) + self.assertTrue(self.simulator.run(300, trigger=trigger)) + + # Change manager2's max_running_time to check if it correctly closes the connection + # 10 < 30, so this should be strict enough that it will fail + sync2.max_receiving_tips = 10 + self.assertIsNone(sync2._blk_streaming_server) + self.assertIsNone(sync2._tx_streaming_server) + + # This should fail because the get tips should be rejected because it exceeds the limit + self.simulator.run(300) + # we should expect only the tips to be missing from the second node + self.assertEqual(manager1.tx_storage.get_vertices_count(), + manager2.tx_storage.get_vertices_count() + mempool_tips_count) + # and also the second node should have aborted the connection + self.assertTrue(conn12.proto2.aborting) + + def _prepare_sync_v2_find_best_common_block_reorg(self) -> FakeConnection: manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) manager1.allow_mining_without_peers() miner1 = self.simulator.create_miner(manager1, hashpower=10e6) @@ -265,50 +360,53 @@ def _prepare_sync_v2_find_best_common_block_reorg(self): self.assertTrue(self.simulator.run(3600)) return conn12 - @inlineCallbacks - def test_sync_v2_find_best_common_block_reorg_1(self): + async def test_sync_v2_find_best_common_block_reorg_1(self) -> None: conn12 = self._prepare_sync_v2_find_best_common_block_reorg() + assert isinstance(conn12._proto1.state, ReadyState) sync_agent = conn12._proto1.state.sync_agent + assert isinstance(sync_agent, NodeBlockSync) rng = conn12.manager2.rng my_best_block = sync_agent.get_my_best_block() - peer_best_block = sync_agent.peer_best_block + peer_best_block = not_none(sync_agent.peer_best_block) fake_peer_best_block = _HeightInfo(my_best_block.height + 3, rng.randbytes(32)) reorg_height = peer_best_block.height - 50 - def fake_get_peer_block_hashes(heights): + def 
fake_get_peer_block_hashes(heights: list[int]) -> Deferred[list[_HeightInfo]]: # return empty as soon as the search lowest height is not the genesis if heights[0] != 0: - return [] + return succeed([]) # simulate a reorg response = [] for h in heights: if h < reorg_height: - vertex_id = conn12.manager2.tx_storage.indexes.height.get(h) + index_manager = not_none(conn12.manager2.tx_storage.indexes) + vertex_id = not_none(index_manager.height.get(h)) else: vertex_id = rng.randbytes(32) response.append(_HeightInfo(height=h, id=vertex_id)) return succeed(response) - sync_agent.get_peer_block_hashes = fake_get_peer_block_hashes - common_block_info = yield sync_agent.find_best_common_block(my_best_block, fake_peer_best_block) - self.assertIsNone(common_block_info) + with patch.object(sync_agent, 'get_peer_block_hashes', new=fake_get_peer_block_hashes): + common_block_info = await sync_agent.find_best_common_block(my_best_block, fake_peer_best_block) + self.assertIsNone(common_block_info) - @inlineCallbacks - def test_sync_v2_find_best_common_block_reorg_2(self): + async def test_sync_v2_find_best_common_block_reorg_2(self) -> None: conn12 = self._prepare_sync_v2_find_best_common_block_reorg() + assert isinstance(conn12._proto1.state, ReadyState) sync_agent = conn12._proto1.state.sync_agent + assert isinstance(sync_agent, NodeBlockSync) rng = conn12.manager2.rng my_best_block = sync_agent.get_my_best_block() - peer_best_block = sync_agent.peer_best_block + peer_best_block = not_none(sync_agent.peer_best_block) fake_peer_best_block = _HeightInfo(my_best_block.height + 3, rng.randbytes(32)) reorg_height = peer_best_block.height - 50 - def fake_get_peer_block_hashes(heights): + def fake_get_peer_block_hashes(heights: list[int]) -> Deferred[list[_HeightInfo]]: if heights[0] != 0: return succeed([ _HeightInfo(height=h, id=rng.randbytes(32)) @@ -319,15 +417,16 @@ def fake_get_peer_block_hashes(heights): response = [] for h in heights: if h < reorg_height: - vertex_id = 
conn12.manager2.tx_storage.indexes.height.get(h) + index_manager = not_none(conn12.manager2.tx_storage.indexes) + vertex_id = not_none(index_manager.height.get(h)) else: vertex_id = rng.randbytes(32) response.append(_HeightInfo(height=h, id=vertex_id)) return succeed(response) - sync_agent.get_peer_block_hashes = fake_get_peer_block_hashes - common_block_info = yield sync_agent.find_best_common_block(my_best_block, fake_peer_best_block) - self.assertIsNone(common_block_info) + with patch.object(sync_agent, 'get_peer_block_hashes', new=fake_get_peer_block_hashes): + common_block_info = await sync_agent.find_best_common_block(my_best_block, fake_peer_best_block) + self.assertIsNone(common_block_info) def test_multiple_unexpected_txs(self) -> None: manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) diff --git a/tests/p2p/test_twin_tx.py b/tests/p2p/test_twin_tx.py index 9e5e8857a..ae2339cb5 100644 --- a/tests/p2p/test_twin_tx.py +++ b/tests/p2p/test_twin_tx.py @@ -1,6 +1,7 @@ from hathor.crypto.util import decode_address from hathor.simulator.utils import add_new_blocks from hathor.transaction import Transaction +from hathor.util import not_none from hathor.wallet.base_wallet import WalletOutputInfo from tests import unittest from tests.utils import add_blocks_unlock_reward, add_new_double_spending @@ -9,16 +10,16 @@ class BaseTwinTransactionTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() self.network = 'testnet' self.manager = self.create_peer(self.network, unlock_wallet=True) - def test_twin_tx(self): + def test_twin_tx(self) -> None: add_new_blocks(self.manager, 5, advance_clock=15) add_blocks_unlock_reward(self.manager) - address = self.get_address(0) + address = not_none(self.get_address(0)) value1 = 100 value2 = 101 value3 = 102 diff --git a/tests/p2p/test_whitelist.py b/tests/p2p/test_whitelist.py index e7b83fc18..5cbc7e4ae 100644 --- a/tests/p2p/test_whitelist.py +++ 
b/tests/p2p/test_whitelist.py @@ -17,7 +17,7 @@ class WhitelistTestCase(unittest.SyncV1Params, unittest.TestCase): @patch('hathor.p2p.states.peer_id.settings', new=settings._replace(ENABLE_PEER_WHITELIST=True)) - def test_sync_v11_whitelist_no_no(self): + def test_sync_v11_whitelist_no_no(self) -> None: network = 'testnet' manager1 = self.create_peer(network) @@ -39,7 +39,7 @@ def test_sync_v11_whitelist_no_no(self): self.assertTrue(conn.tr2.disconnecting) @patch('hathor.p2p.states.peer_id.settings', new=settings._replace(ENABLE_PEER_WHITELIST=True)) - def test_sync_v11_whitelist_yes_no(self): + def test_sync_v11_whitelist_yes_no(self) -> None: network = 'testnet' manager1 = self.create_peer(network) @@ -63,7 +63,7 @@ def test_sync_v11_whitelist_yes_no(self): self.assertTrue(conn.tr2.disconnecting) @patch('hathor.p2p.states.peer_id.settings', new=settings._replace(ENABLE_PEER_WHITELIST=True)) - def test_sync_v11_whitelist_yes_yes(self): + def test_sync_v11_whitelist_yes_yes(self) -> None: network = 'testnet' manager1 = self.create_peer(network) diff --git a/tests/pubsub/test_pubsub.py b/tests/pubsub/test_pubsub.py index 2d3d1ef62..b2e76e646 100644 --- a/tests/pubsub/test_pubsub.py +++ b/tests/pubsub/test_pubsub.py @@ -1,10 +1,10 @@ -from hathor.pubsub import HathorEvents, PubSubManager +from hathor.pubsub import EventArguments, HathorEvents, PubSubManager from tests.unittest import TestCase class PubSubTestCase(TestCase): - def test_duplicate_subscribe(self): - def noop(): + def test_duplicate_subscribe(self) -> None: + def noop(event: HathorEvents, args: EventArguments) -> None: pass pubsub = PubSubManager(self.clock) pubsub.subscribe(HathorEvents.NETWORK_NEW_TX_ACCEPTED, noop) diff --git a/tests/pubsub/test_pubsub2.py b/tests/pubsub/test_pubsub2.py index faaf9c758..d0ede02ac 100644 --- a/tests/pubsub/test_pubsub2.py +++ b/tests/pubsub/test_pubsub2.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the 
License. -from typing import Callable +from typing import Any, Callable from unittest.mock import Mock, patch import pytest @@ -78,7 +78,7 @@ def test_memory_reactor_clock_running_with_threading() -> None: pubsub = PubSubManager(reactor) handler = Mock() - def fake_call_from_thread(f: Callable) -> None: + def fake_call_from_thread(f: Callable[..., Any]) -> None: reactor.callLater(0, f) call_from_thread_mock = Mock(side_effect=fake_call_from_thread) diff --git a/tests/resources/healthcheck/test_healthcheck.py b/tests/resources/healthcheck/test_healthcheck.py index c616d3a03..5beff1f24 100644 --- a/tests/resources/healthcheck/test_healthcheck.py +++ b/tests/resources/healthcheck/test_healthcheck.py @@ -6,9 +6,9 @@ from hathor.healthcheck.resources.healthcheck import HealthcheckResource from hathor.manager import HathorManager from hathor.simulator import FakeConnection +from hathor.simulator.utils import add_new_blocks from tests import unittest from tests.resources.base_resource import StubSite, _BaseResourceTest -from tests.utils import add_new_blocks class BaseHealthcheckReadinessTest(_BaseResourceTest._ResourceTest): diff --git a/tests/resources/wallet/test_thin_wallet.py b/tests/resources/wallet/test_thin_wallet.py index ed1710c7e..4f01a739d 100644 --- a/tests/resources/wallet/test_thin_wallet.py +++ b/tests/resources/wallet/test_thin_wallet.py @@ -197,7 +197,6 @@ def test_history_paginate(self): response_history = yield self.web_address_history.get( 'thin_wallet/address_history', { b'addresses[]': address.encode(), - b'paginate': b'true' } ) @@ -217,7 +216,6 @@ def test_history_paginate(self): response_history = yield self.web_address_history.get( 'thin_wallet/address_history', { b'addresses[]': address.encode(), - b'paginate': b'true' } ) @@ -248,7 +246,6 @@ def test_history_paginate(self): response_history = yield self.web_address_history.get( 'thin_wallet/address_history', { b'addresses[]': random_address.encode(), - b'paginate': b'true' } ) @@ -261,7 +258,6 
@@ def test_history_paginate(self): 'thin_wallet/address_history', { b'addresses[]': random_address.encode(), b'hash': response_data['first_hash'].encode(), - b'paginate': b'true' } ) diff --git a/tests/simulation/base.py b/tests/simulation/base.py index 8acb087ca..61ba7ad94 100644 --- a/tests/simulation/base.py +++ b/tests/simulation/base.py @@ -1,6 +1,8 @@ from typing import Optional +from hathor.manager import HathorManager from hathor.simulator import Simulator +from hathor.types import VertexId from tests import unittest @@ -9,7 +11,7 @@ class SimulatorTestCase(unittest.TestCase): seed_config: Optional[int] = None - def setUp(self): + def setUp(self) -> None: super().setUp() self.simulator = Simulator(self.seed_config) @@ -19,11 +21,17 @@ def setUp(self): print('Simulation seed config:', self.simulator.seed) print('-'*30) - def tearDown(self): + def tearDown(self) -> None: self.simulator.stop() super().tearDown() - def create_peer(self, enable_sync_v1=None, enable_sync_v2=None, soft_voided_tx_ids=None, simulator=None): + def create_peer( # type: ignore[override] + self, + enable_sync_v1: bool | None = None, + enable_sync_v2: bool | None = None, + soft_voided_tx_ids: set[VertexId] = set(), + simulator: Simulator | None = None + ) -> HathorManager: if enable_sync_v1 is None: assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' 'the test class or pass `enable_sync_v1` by argument') diff --git a/tests/simulation/test_simulator.py b/tests/simulation/test_simulator.py index aac7edd66..5ef65d9e2 100644 --- a/tests/simulation/test_simulator.py +++ b/tests/simulation/test_simulator.py @@ -1,21 +1,22 @@ import pytest +from hathor.manager import HathorManager from hathor.simulator import FakeConnection -from hathor.simulator.trigger import All as AllTriggers, StopWhenSynced +from hathor.simulator.trigger import All as AllTriggers, StopWhenSynced, Trigger from hathor.verification.vertex_verifier import VertexVerifier 
from tests import unittest from tests.simulation.base import SimulatorTestCase class BaseRandomSimulatorTestCase(SimulatorTestCase): - def test_verify_pow(self): + def test_verify_pow(self) -> None: manager1 = self.create_peer() # just get one of the genesis, we don't really need to create any transaction tx = next(iter(manager1.tx_storage.get_all_genesis())) # optional argument must be valid, it just has to not raise any exception, there's no assert for that VertexVerifier(settings=self._settings, daa=manager1.daa).verify_pow(tx, override_weight=0.) - def test_one_node(self): + def test_one_node(self) -> None: manager1 = self.create_peer() miner1 = self.simulator.create_miner(manager1, hashpower=100e6) @@ -29,7 +30,7 @@ def test_one_node(self): # FIXME: the setup above produces 0 new blocks and transactions # self.assertGreater(manager1.tx_storage.get_vertices_count(), 3) - def test_two_nodes(self): + def test_two_nodes(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() @@ -63,10 +64,10 @@ def test_two_nodes(self): self.assertTrue(conn12.is_connected) self.assertTipsEqual(manager1, manager2) - def test_many_miners_since_beginning(self): - nodes = [] + def test_many_miners_since_beginning(self) -> None: + nodes: list[HathorManager] = [] miners = [] - stop_triggers = [] + stop_triggers: list[Trigger] = [] for hashpower in [10e6, 5e6, 1e6, 1e6, 1e6]: manager = self.create_peer() @@ -96,11 +97,11 @@ def test_many_miners_since_beginning(self): self.assertTipsEqual(nodes[0], node) @pytest.mark.flaky(max_runs=5, min_passes=1) - def test_new_syncing_peer(self): + def test_new_syncing_peer(self) -> None: nodes = [] miners = [] tx_generators = [] - stop_triggers = [] + stop_triggers: list[Trigger] = [] manager = self.create_peer() nodes.append(manager) @@ -162,7 +163,7 @@ class SyncV2RandomSimulatorTestCase(unittest.SyncV2Params, BaseRandomSimulatorTe class SyncBridgeRandomSimulatorTestCase(unittest.SyncBridgeParams, SyncV2RandomSimulatorTestCase): 
__test__ = True - def test_compare_mempool_implementations(self): + def test_compare_mempool_implementations(self) -> None: manager1 = self.create_peer() manager2 = self.create_peer() @@ -170,7 +171,7 @@ def test_compare_mempool_implementations(self): tx_storage = manager1.tx_storage assert tx_storage.indexes is not None assert tx_storage.indexes.mempool_tips is not None - assert manager1.tx_storage.indexes.tx_tips is not None + assert manager1.tx_storage.indexes and manager1.tx_storage.indexes.tx_tips is not None mempool_tips = tx_storage.indexes.mempool_tips miner1 = self.simulator.create_miner(manager1, hashpower=10e6) diff --git a/tests/simulation/test_simulator_itself.py b/tests/simulation/test_simulator_itself.py index 6683a37b4..206d966ae 100644 --- a/tests/simulation/test_simulator_itself.py +++ b/tests/simulation/test_simulator_itself.py @@ -1,5 +1,7 @@ import pytest +from hathor.manager import HathorManager +from hathor.p2p.peer_id import PeerId from hathor.simulator import FakeConnection, Simulator from tests import unittest @@ -11,7 +13,7 @@ class BaseSimulatorSelfTestCase(unittest.TestCase): __test__ = False - def setUp(self): + def setUp(self) -> None: super().setUp() seed = None @@ -29,14 +31,20 @@ def setUp(self): print('Simulation seed config:', self.simulator1.seed) print('-' * 30) - def tearDown(self): + def tearDown(self) -> None: super().tearDown() self.simulator1.stop() self.simulator2.stop() self.simulator3.stop() - def create_simulator_peer(self, simulator, peer_id_pool, enable_sync_v1=None, enable_sync_v2=None): + def create_simulator_peer( + self, + simulator: Simulator, + peer_id_pool: list[PeerId], + enable_sync_v1: bool | None = None, + enable_sync_v2: bool | None = None + ) -> HathorManager: if enable_sync_v1 is None: assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' 'the test class or pass `enable_sync_v1` by argument') @@ -54,7 +62,7 @@ def create_simulator_peer(self, 
simulator, peer_id_pool, enable_sync_v1=None, en return simulator.create_peer(builder) - def _simulate_run(self, run_i, simulator): + def _simulate_run(self, run_i: int, simulator: Simulator) -> list[HathorManager]: # XXX: the following was adapted from test_new_syncing_peer, it doesn't matter too much, but has good coverage # of different behaviors that can be affected by non-determinism on the fullnode implementation @@ -110,7 +118,7 @@ def _simulate_run(self, run_i, simulator): # XXX: marked as flaky because of a known random issue @pytest.mark.flaky(max_runs=3, min_passes=1) - def test_determinism_full_runs(self): + def test_determinism_full_runs(self) -> None: # sanity assert as to not mess up with it on the setup self.assertEqual(self.simulator1.seed, self.simulator2.seed) self.assertEqual(self.simulator1.seed, self.simulator3.seed) @@ -128,7 +136,7 @@ def test_determinism_full_runs(self): # XXX: marked as flaky because of a known random issue @pytest.mark.flaky(max_runs=3, min_passes=1) - def test_determinism_interleaved(self): + def test_determinism_interleaved(self) -> None: # sanity assert as to not mess up with it on the setup self.assertEqual(self.simulator1.seed, self.simulator2.seed) diff --git a/tests/simulation/test_trigger.py b/tests/simulation/test_trigger.py index b91e4e293..678902d47 100644 --- a/tests/simulation/test_trigger.py +++ b/tests/simulation/test_trigger.py @@ -3,11 +3,12 @@ from hathor.p2p.messages import ProtocolMessages from hathor.simulator import FakeConnection, Simulator from hathor.simulator.trigger import StopAfterMinimumBalance, StopAfterNMinedBlocks, StopWhenSendLineMatch +from hathor.util import not_none from tests import unittest class TriggerTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: super().setUp() self.simulator = Simulator() @@ -20,11 +21,11 @@ def setUp(self): print('Simulation seed config:', self.simulator.seed) print('-' * 30) - def tearDown(self): + def tearDown(self) -> None: 
super().tearDown() self.simulator.stop() - def test_stop_after_n_mined_blocks(self): + def test_stop_after_n_mined_blocks(self) -> None: miner1 = self.simulator.create_miner(self.manager1, hashpower=1e6) miner1.start() @@ -47,11 +48,11 @@ def test_stop_after_n_mined_blocks(self): self.assertEqual(miner1.get_blocks_found(), 16) self.assertLess(reactor.seconds(), t0 + 3600) - def test_stop_after_minimum_balance(self): + def test_stop_after_minimum_balance(self) -> None: miner1 = self.simulator.create_miner(self.manager1, hashpower=1e6) miner1.start() - wallet = self.manager1.wallet + wallet = not_none(self.manager1.wallet) settings = self.simulator.settings minimum_balance = 1000_00 # 16 blocks @@ -62,7 +63,7 @@ def test_stop_after_minimum_balance(self): self.assertTrue(self.simulator.run(3600, trigger=trigger)) self.assertGreaterEqual(wallet.balance[token_uid].available, minimum_balance) - def test_stop_after_sendline(self): + def test_stop_after_sendline(self) -> None: manager2 = self.simulator.create_peer() conn12 = FakeConnection(self.manager1, manager2, latency=0.05) self.simulator.add_connection(conn12) diff --git a/tests/sysctl/test_feature_activation.py b/tests/sysctl/test_feature_activation.py new file mode 100644 index 000000000..48aeb2713 --- /dev/null +++ b/tests/sysctl/test_feature_activation.py @@ -0,0 +1,38 @@ +# Copyright 2024 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import Mock + +from hathor.feature_activation.bit_signaling_service import BitSignalingService +from hathor.feature_activation.feature import Feature +from hathor.sysctl import FeatureActivationSysctl + + +def test_feature_activation_sysctl() -> None: + bit_signaling_service_mock = Mock(spec_set=BitSignalingService) + sysctl = FeatureActivationSysctl(bit_signaling_service_mock) + + bit_signaling_service_mock.get_support_features = Mock(return_value=[Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2]) + bit_signaling_service_mock.get_not_support_features = Mock(return_value=[Feature.NOP_FEATURE_3]) + bit_signaling_service_mock.get_best_block_signaling_features = Mock(return_value={Feature.NOP_FEATURE_1: Mock()}) + + assert sysctl.get('supported_features') == ['NOP_FEATURE_1', 'NOP_FEATURE_2'] + assert sysctl.get('not_supported_features') == ['NOP_FEATURE_3'] + assert sysctl.get('signaling_features') == ['NOP_FEATURE_1'] + + sysctl.unsafe_set('add_support', 'NOP_FEATURE_3') + bit_signaling_service_mock.add_feature_support.assert_called_once_with(Feature.NOP_FEATURE_3) + + sysctl.unsafe_set('remove_support', 'NOP_FEATURE_1') + bit_signaling_service_mock.remove_feature_support.assert_called_once_with(Feature.NOP_FEATURE_1) diff --git a/tests/sysctl/test_sysctl.py b/tests/sysctl/test_sysctl.py index d629d4230..01d8b46cc 100644 --- a/tests/sysctl/test_sysctl.py +++ b/tests/sysctl/test_sysctl.py @@ -13,7 +13,7 @@ class SysctlTest(unittest.TestCase): # We need this patch because pydantic.validate_arguments fails when it gets a mock function. 
- @patch('hathor.sysctl.sysctl.validate_arguments', new=lambda x: x) + @patch('hathor.sysctl.sysctl.validate_arguments', new=lambda x: x) # type: ignore def setUp(self) -> None: super().setUp() diff --git a/tests/tx/test_genesis.py b/tests/tx/test_genesis.py index a41021f8b..a5bf0f430 100644 --- a/tests/tx/test_genesis.py +++ b/tests/tx/test_genesis.py @@ -74,9 +74,9 @@ def test_genesis_weight(self): # Validate the block and tx weight # in test mode weight is always 1 self._daa.TEST_MODE = TestMode.TEST_ALL_WEIGHT - self.assertEqual(self._daa.calculate_block_difficulty(genesis_block), 1) + self.assertEqual(self._daa.calculate_block_difficulty(genesis_block, Mock()), 1) self.assertEqual(self._daa.minimum_tx_weight(genesis_tx), 1) self._daa.TEST_MODE = TestMode.DISABLED - self.assertEqual(self._daa.calculate_block_difficulty(genesis_block), genesis_block.weight) + self.assertEqual(self._daa.calculate_block_difficulty(genesis_block, Mock()), genesis_block.weight) self.assertEqual(self._daa.minimum_tx_weight(genesis_tx), genesis_tx.weight) diff --git a/tests/tx/test_indexes.py b/tests/tx/test_indexes.py index b28a7cfc4..1a0ad0923 100644 --- a/tests/tx/test_indexes.py +++ b/tests/tx/test_indexes.py @@ -631,7 +631,7 @@ def test_addresses_index_empty(self): address = self.get_address(10) assert address is not None self.assertTrue(addresses_indexes.is_address_empty(address)) - self.assertEqual(addresses_indexes.get_sorted_from_address(address), []) + self.assertEqual(list(addresses_indexes.get_sorted_from_address(address)), []) def test_addresses_index_last(self): """ @@ -653,7 +653,7 @@ def test_addresses_index_last(self): # XXX: this artificial address should major (be greater byte-wise) any possible "natural" address address = '\x7f' * 34 self.assertTrue(addresses_indexes.is_address_empty(address)) - self.assertEqual(addresses_indexes.get_sorted_from_address(address), []) + self.assertEqual(list(addresses_indexes.get_sorted_from_address(address)), []) # XXX: since we 
didn't add any multisig address, this is guaranteed to be reach the tail end of the index assert self._settings.P2PKH_VERSION_BYTE[0] < self._settings.MULTISIG_VERSION_BYTE[0] @@ -666,7 +666,7 @@ def test_addresses_index_last(self): assert address is not None self.assertTrue(addresses_indexes.is_address_empty(address)) - self.assertEqual(addresses_indexes.get_sorted_from_address(address), []) + self.assertEqual(list(addresses_indexes.get_sorted_from_address(address)), []) def test_height_index(self): from hathor.indexes.height_index import HeightInfo diff --git a/tests/tx/test_indexes2.py b/tests/tx/test_indexes2.py index b8df4d9eb..970903cc6 100644 --- a/tests/tx/test_indexes2.py +++ b/tests/tx/test_indexes2.py @@ -64,7 +64,7 @@ def test_timestamp_index(self): # XXX: we verified they're the same, doesn't matter which we pick: idx = idx_memory hashes = hashes_memory - self.log.debug('indexes match', idx=idx, hashes=unittest.shorten_hash(hashes)) + self.log.debug('indexes match', idx=idx, hashes=unittest.short_hashes(hashes)) if idx is None: break offset_variety.add(idx[1]) diff --git a/tests/tx/test_verification.py b/tests/tx/test_verification.py index 336d54510..e966f40e2 100644 --- a/tests/tx/test_verification.py +++ b/tests/tx/test_verification.py @@ -340,7 +340,7 @@ def test_merge_mined_block_verify_without_storage(self) -> None: verify_sigops_output_wrapped.assert_called_once() # MergeMinedBlock methods - verify_pow_wrapped.assert_called_once() + verify_aux_pow_wrapped.assert_called_once() def test_merge_mined_block_verify(self) -> None: block = self._get_valid_merge_mined_block() @@ -389,7 +389,7 @@ def test_merge_mined_block_verify(self) -> None: verify_mandatory_signaling_wrapped.assert_called_once() # MergeMinedBlock methods - verify_pow_wrapped.assert_called_once() + verify_aux_pow_wrapped.assert_called_once() def test_merge_mined_block_validate_basic(self) -> None: block = self._get_valid_merge_mined_block() @@ -484,7 +484,7 @@ def 
test_merge_mined_block_validate_full(self) -> None: verify_mandatory_signaling_wrapped.assert_called_once() # MergeMinedBlock methods - verify_pow_wrapped.assert_called_once() + verify_aux_pow_wrapped.assert_called_once() def test_transaction_verify_basic(self) -> None: tx = self._get_valid_tx() diff --git a/tests/unittest.py b/tests/unittest.py index 019437e26..939e32853 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -3,23 +3,32 @@ import shutil import tempfile import time -from typing import Iterator, Optional +from typing import Any, Callable, Collection, Iterable, Iterator, Optional from unittest import main as ut_main from structlog import get_logger from twisted.trial import unittest from hathor.builder import BuildArtifacts, Builder +from hathor.checkpoint import Checkpoint from hathor.conf import HathorSettings from hathor.conf.get_settings import get_global_settings from hathor.daa import DifficultyAdjustmentAlgorithm, TestMode +from hathor.event import EventManager +from hathor.event.storage import EventStorage +from hathor.manager import HathorManager from hathor.p2p.peer_id import PeerId +from hathor.p2p.sync_v1.agent import NodeSyncTimestamp +from hathor.p2p.sync_v2.agent import NodeBlockSync from hathor.p2p.sync_version import SyncVersion +from hathor.pubsub import PubSubManager from hathor.reactor import ReactorProtocol as Reactor, get_global_reactor from hathor.simulator.clock import MemoryReactorHeapClock -from hathor.transaction import BaseTransaction -from hathor.util import Random -from hathor.wallet import HDWallet, Wallet +from hathor.transaction import BaseTransaction, Block, Transaction +from hathor.transaction.storage.transaction_storage import TransactionStorage +from hathor.types import VertexId +from hathor.util import Random, not_none +from hathor.wallet import BaseWallet, HDWallet, Wallet from tests.test_memory_reactor_clock import TestMemoryReactorClock logger = get_logger() @@ -28,9 +37,8 @@ USE_MEMORY_STORAGE = 
os.environ.get('HATHOR_TEST_MEMORY_STORAGE', 'false').lower() == 'true' -def shorten_hash(container): - container_type = type(container) - return container_type(h[-2:].hex() for h in container) +def short_hashes(container: Collection[bytes]) -> Iterable[str]: + return map(lambda hash_bytes: hash_bytes[-2:].hex(), container) def _load_peer_id_pool(file_path: Optional[str] = None) -> Iterator[PeerId]: @@ -45,7 +53,7 @@ def _load_peer_id_pool(file_path: Optional[str] = None) -> Iterator[PeerId]: yield PeerId.create_from_json(peer_id_dict) -def _get_default_peer_id_pool_filepath(): +def _get_default_peer_id_pool_filepath() -> str: this_file_path = os.path.dirname(__file__) file_name = 'peer_id_pool.json' file_path = os.path.join(this_file_path, file_name) @@ -104,8 +112,8 @@ class TestCase(unittest.TestCase): use_memory_storage: bool = USE_MEMORY_STORAGE seed_config: Optional[int] = None - def setUp(self): - self.tmpdirs = [] + def setUp(self) -> None: + self.tmpdirs: list[str] = [] self.clock = TestMemoryReactorClock() self.clock.advance(time.time()) self.log = logger.new() @@ -113,10 +121,10 @@ def setUp(self): self.seed = secrets.randbits(64) if self.seed_config is None else self.seed_config self.log.info('set seed', seed=self.seed) self.rng = Random(self.seed) - self._pending_cleanups = [] + self._pending_cleanups: list[Callable[..., Any]] = [] self._settings = get_global_settings() - def tearDown(self): + def tearDown(self) -> None: self.clean_tmpdirs() for fn in self._pending_cleanups: fn() @@ -139,12 +147,12 @@ def get_random_peer_id_from_pool(self, pool: Optional[list[PeerId]] = None, pool.remove(peer_id) return peer_id - def mkdtemp(self): + def mkdtemp(self) -> str: tmpdir = tempfile.mkdtemp() self.tmpdirs.append(tmpdir) return tmpdir - def _create_test_wallet(self, unlocked=False): + def _create_test_wallet(self, unlocked: bool = False) -> Wallet: """ Generate a Wallet with a number of keypairs for testing :rtype: Wallet """ @@ -164,14 +172,14 @@ def 
get_builder(self, network: str) -> TestBuilder: .set_network(network) return builder - def create_peer_from_builder(self, builder, start_manager=True): + def create_peer_from_builder(self, builder: Builder, start_manager: bool = True) -> HathorManager: artifacts = builder.build() manager = artifacts.manager if artifacts.rocksdb_storage: self._pending_cleanups.append(artifacts.rocksdb_storage.close) - manager.avg_time_between_blocks = 0.0001 + # manager.avg_time_between_blocks = 0.0001 # FIXME: This property is not defined. Fix this. if start_manager: manager.start() @@ -180,11 +188,28 @@ def create_peer_from_builder(self, builder, start_manager=True): return manager - def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unlock_wallet=True, wallet_index=False, - capabilities=None, full_verification=True, enable_sync_v1=None, enable_sync_v2=None, - checkpoints=None, utxo_index=False, event_manager=None, use_memory_index=None, start_manager=True, - pubsub=None, event_storage=None, enable_event_queue=None, use_memory_storage=None): - + def create_peer( # type: ignore[no-untyped-def] + self, + network: str, + peer_id: PeerId | None = None, + wallet: BaseWallet | None = None, + tx_storage: TransactionStorage | None = None, + unlock_wallet: bool = True, + wallet_index: bool = False, + capabilities: list[str] | None = None, + full_verification: bool = True, + enable_sync_v1: bool | None = None, + enable_sync_v2: bool | None = None, + checkpoints: list[Checkpoint] | None = None, + utxo_index: bool = False, + event_manager: EventManager | None = None, + use_memory_index: bool | None = None, + start_manager: bool = True, + pubsub: PubSubManager | None = None, + event_storage: EventStorage | None = None, + enable_event_queue: bool | None = None, + use_memory_storage: bool | None = None + ): # TODO: Add -> HathorManager here. It breaks the lint in a lot of places. 
enable_sync_v1, enable_sync_v2 = self._syncVersionFlags(enable_sync_v1, enable_sync_v2) builder = self.get_builder(network) \ @@ -203,8 +228,9 @@ def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unloc if not wallet: wallet = self._create_test_wallet() if unlock_wallet: + assert isinstance(wallet, Wallet) wallet.unlock(b'MYPASS') - builder.set_wallet(wallet) + builder.set_wallet(not_none(wallet)) if event_storage: builder.set_event_storage(event_storage) @@ -254,7 +280,7 @@ def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unloc return manager - def run_to_completion(self): + def run_to_completion(self) -> None: """ This will advance the test's clock until all calls scheduled are done. """ for call in self.clock.getDelayedCalls(): @@ -277,7 +303,11 @@ def assertIsTopological(self, tx_sequence: Iterator[BaseTransaction], message: O self.assertIn(dep, valid_deps, message) valid_deps.add(tx.hash) - def _syncVersionFlags(self, enable_sync_v1=None, enable_sync_v2=None): + def _syncVersionFlags( + self, + enable_sync_v1: bool | None = None, + enable_sync_v2: bool | None = None + ) -> tuple[bool, bool]: """Internal: use this to check and get the flags and optionally provide override values.""" if enable_sync_v1 is None: assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' @@ -290,19 +320,19 @@ def _syncVersionFlags(self, enable_sync_v1=None, enable_sync_v2=None): assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version' return enable_sync_v1, enable_sync_v2 - def assertTipsEqual(self, manager1, manager2): + def assertTipsEqual(self, manager1: HathorManager, manager2: HathorManager) -> None: _, enable_sync_v2 = self._syncVersionFlags() if enable_sync_v2: self.assertTipsEqualSyncV2(manager1, manager2) else: self.assertTipsEqualSyncV1(manager1, manager2) - def assertTipsNotEqual(self, manager1, manager2): + def assertTipsNotEqual(self, manager1: 
HathorManager, manager2: HathorManager) -> None: s1 = set(manager1.tx_storage.get_all_tips()) s2 = set(manager2.tx_storage.get_all_tips()) self.assertNotEqual(s1, s2) - def assertTipsEqualSyncV1(self, manager1, manager2): + def assertTipsEqualSyncV1(self, manager1: HathorManager, manager2: HathorManager) -> None: # XXX: this is the original implementation of assertTipsEqual s1 = set(manager1.tx_storage.get_all_tips()) s2 = set(manager2.tx_storage.get_all_tips()) @@ -312,39 +342,45 @@ def assertTipsEqualSyncV1(self, manager1, manager2): s2 = set(manager2.tx_storage.get_tx_tips()) self.assertEqual(s1, s2) - def assertTipsEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True): + def assertTipsEqualSyncV2( + self, + manager1: HathorManager, + manager2: HathorManager, + *, + strict_sync_v2_indexes: bool = True + ) -> None: # tx tips if strict_sync_v2_indexes: - tips1 = manager1.tx_storage.indexes.mempool_tips.get() - tips2 = manager2.tx_storage.indexes.mempool_tips.get() + tips1 = not_none(not_none(manager1.tx_storage.indexes).mempool_tips).get() + tips2 = not_none(not_none(manager2.tx_storage.indexes).mempool_tips).get() else: tips1 = {tx.hash for tx in manager1.tx_storage.iter_mempool_tips_from_best_index()} tips2 = {tx.hash for tx in manager2.tx_storage.iter_mempool_tips_from_best_index()} - self.log.debug('tx tips1', len=len(tips1), list=shorten_hash(tips1)) - self.log.debug('tx tips2', len=len(tips2), list=shorten_hash(tips2)) + self.log.debug('tx tips1', len=len(tips1), list=short_hashes(tips1)) + self.log.debug('tx tips2', len=len(tips2), list=short_hashes(tips2)) self.assertEqual(tips1, tips2) # best block s1 = set(manager1.tx_storage.get_best_block_tips()) s2 = set(manager2.tx_storage.get_best_block_tips()) - self.log.debug('block tips1', len=len(s1), list=shorten_hash(s1)) - self.log.debug('block tips2', len=len(s2), list=shorten_hash(s2)) + self.log.debug('block tips1', len=len(s1), list=short_hashes(s1)) + self.log.debug('block tips2', 
len=len(s2), list=short_hashes(s2)) self.assertEqual(s1, s2) # best block (from height index) - b1 = manager1.tx_storage.indexes.height.get_tip() - b2 = manager2.tx_storage.indexes.height.get_tip() + b1 = not_none(manager1.tx_storage.indexes).height.get_tip() + b2 = not_none(manager2.tx_storage.indexes).height.get_tip() self.assertIn(b1, s2) self.assertIn(b2, s1) - def assertConsensusEqual(self, manager1, manager2): + def assertConsensusEqual(self, manager1: HathorManager, manager2: HathorManager) -> None: _, enable_sync_v2 = self._syncVersionFlags() if enable_sync_v2: self.assertConsensusEqualSyncV2(manager1, manager2) else: self.assertConsensusEqualSyncV1(manager1, manager2) - def assertConsensusEqualSyncV1(self, manager1, manager2): + def assertConsensusEqualSyncV1(self, manager1: HathorManager, manager2: HathorManager) -> None: self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count()) for tx1 in manager1.tx_storage.get_all_transactions(): tx2 = manager2.tx_storage.get_transaction(tx1.hash) @@ -358,12 +394,20 @@ def assertConsensusEqualSyncV1(self, manager1, manager2): self.assertIsNone(tx2_meta.voided_by) else: # If tx1 is voided, then tx2 must be voided. 
+ assert tx1_meta.voided_by is not None + assert tx2_meta.voided_by is not None self.assertGreaterEqual(len(tx1_meta.voided_by), 1) self.assertGreaterEqual(len(tx2_meta.voided_by), 1) # Hard verification # self.assertEqual(tx1_meta.voided_by, tx2_meta.voided_by) - def assertConsensusEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True): + def assertConsensusEqualSyncV2( + self, + manager1: HathorManager, + manager2: HathorManager, + *, + strict_sync_v2_indexes: bool = True + ) -> None: # The current sync algorithm does not propagate voided blocks/txs # so the count might be different even though the consensus is equal # One peer might have voided txs that the other does not have @@ -374,7 +418,9 @@ def assertConsensusEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_index # the following is specific to sync-v2 # helper function: - def get_all_executed_or_voided(tx_storage): + def get_all_executed_or_voided( + tx_storage: TransactionStorage + ) -> tuple[set[VertexId], set[VertexId], set[VertexId]]: """Get all txs separated into three sets: executed, voided, partial""" tx_executed = set() tx_voided = set() @@ -401,14 +447,16 @@ def get_all_executed_or_voided(tx_storage): self.log.debug('node1 rest', len_voided=len(tx_voided1), len_partial=len(tx_partial1)) self.log.debug('node2 rest', len_voided=len(tx_voided2), len_partial=len(tx_partial2)) - def assertConsensusValid(self, manager): + def assertConsensusValid(self, manager: HathorManager) -> None: for tx in manager.tx_storage.get_all_transactions(): if tx.is_block: + assert isinstance(tx, Block) self.assertBlockConsensusValid(tx) else: + assert isinstance(tx, Transaction) self.assertTransactionConsensusValid(tx) - def assertBlockConsensusValid(self, block): + def assertBlockConsensusValid(self, block: Block) -> None: self.assertTrue(block.is_block) if not block.parents: # Genesis @@ -419,7 +467,8 @@ def assertBlockConsensusValid(self, block): parent_meta = parent.get_metadata() 
self.assertIsNone(parent_meta.voided_by) - def assertTransactionConsensusValid(self, tx): + def assertTransactionConsensusValid(self, tx: Transaction) -> None: + assert tx.storage is not None self.assertFalse(tx.is_block) meta = tx.get_metadata() if meta.voided_by and tx.hash in meta.voided_by: @@ -439,7 +488,7 @@ def assertTransactionConsensusValid(self, tx): spent_meta = spent_tx.get_metadata() if spent_meta.voided_by is not None: - self.assertIsNotNone(meta.voided_by) + assert meta.voided_by is not None self.assertTrue(spent_meta.voided_by) self.assertTrue(meta.voided_by) self.assertTrue(spent_meta.voided_by.issubset(meta.voided_by)) @@ -447,30 +496,32 @@ def assertTransactionConsensusValid(self, tx): for parent in tx.get_parents(): parent_meta = parent.get_metadata() if parent_meta.voided_by is not None: - self.assertIsNotNone(meta.voided_by) + assert meta.voided_by is not None self.assertTrue(parent_meta.voided_by) self.assertTrue(meta.voided_by) self.assertTrue(parent_meta.voided_by.issubset(meta.voided_by)) - def assertSyncedProgress(self, node_sync): + def assertSyncedProgress(self, node_sync: NodeSyncTimestamp | NodeBlockSync) -> None: """Check "synced" status of p2p-manager, uses self._enable_sync_vX to choose which check to run.""" enable_sync_v1, enable_sync_v2 = self._syncVersionFlags() if enable_sync_v2: + assert isinstance(node_sync, NodeBlockSync) self.assertV2SyncedProgress(node_sync) elif enable_sync_v1: + assert isinstance(node_sync, NodeSyncTimestamp) self.assertV1SyncedProgress(node_sync) - def assertV1SyncedProgress(self, node_sync): + def assertV1SyncedProgress(self, node_sync: NodeSyncTimestamp) -> None: self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) - def assertV2SyncedProgress(self, node_sync): + def assertV2SyncedProgress(self, node_sync: NodeBlockSync) -> None: self.assertEqual(node_sync.synced_block, node_sync.peer_best_block) - def clean_tmpdirs(self): + def clean_tmpdirs(self) -> None: for tmpdir in 
self.tmpdirs: shutil.rmtree(tmpdir) - def clean_pending(self, required_to_quiesce=True): + def clean_pending(self, required_to_quiesce: bool = True) -> None: """ This handy method cleans all pending tasks from the reactor. diff --git a/tests/utils.py b/tests/utils.py index cdcbd7bb2..fff406b36 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -5,9 +5,10 @@ import time import urllib.parse from dataclasses import dataclass -from typing import Optional, cast +from typing import Any, Optional import requests +from cryptography.hazmat.primitives.asymmetric import ec from hathorlib.scripts import DataScript from twisted.internet.task import Clock @@ -19,7 +20,7 @@ from hathor.manager import HathorManager from hathor.mining.cpu_mining_service import CpuMiningService from hathor.simulator.utils import add_new_block, add_new_blocks, gen_new_double_spending, gen_new_tx -from hathor.transaction import BaseTransaction, Transaction, TxInput, TxOutput +from hathor.transaction import BaseTransaction, Block, Transaction, TxInput, TxOutput from hathor.transaction.scripts import P2PKH, HathorScript, Opcode, parse_address_script from hathor.transaction.token_creation_tx import TokenCreationTransaction from hathor.transaction.util import get_deposit_amount @@ -134,7 +135,13 @@ def add_new_double_spending(manager: HathorManager, *, use_same_parents: bool = return tx -def add_new_tx(manager, address, value, advance_clock=None, propagate=True): +def add_new_tx( + manager: HathorManager, + address: str, + value: int, + advance_clock: int | None = None, + propagate: bool = True +) -> Transaction: """ Create, resolve and propagate a new tx :param manager: Manager object to handle the creation @@ -153,11 +160,16 @@ def add_new_tx(manager, address, value, advance_clock=None, propagate=True): if propagate: manager.propagate_tx(tx, fails_silently=False) if advance_clock: - manager.reactor.advance(advance_clock) + manager.reactor.advance(advance_clock) # type: ignore[attr-defined] return tx 
-def add_new_transactions(manager, num_txs, advance_clock=None, propagate=True): +def add_new_transactions( + manager: HathorManager, + num_txs: int, + advance_clock: int | None = None, + propagate: bool = True +) -> list[Transaction]: """ Create, resolve and propagate some transactions :param manager: Manager object to handle the creation @@ -178,7 +190,7 @@ def add_new_transactions(manager, num_txs, advance_clock=None, propagate=True): return txs -def add_blocks_unlock_reward(manager): +def add_blocks_unlock_reward(manager: HathorManager) -> list[Block]: """This method adds new blocks to a 'burn address' to make sure the existing block rewards can be spent. It uses a 'burn address' so the manager's wallet is not impacted. @@ -186,7 +198,14 @@ def add_blocks_unlock_reward(manager): return add_new_blocks(manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1, address=BURN_ADDRESS) -def run_server(hostname='localhost', listen=8005, status=8085, bootstrap=None, tries=100, alive_for_at_least_sec=3): +def run_server( + hostname: str = 'localhost', + listen: int = 8005, + status: int = 8085, + bootstrap: str | None = None, + tries: int = 100, + alive_for_at_least_sec: int = 3 +) -> subprocess.Popen[bytes]: """ Starts a full node in a subprocess running the cli command :param hostname: Hostname used to be accessed by other peers @@ -249,7 +268,14 @@ def run_server(hostname='localhost', listen=8005, status=8085, bootstrap=None, t return process -def request_server(path, method, host='http://localhost', port=8085, data=None, prefix=settings.API_VERSION_PREFIX): +def request_server( + path: str, + method: str, + host: str = 'http://localhost', + port: int = 8085, + data: dict[str, Any] | None = None, + prefix: str = settings.API_VERSION_PREFIX +) -> dict[str, Any]: """ Execute a request for status server :param path: Url path of the request @@ -280,11 +306,18 @@ def request_server(path, method, host='http://localhost', port=8085, data=None, response = requests.put(url, 
json=data) else: raise ValueError('Unsuported method') - return response.json() - - -def execute_mining(path='mining', *, count, host='http://localhost', port=8085, data=None, - prefix=settings.API_VERSION_PREFIX): + json_response: dict[str, Any] = response.json() + return json_response + + +def execute_mining( + path: str = 'mining', + *, + count: int, + host: str = 'http://localhost', + port: int = 8085, + prefix: str = settings.API_VERSION_PREFIX +) -> None: """Execute a mining on a given server""" from hathor.cli.mining import create_parser, execute partial_url = '{}:{}/{}/'.format(host, port, prefix) @@ -294,8 +327,16 @@ def execute_mining(path='mining', *, count, host='http://localhost', port=8085, execute(args) -def execute_tx_gen(*, count, address=None, value=None, timestamp=None, host='http://localhost', port=8085, data=None, - prefix=settings.API_VERSION_PREFIX): +def execute_tx_gen( + *, + count: int, + address: str | None = None, + value: int | None = None, + timestamp: str | None = None, + host: str = 'http://localhost', + port: int = 8085, + prefix: str = settings.API_VERSION_PREFIX +) -> None: """Execute a tx generator on a given server""" from hathor.cli.tx_generator import create_parser, execute url = '{}:{}/{}/'.format(host, port, prefix) @@ -311,7 +352,7 @@ def execute_tx_gen(*, count, address=None, value=None, timestamp=None, host='htt execute(args) -def get_genesis_key(): +def get_genesis_key() -> ec.EllipticCurvePrivateKeyWithSerialization: private_key_bytes = base64.b64decode( 'MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgOCgCddzDZsfKgiMJLOt97eov9RLwHeePyBIK2WPF8MChRA' 'NCAAQ/XSOK+qniIY0F3X+lDrb55VQx5jWeBLhhzZnH6IzGVTtlAj9Ki73DVBm5+VXK400Idd6ddzS7FahBYYC7IaTl' @@ -378,7 +419,7 @@ def create_tokens(manager: 'HathorManager', address_b58: Optional[str] = None, m assert genesis_hash is not None deposit_input = [TxInput(genesis_hash, 0, b'')] change_output = TxOutput(genesis_block.outputs[0].value - deposit_amount, script, 0) - parents = 
[cast(bytes, tx.hash) for tx in genesis_txs] + parents = [tx.hash for tx in genesis_txs] timestamp = int(manager.reactor.seconds()) else: total_reward = 0 @@ -539,6 +580,7 @@ class EventMocker: hash='abc', nonce=123, timestamp=456, + signal_bits=0, version=1, weight=10, inputs=[],