diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c2e22d2cc..624cd4b0d 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,9 +28,7 @@ jobs: - '3.10' - '3.11' exclude: - # XXX: neither pypy-3.10 nor pypy-3.11 exist yet, maybe pypy-3.10 will be out on PyPy v7.3.10 - - python-impl: pypy - python-version: '3.10' + # XXX: pypy-3.11 does not exist yet - python-impl: pypy python-version: '3.11' steps: @@ -52,8 +50,9 @@ jobs: python extras/github/docker.py - name: Check version if: steps.prep.outputs.check-version - run: | - make check-version VERSION='${{ steps.prep.outputs.check-version }}' + env: + VERSION: ${{ steps.prep.outputs.check-version }} + run: make check-custom - name: Set up QEMU # arm64 is not available natively uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx diff --git a/Dockerfile b/Dockerfile index bb0d7bd4a..f1e445072 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ # before changing these variables, make sure the tag $PYTHON-alpine$ALPINE exists first # list of valid tags hese: https://hub.docker.com/_/python -ARG PYTHON=3.9 +ARG PYTHON=3.10 ARG DEBIAN=bullseye # stage-0: copy pyproject.toml/poetry.lock and install the production set of dependencies diff --git a/Dockerfile.pypy b/Dockerfile.pypy index fe4799ed8..4ad4b1c6d 100644 --- a/Dockerfile.pypy +++ b/Dockerfile.pypy @@ -1,6 +1,6 @@ # before changing these variables, make sure the tag $PYTHON-alpine$ALPINE exists first # list of valid tags hese: https://hub.docker.com/_/pypy -ARG PYTHON=3.9 +ARG PYTHON=3.10 ARG DEBIAN=bullseye # stage-0: copy pyproject.toml/poetry.lock and install the production set of dependencies diff --git a/Makefile b/Makefile index 3c6f57fd8..563e86410 100644 --- a/Makefile +++ b/Makefile @@ -78,15 +78,15 @@ isort-check: yamllint: yamllint . 
-.PHONY: check-version -check-version: - bash ./extras/check_version.sh $(VERSION) +.PHONY: check-custom +check-custom: + bash ./extras/custom_checks.sh .PHONY: check -check: check-version yamllint flake8 isort-check mypy +check: check-custom yamllint flake8 isort-check mypy .PHONY: dcheck -dcheck: check-version yamllint flake8 isort-check dmypy +dcheck: check-custom yamllint flake8 isort-check dmypy # formatting: diff --git a/extras/check_version.sh b/extras/check_version.sh deleted file mode 100755 index e62cfbe01..000000000 --- a/extras/check_version.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -### -# This script will check all source files containing the project version and exit with an error code -1 in case -# they don't match. -# -# usage: ./extras/check_version.sh [version] -# -# example: ./extras/check_version.sh 0.52.1 -# -# When a version is provided, it is checked against the package version. -### - -OPENAPI_FILE="hathor/cli/openapi_files/openapi_base.json" -SRC_FILE="hathor/version.py" -PACKAGE_FILE="pyproject.toml" - -OPENAPI_VERSION=`grep "version\":" ${OPENAPI_FILE} | cut -d'"' -f4` -SRC_VERSION=`grep "BASE_VERSION =" ${SRC_FILE} | cut -d "'" -f2` -PACKAGE_VERSION=`grep '^version' ${PACKAGE_FILE} | cut -d '"' -f2` - -# For debugging: -# echo x${SRC_VERSION}x -# echo x${OPENAPI_VERSION}x -# echo x${PACKAGE_VERSION}x - -EXITCODE=0 - -if [[ x${PACKAGE_VERSION}x != x${SRC_VERSION}x ]]; then - echo "Version different in ${PACKAGE_FILE} and ${SRC_FILE}" - EXITCODE=-1 -fi - -if [[ x${PACKAGE_VERSION}x != x${OPENAPI_VERSION}x ]]; then - echo "Version different in ${PACKAGE_FILE} and ${OPENAPI_FILE}" - EXITCODE=-1 -fi - -# We expect an optional argument containing a version string to be checked against the others -if [[ $# -eq 1 ]]; then - if [[ x${PACKAGE_VERSION}x != x$1x ]]; then - echo "Version different in ${PACKAGE_FILE} and passed argument" - EXITCODE=-1 - fi -fi - -exit $EXITCODE diff --git a/extras/custom_checks.sh b/extras/custom_checks.sh new 
file mode 100644 index 000000000..9ae088714 --- /dev/null +++ b/extras/custom_checks.sh @@ -0,0 +1,112 @@ +#!/bin/bash + +# Define colors +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + +# Source dirs +SOURCE_DIRS=(hathor tests) + +# Define your custom linter check functions here +# Each function should return 0 if everything is OK, and 1 if something is wrong. + +function check_version_match() { + # This function will check all source files containing the project version and return 1 in case + # they don't match. When a version is provided as an environment variable, it is checked against the package version. + + OPENAPI_FILE="hathor/cli/openapi_files/openapi_base.json" + SRC_FILE="hathor/version.py" + PACKAGE_FILE="pyproject.toml" + + OPENAPI_VERSION=`grep "version\":" ${OPENAPI_FILE} | cut -d'"' -f4` + SRC_VERSION=`grep "BASE_VERSION =" ${SRC_FILE} | cut -d "'" -f2` + PACKAGE_VERSION=`grep '^version' ${PACKAGE_FILE} | cut -d '"' -f2` + + # For debugging: + # echo x${SRC_VERSION}x + # echo x${OPENAPI_VERSION}x + # echo x${PACKAGE_VERSION}x + + EXITCODE=0 + + if [[ x${PACKAGE_VERSION}x != x${SRC_VERSION}x ]]; then + echo "Version different in ${PACKAGE_FILE} and ${SRC_FILE}" + EXITCODE=1 + fi + + if [[ x${PACKAGE_VERSION}x != x${OPENAPI_VERSION}x ]]; then + echo "Version different in ${PACKAGE_FILE} and ${OPENAPI_FILE}" + EXITCODE=1 + fi + + # We expect an optional environment variable containing a version string to be checked against the others + if [[ -n ${VERSION} ]]; then + if [[ x${PACKAGE_VERSION}x != x${VERSION}x ]]; then + echo "Version different in ${PACKAGE_FILE} and VERSION environment variable" + EXITCODE=1 + fi + fi + + return $EXITCODE +} + +function check_do_not_use_builtin_random_in_tests() { + # If the check fails, return 1 + # If the check passes, return 0 + exclude=( + hathor/merged_mining/debug_api.py + hathor/client.py + hathor/cli/tx_generator.py + ) + exclude_params=() + for item in "${exclude[@]}"; do + 
exclude_params+=(-not -path "*$item*") + done + if find "${SOURCE_DIRS[@]}" "${exclude_params[@]}" -type f -print0 | xargs -0 grep -l '\'; then + echo '"import random" found in the files above' + echo 'use `self.rng` or `hathor.util.Random` instead of `random`' + return 1 + fi + return 0 +} + +function check_deprecated_typing() { + if grep -R '\' "${SOURCE_DIRS[@]}"; then + echo 'do not use typing.List/Tuple/Dict/... for type annotations use builtin list/tuple/dict/... instead' + echo 'for more info check the PEP 585 doc: https://peps.python.org/pep-0585/' + return 1 + fi + return 0 +} + +# List of functions to be executed +checks=( + check_version_match + check_do_not_use_builtin_random_in_tests + check_deprecated_typing +) + +# Initialize a variable to track if any check fails +any_check_failed=0 + +# Loop over all checks +for check in "${checks[@]}"; do + $check + result=$? + if [ $result -ne 0 ]; then + echo -e "${RED}Check $check FAILED${NC}" + any_check_failed=1 + else + echo -e "${GREEN}Check $check PASSED${NC}" + fi +done + +# Exit with code 0 if no check failed, otherwise exit with code 1 +if [ $any_check_failed -eq 0 ]; then + echo -e "${GREEN}All checks PASSED${NC}" + exit 0 +else + echo -e "${RED}Some checks FAILED${NC}" + exit 1 +fi diff --git a/extras/gen_release_candidate_changes.py b/extras/gen_release_candidate_changes.py new file mode 100755 index 000000000..a6d85de1e --- /dev/null +++ b/extras/gen_release_candidate_changes.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +""" +This script finds all PRs that have been merged into the `master` branch but not into the `release-candidate` branch in a given GitHub repository. 
+ +Usage: + + ./extras/gen_release_candidate_changes.py + +Example output: + +``` +- #701 +- #697 +- #686 +``` +""" + +import yaml +import os +import requests + +BASE_API_URL = 'https://api.github.com' +REPO = 'HathorNetwork/hathor-core' + + +def get_gh_token(): + config_path = os.path.expanduser('~/.config/gh/hosts.yml') + + if not os.path.exists(config_path): + print("GitHub CLI configuration not found. Please authenticate with 'gh auth login'.") + exit(1) + + with open(config_path, 'r') as file: + config = yaml.safe_load(file) + + token = config['github.com']['oauth_token'] + return token + + +def get_headers(token): + return {'Authorization': f'token {token}'} + + +def get_commits_ahead(base, compare, token): + response = requests.get( + f'{BASE_API_URL}/repos/{REPO}/compare/{base}...{compare}', + headers=get_headers(token) + ) + data = response.json() + return [commit['sha'] for commit in data['commits']] + + +def get_pr_for_commit(commit, token): + response = requests.get( + f'{BASE_API_URL}/repos/{REPO}/commits/{commit}/pulls', + headers=get_headers(token), + params={'state': 'all'} + ) + data = response.json() + if data: + return data[0]['number'] + return None + + +def get_new_prs_in_master(token): + commits = get_commits_ahead('release-candidate', 'master', token) + prs = [] + for commit in commits: + pr = get_pr_for_commit(commit, token) + if pr and pr not in prs: + prs.append(pr) + return prs + + +if __name__ == '__main__': + token = get_gh_token() + prs = get_new_prs_in_master(token) + for pr in prs: + print(f'- #{pr}') diff --git a/extras/github/test_docker.py b/extras/github/test_docker.py index d52f0b8dc..eda6dbafc 100644 --- a/extras/github/test_docker.py +++ b/extras/github/test_docker.py @@ -17,7 +17,7 @@ def test_nightly_build_no_github_secret(self): 'GITHUB_EVENT_DEFAULT_BRANCH': 'master', 'GITHUB_EVENT_NUMBER': '', 'MATRIX_PYTHON_IMPL': 'python', - 'MATRIX_PYTHON_VERSION': '3.9', + 'MATRIX_PYTHON_VERSION': '3.10', 'SECRETS_DOCKERHUB_IMAGE': 
'', 'SECRETS_GHCR_IMAGE': '', }) @@ -32,7 +32,7 @@ def test_nightly_build_no_github_secret(self): output = prep_tags(os.environ, base_version, is_release_candidate) self.assertEqual(output['slack-notification-version'], base_version) - self.assertEqual(output['version'], base_version + '-python3.9') + self.assertEqual(output['version'], base_version + '-python3.10') self.assertEqual(output['login-dockerhub'], 'false') self.assertEqual(output['login-ghcr'], 'false') self.assertEqual(output['tags'], 'dont-push--local-only') @@ -47,7 +47,7 @@ def test_nightly_build(self): 'GITHUB_EVENT_DEFAULT_BRANCH': 'master', 'GITHUB_EVENT_NUMBER': '', 'MATRIX_PYTHON_IMPL': 'python', - 'MATRIX_PYTHON_VERSION': '3.9', + 'MATRIX_PYTHON_VERSION': '3.10', 'SECRETS_DOCKERHUB_IMAGE': 'mock_image', 'SECRETS_GHCR_IMAGE': '', }) @@ -62,12 +62,12 @@ def test_nightly_build(self): output = prep_tags(os.environ, base_version, is_release_candidate) self.assertEqual(output['slack-notification-version'], base_version) - self.assertEqual(output['version'], base_version + '-python3.9') + self.assertEqual(output['version'], base_version + '-python3.10') self.assertEqual(output['login-dockerhub'], 'true') self.assertEqual(output['login-ghcr'], 'false') self.assertEqual(len(output['tags'].split(',')), 2) self.assertIn('mock_image:nightly-55629a7d', output['tags'].split(',')) - self.assertIn('mock_image:nightly-55629a7d-python3.9', output['tags'].split(',')) + self.assertIn('mock_image:nightly-55629a7d-python3.10', output['tags'].split(',')) self.assertEqual(output['push'], 'true') self.assertEqual(output['dockerfile'], 'Dockerfile') @@ -110,7 +110,7 @@ def test_release_candidate_default_python(self): 'GITHUB_EVENT_DEFAULT_BRANCH': 'master', 'GITHUB_EVENT_NUMBER': '', 'MATRIX_PYTHON_IMPL': 'python', - 'MATRIX_PYTHON_VERSION': '3.9', + 'MATRIX_PYTHON_VERSION': '3.10', 'SECRETS_DOCKERHUB_IMAGE': 'mock_image', 'SECRETS_GHCR_IMAGE': '', }) @@ -140,7 +140,7 @@ def test_release_default_python(self): 
'GITHUB_EVENT_DEFAULT_BRANCH': 'master', 'GITHUB_EVENT_NUMBER': '', 'MATRIX_PYTHON_IMPL': 'python', - 'MATRIX_PYTHON_VERSION': '3.9', + 'MATRIX_PYTHON_VERSION': '3.10', 'SECRETS_DOCKERHUB_IMAGE': 'mock_image', 'SECRETS_GHCR_IMAGE': '', }) @@ -155,12 +155,12 @@ def test_release_default_python(self): output = prep_tags(os.environ, base_version, is_release_candidate) self.assertEqual(output['slack-notification-version'], base_version) - self.assertEqual(output['version'], base_version + '-python3.9') + self.assertEqual(output['version'], base_version + '-python3.10') self.assertEqual(output['login-dockerhub'], 'true') self.assertEqual(output['login-ghcr'], 'false') self.assertEqual(len(output['tags'].split(',')), 4) - self.assertIn('mock_image:v0.53-python3.9', output['tags'].split(',')) - self.assertIn('mock_image:v0.53.0-python3.9', output['tags'].split(',')) + self.assertIn('mock_image:v0.53-python3.10', output['tags'].split(',')) + self.assertIn('mock_image:v0.53.0-python3.10', output['tags'].split(',')) self.assertIn('mock_image:v0.53.0', output['tags'].split(',')) self.assertIn('mock_image:latest', output['tags'].split(',')) self.assertEqual(output['push'], 'true') diff --git a/hathor/cli/events_simulator/events_simulator.py b/hathor/cli/events_simulator/events_simulator.py index 7d09f1cfa..d4beea9de 100644 --- a/hathor/cli/events_simulator/events_simulator.py +++ b/hathor/cli/events_simulator/events_simulator.py @@ -22,22 +22,29 @@ def create_parser() -> ArgumentParser: from hathor.cli.util import create_parser parser = create_parser() - possible_scenarios = [scenario.value for scenario in Scenario] + possible_scenarios = [scenario.name for scenario in Scenario] - parser.add_argument('--scenario', help=f'One of {possible_scenarios}', type=Scenario, required=True) + parser.add_argument('--scenario', help=f'One of {possible_scenarios}', type=str, required=True) parser.add_argument('--port', help='Port to run the WebSocket server', type=int, default=DEFAULT_PORT) 
return parser def execute(args: Namespace) -> None: + from hathor.cli.events_simulator.scenario import Scenario from hathor.event.storage import EventMemoryStorage from hathor.event.websocket import EventWebsocketFactory from hathor.util import reactor + try: + scenario = Scenario[args.scenario] + except KeyError as e: + possible_scenarios = [scenario.name for scenario in Scenario] + raise ValueError(f'Invalid scenario "{args.scenario}". Choose one of {possible_scenarios}') from e + storage = EventMemoryStorage() - for event in args.scenario.value: + for event in scenario.value: storage.save_event(event) factory = EventWebsocketFactory(reactor, storage) diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py index f4b74eb04..00cbebe0e 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -23,6 +23,7 @@ from hathor.cli.run_node_args import RunNodeArgs from hathor.conf import TESTNET_SETTINGS_FILEPATH, HathorSettings from hathor.exception import PreInitializationError +from hathor.feature_activation.feature import Feature logger = get_logger() # LOGGING_CAPTURE_STDOUT = True @@ -112,6 +113,11 @@ def create_parser(cls) -> ArgumentParser: parser.add_argument('--peer-id-blacklist', action='extend', default=[], nargs='+', type=str, help='Peer IDs to forbid connection') parser.add_argument('--config-yaml', type=str, help='Configuration yaml filepath') + possible_features = [feature.value for feature in Feature] + parser.add_argument('--signal-support', default=[], action='append', choices=possible_features, + help=f'Signal support for a feature. One of {possible_features}') + parser.add_argument('--signal-not-support', default=[], action='append', choices=possible_features, + help=f'Signal not support for a feature. 
One of {possible_features}') return parser def prepare(self, *, register_resources: bool = True) -> None: diff --git a/hathor/cli/run_node_args.py b/hathor/cli/run_node_args.py index ffc0eb044..eb9ddcd0c 100644 --- a/hathor/cli/run_node_args.py +++ b/hathor/cli/run_node_args.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional +from typing import Optional from pydantic import Extra +from hathor.feature_activation.feature import Feature from hathor.utils.pydantic import BaseModel @@ -32,8 +33,8 @@ class RunNodeArgs(BaseModel, extra=Extra.allow): dns: Optional[str] peer: Optional[str] sysctl: Optional[str] - listen: List[str] - bootstrap: Optional[List[str]] + listen: list[str] + bootstrap: Optional[list[str]] status: Optional[int] stratum: Optional[int] data: Optional[str] @@ -68,5 +69,7 @@ class RunNodeArgs(BaseModel, extra=Extra.allow): x_localhost_only: bool x_rocksdb_indexes: bool x_enable_event_queue: bool - peer_id_blacklist: List[str] + peer_id_blacklist: list[str] config_yaml: Optional[str] + signal_support: set[Feature] + signal_not_support: set[Feature] diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 3f4a9309c..884c4e8e0 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -46,6 +46,10 @@ class HathorSettings(NamedTuple): # enable peer whitelist ENABLE_PEER_WHITELIST: bool = False + # whether to use the whitelist with sync-v2 peers, does not affect whether the whitelist is enabled or not, it will + # always be enabled for sync-v1 if it is enabled + USE_PEER_WHITELIST_ON_SYNC_V2: bool = True + DECIMAL_PLACES: int = DECIMAL_PLACES # Genesis pre-mined tokens @@ -343,6 +347,7 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: # Capabilities CAPABILITY_WHITELIST: str = 'whitelist' CAPABILITY_SYNC_VERSION: str = 'sync-version' + CAPABILITY_GET_BEST_BLOCKCHAIN: str = 'get-best-blockchain' # Where to download whitelist from 
WHITELIST_URL: Optional[str] = None @@ -393,6 +398,15 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: # Maximum number of GET_TIPS delayed calls per connection while running sync. MAX_GET_TIPS_DELAYED_CALLS: int = 5 + # Maximum number of blocks in the best blockchain list. + MAX_BEST_BLOCKCHAIN_BLOCKS: int = 20 + + # Default number of blocks in the best blockchain list. + DEFAULT_BEST_BLOCKCHAIN_BLOCKS: int = 10 + + # Time in seconds to request the best blockchain from peers. + BEST_BLOCKCHAIN_INTERVAL: int = 5 # seconds + @classmethod def from_yaml(cls, *, filepath: str) -> 'HathorSettings': """Takes a filepath to a yaml file and returns a validated HathorSettings instance.""" diff --git a/hathor/consensus/consensus.py b/hathor/consensus/consensus.py index 6391a8470..185288751 100644 --- a/hathor/consensus/consensus.py +++ b/hathor/consensus/consensus.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Set - from structlog import get_logger from hathor.conf import HathorSettings @@ -57,7 +55,7 @@ class ConsensusAlgorithm: b0 will not be propagated to the voided_by of b1, b2, and b3. 
""" - def __init__(self, soft_voided_tx_ids: Set[bytes], pubsub: PubSubManager) -> None: + def __init__(self, soft_voided_tx_ids: set[bytes], pubsub: PubSubManager) -> None: self.log = logger.new() self._pubsub = pubsub self.soft_voided_tx_ids = frozenset(soft_voided_tx_ids) @@ -72,10 +70,11 @@ def create_context(self) -> ConsensusAlgorithmContext: def update(self, base: BaseTransaction) -> None: assert base.storage is not None assert base.storage.is_only_valid_allowed() + meta = base.get_metadata() + assert meta.validation.is_valid() try: self._unsafe_update(base) except Exception: - meta = base.get_metadata() meta.add_voided_by(settings.CONSENSUS_FAIL_ID) assert base.storage is not None base.storage.save_transaction(base, only_metadata=True) @@ -146,7 +145,7 @@ def _unsafe_update(self, base: BaseTransaction) -> None: if context.reorg_common_block is not None: context.pubsub.publish(HathorEvents.REORG_FINISHED) - def filter_out_soft_voided_entries(self, tx: BaseTransaction, voided_by: Set[bytes]) -> Set[bytes]: + def filter_out_soft_voided_entries(self, tx: BaseTransaction, voided_by: set[bytes]) -> set[bytes]: if not (self.soft_voided_tx_ids & voided_by): return voided_by ret = set() @@ -162,7 +161,7 @@ def filter_out_soft_voided_entries(self, tx: BaseTransaction, voided_by: Set[byt assert tx.storage is not None tx3 = tx.storage.get_transaction(h) tx3_meta = tx3.get_metadata() - tx3_voided_by: Set[bytes] = tx3_meta.voided_by or set() + tx3_voided_by: set[bytes] = tx3_meta.voided_by or set() if not (self.soft_voided_tx_ids & tx3_voided_by): ret.add(h) return ret diff --git a/hathor/consensus/context.py b/hathor/consensus/context.py index 85ce65536..0e74737ae 100644 --- a/hathor/consensus/context.py +++ b/hathor/consensus/context.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Optional, Set +from typing import TYPE_CHECKING, Optional from structlog import get_logger @@ -41,7 +41,7 @@ class ConsensusAlgorithmContext: pubsub: PubSubManager block_algorithm: 'BlockConsensusAlgorithm' transaction_algorithm: 'TransactionConsensusAlgorithm' - txs_affected: Set[BaseTransaction] + txs_affected: set[BaseTransaction] reorg_common_block: Optional[Block] def __init__(self, consensus: 'ConsensusAlgorithm', pubsub: PubSubManager) -> None: diff --git a/hathor/event/websocket/factory.py b/hathor/event/websocket/factory.py index 6f7a8b1f5..f13a425f6 100644 --- a/hathor/event/websocket/factory.py +++ b/hathor/event/websocket/factory.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional, Set +from typing import Optional from autobahn.twisted.websocket import WebSocketServerFactory from structlog import get_logger @@ -42,7 +42,7 @@ def __init__(self, reactor: Reactor, event_storage: EventStorage): self.log = logger.new() self._reactor = reactor self._event_storage = event_storage - self._connections: Set[EventWebsocketProtocol] = set() + self._connections: set[EventWebsocketProtocol] = set() latest_event = self._event_storage.get_last_event() diff --git a/hathor/feature_activation/bit_signaling_service.py b/hathor/feature_activation/bit_signaling_service.py new file mode 100644 index 000000000..88a1d38b4 --- /dev/null +++ b/hathor/feature_activation/bit_signaling_service.py @@ -0,0 +1,147 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from structlog import get_logger + +from hathor.feature_activation.feature import Feature +from hathor.feature_activation.feature_service import FeatureService +from hathor.feature_activation.model.criteria import Criteria +from hathor.feature_activation.model.feature_state import FeatureState +from hathor.feature_activation.settings import Settings as FeatureSettings +from hathor.transaction import Block +from hathor.transaction.storage import TransactionStorage + +logger = get_logger() + + +class BitSignalingService: + __slots__ = ( + '_log', + '_feature_settings', + '_feature_service', + '_tx_storage', + '_support_features', + '_not_support_features' + ) + + def __init__( + self, + *, + feature_settings: FeatureSettings, + feature_service: FeatureService, + tx_storage: TransactionStorage, + support_features: set[Feature], + not_support_features: set[Feature] + ) -> None: + self._log = logger.new() + self._feature_settings = feature_settings + self._feature_service = feature_service + self._tx_storage = tx_storage + self._support_features = support_features + self._not_support_features = not_support_features + + self._validate_support_intersection() + + def start(self) -> None: + best_block = self._tx_storage.get_best_block() + + self._warn_non_signaling_features(best_block) + self._log_feature_signals(best_block) + + def generate_signal_bits(self, *, block: Block, log: bool = False) -> int: + """ + Generate signal bits considering a given block. The block is used to determine which features are currently in + a signaling period. 
+ + Args: + block: the block that is used to determine signaling features. + log: whether to log the signal for each feature. + + Returns: a number that represents the signal bits in binary. + """ + signaling_features = self._get_signaling_features(block) + signal_bits = 0 + + for feature, criteria in signaling_features.items(): + default_enable_bit = criteria.signal_support_by_default + support = feature in self._support_features + not_support = feature in self._not_support_features + enable_bit = (default_enable_bit or support) and not not_support + + if log: + self._log_signal_bits(feature, enable_bit, support, not_support) + + signal_bits |= int(enable_bit) << criteria.bit + + return signal_bits + + def _log_signal_bits(self, feature: Feature, enable_bit: bool, support: bool, not_support: bool) -> None: + """Generate info log for a feature's signal.""" + signal = 'enabled' if enable_bit else 'disabled' + reason = 'using default feature signal' + + if support: + reason = 'user signaled support' + + if not_support: + reason = 'user signaled not support' + + self._log.info( + 'Configuring support signal for feature.', + feature=feature.value, + signal=signal, + reason=reason + ) + + def _get_signaling_features(self, block: Block) -> dict[Feature, Criteria]: + """Given a specific block, return all features that are in a signaling state for that block.""" + feature_descriptions = self._feature_service.get_bits_description(block=block) + signaling_features = { + feature: description.criteria + for feature, description in feature_descriptions.items() + if description.state in FeatureState.get_signaling_states() + } + + assert len(signaling_features) <= self._feature_settings.max_signal_bits, ( + 'Invalid state. Signaling more features than the allowed maximum.' 
+ ) + + return signaling_features + + def _validate_support_intersection(self) -> None: + """Validate that the provided support and not-support arguments do not conflict.""" + if intersection := self._support_features.intersection(self._not_support_features): + feature_names = [feature.value for feature in intersection] + raise ValueError(f'Cannot signal both "support" and "not support" for features {feature_names}') + + def _warn_non_signaling_features(self, best_block: Block) -> None: + """Generate a warning log if any signaled features are currently not in a signaling state.""" + currently_signaling_features = self._get_signaling_features(best_block) + signaled_features = self._support_features.union(self._not_support_features) + + if non_signaling_features := signaled_features.difference(currently_signaling_features): + feature_names = {feature.value for feature in non_signaling_features} + self._log.warn( + 'Considering the current best block, there are signaled features outside their signaling period. ' + 'Therefore, signaling for them has no effect. 
Make sure you are signaling for the desired features.', + best_block_hash=best_block.hash_hex, + best_block_height=best_block.get_height(), + non_signaling_features=feature_names + ) + + def _log_feature_signals(self, best_block: Block) -> None: + """Generate info logs for each feature's current signal.""" + signal_bits = self.generate_signal_bits(block=best_block, log=True) + + self._log.debug(f'Configured signal bits: {bin(signal_bits)[2:]}') diff --git a/hathor/feature_activation/feature.py b/hathor/feature_activation/feature.py index 3c626f46f..9cfd99ec9 100644 --- a/hathor/feature_activation/feature.py +++ b/hathor/feature_activation/feature.py @@ -25,3 +25,4 @@ class Feature(Enum): NOP_FEATURE_1 = 'NOP_FEATURE_1' NOP_FEATURE_2 = 'NOP_FEATURE_2' + NOP_FEATURE_3 = 'NOP_FEATURE_3' diff --git a/hathor/feature_activation/feature_service.py b/hathor/feature_activation/feature_service.py index b45cc717c..9d3d82c28 100644 --- a/hathor/feature_activation/feature_service.py +++ b/hathor/feature_activation/feature_service.py @@ -50,6 +50,7 @@ def get_state(self, *, block: Block, feature: Feature) -> FeatureState: offset_to_boundary = height % self._feature_settings.evaluation_interval offset_to_previous_boundary = offset_to_boundary or self._feature_settings.evaluation_interval previous_boundary_height = height - offset_to_previous_boundary + assert previous_boundary_height >= 0 previous_boundary_block = self._get_ancestor_at_height(block=block, height=previous_boundary_height) previous_boundary_state = self.get_state(block=previous_boundary_block, feature=feature) @@ -167,6 +168,7 @@ def _get_ancestor_iteratively(*, block: Block, ancestor_height: int) -> Block: """Given a block, returns its ancestor at a specific height by iterating over its ancestors. 
This is slow.""" # TODO: there are further optimizations to be done here, the latest common block height could be persisted in # metadata, so we could still use the height index if the requested height is before that height. + assert ancestor_height >= 0 ancestor = block while ancestor.get_height() > ancestor_height: ancestor = ancestor.get_block_parent() diff --git a/hathor/feature_activation/model/criteria.py b/hathor/feature_activation/model/criteria.py index 27313c112..c70b76b3f 100644 --- a/hathor/feature_activation/model/criteria.py +++ b/hathor/feature_activation/model/criteria.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Any, ClassVar, Optional +from typing import TYPE_CHECKING, Any, Optional from pydantic import Field, NonNegativeInt, validator @@ -27,10 +27,12 @@ class Criteria(BaseModel, validate_all=True): """ Represents the configuration for a certain feature activation criteria. + Note: the to_validated() method must be called to perform all attribute validations. + Attributes: - evaluation_interval: the number of blocks in the feature activation evaluation interval. Class variable. + evaluation_interval: the number of blocks in the feature activation evaluation interval. - max_signal_bits: the number of bits used in the first byte of a block's version field. Class variable. + max_signal_bits: the number of bits used in the first byte of a block's version field. bit: which bit in the version field of the block is going to be used to signal the feature support by miners. @@ -46,9 +48,11 @@ class Criteria(BaseModel, validate_all=True): the timeout_height is reached, effectively forcing activation. version: the client version of hathor-core at which this feature was defined. + + signal_support_by_default: the default miner support signal for this feature. 
""" - evaluation_interval: ClassVar[Optional[int]] = None - max_signal_bits: ClassVar[Optional[int]] = None + evaluation_interval: Optional[int] = None + max_signal_bits: Optional[int] = None bit: NonNegativeInt start_height: NonNegativeInt @@ -57,30 +61,53 @@ class Criteria(BaseModel, validate_all=True): minimum_activation_height: NonNegativeInt = 0 lock_in_on_timeout: bool = False version: str = Field(..., regex=version.BUILD_VERSION_REGEX) + signal_support_by_default: bool = False + + def to_validated(self, evaluation_interval: int, max_signal_bits: int) -> 'ValidatedCriteria': + """Create a validated version of self, including attribute validations that have external dependencies.""" + return ValidatedCriteria( + evaluation_interval=evaluation_interval, + max_signal_bits=max_signal_bits, + bit=self.bit, + start_height=self.start_height, + timeout_height=self.timeout_height, + threshold=self.threshold, + minimum_activation_height=self.minimum_activation_height, + lock_in_on_timeout=self.lock_in_on_timeout, + version=self.version, + signal_support_by_default=self.signal_support_by_default + ) def get_threshold(self, feature_settings: 'FeatureSettings') -> int: """Returns the configured threshold, or the default threshold if it is None.""" return self.threshold if self.threshold is not None else feature_settings.default_threshold + +class ValidatedCriteria(Criteria): + """ + Wrapper class for Criteria that holds its field validations. Can be created using Criteria.to_validated(). 
+ """ @validator('bit') - def _validate_bit(cls, bit: int) -> int: + def _validate_bit(cls, bit: int, values: dict[str, Any]) -> int: """Validates that the bit is lower than the max_signal_bits.""" - assert Criteria.max_signal_bits is not None, 'Criteria.max_signal_bits must be set' + max_signal_bits = values.get('max_signal_bits') + assert max_signal_bits is not None, 'max_signal_bits must be set' - if bit >= Criteria.max_signal_bits: - raise ValueError(f'bit must be lower than max_signal_bits: {bit} >= {Criteria.max_signal_bits}') + if bit >= max_signal_bits: + raise ValueError(f'bit must be lower than max_signal_bits: {bit} >= {max_signal_bits}') return bit @validator('timeout_height') def _validate_timeout_height(cls, timeout_height: int, values: dict[str, Any]) -> int: """Validates that the timeout_height is greater than the start_height.""" - assert Criteria.evaluation_interval is not None, 'Criteria.evaluation_interval must be set' + evaluation_interval = values.get('evaluation_interval') + assert evaluation_interval is not None, 'evaluation_interval must be set' start_height = values.get('start_height') assert start_height is not None, 'start_height must be set' - minimum_timeout_height = start_height + 2 * Criteria.evaluation_interval + minimum_timeout_height = start_height + 2 * evaluation_interval if timeout_height < minimum_timeout_height: raise ValueError(f'timeout_height must be at least two evaluation intervals after the start_height: ' @@ -89,25 +116,27 @@ def _validate_timeout_height(cls, timeout_height: int, values: dict[str, Any]) - return timeout_height @validator('threshold') - def _validate_threshold(cls, threshold: Optional[int]) -> Optional[int]: + def _validate_threshold(cls, threshold: Optional[int], values: dict[str, Any]) -> Optional[int]: """Validates that the threshold is not greater than the evaluation_interval.""" - assert Criteria.evaluation_interval is not None, 'Criteria.evaluation_interval must be set' + evaluation_interval = 
values.get('evaluation_interval') + assert evaluation_interval is not None, 'evaluation_interval must be set' - if threshold is not None and threshold > Criteria.evaluation_interval: + if threshold is not None and threshold > evaluation_interval: raise ValueError( - f'threshold must not be greater than evaluation_interval: {threshold} > {Criteria.evaluation_interval}' + f'threshold must not be greater than evaluation_interval: {threshold} > {evaluation_interval}' ) return threshold @validator('start_height', 'timeout_height', 'minimum_activation_height') - def _validate_evaluation_interval_multiple(cls, value: int) -> int: + def _validate_evaluation_interval_multiple(cls, value: int, values: dict[str, Any]) -> int: """Validates that the value is a multiple of evaluation_interval.""" - assert Criteria.evaluation_interval is not None, 'Criteria.evaluation_interval must be set' + evaluation_interval = values.get('evaluation_interval') + assert evaluation_interval is not None, 'evaluation_interval must be set' - if value % Criteria.evaluation_interval != 0: + if value % evaluation_interval != 0: raise ValueError( - f'Should be a multiple of evaluation_interval: {value} % {Criteria.evaluation_interval} != 0' + f'Should be a multiple of evaluation_interval: {value} % {evaluation_interval} != 0' ) return value diff --git a/hathor/feature_activation/model/feature_state.py b/hathor/feature_activation/model/feature_state.py index 78c94f8a9..bb781f5eb 100644 --- a/hathor/feature_activation/model/feature_state.py +++ b/hathor/feature_activation/model/feature_state.py @@ -22,6 +22,8 @@ class FeatureState(Enum): Attributes: DEFINED: Represents that a feature is defined. It's the first state for each feature. STARTED: Represents that the activation process for some feature is started. + MUST_SIGNAL: Represents that a feature is going to be locked-in, and that miners must signal support for it. + LOCKED_IN: Represents that a feature is going to be activated. 
ACTIVE: Represents that a certain feature is activated. FAILED: Represents that a certain feature is not and will never be activated. """ @@ -32,3 +34,11 @@ class FeatureState(Enum): LOCKED_IN = 'LOCKED_IN' ACTIVE = 'ACTIVE' FAILED = 'FAILED' + + @staticmethod + def get_signaling_states() -> set['FeatureState']: + """ + Return the states for which a feature is considered in its signaling period, that is, voting to either + support it or not through bit signals is valid during those states. + """ + return {FeatureState.STARTED, FeatureState.MUST_SIGNAL, FeatureState.LOCKED_IN} diff --git a/hathor/feature_activation/resources/feature.py b/hathor/feature_activation/resources/feature.py index 1774e3caf..58c8070b4 100644 --- a/hathor/feature_activation/resources/feature.py +++ b/hathor/feature_activation/resources/feature.py @@ -22,8 +22,9 @@ from hathor.feature_activation.feature_service import FeatureService from hathor.feature_activation.model.feature_state import FeatureState from hathor.feature_activation.settings import Settings as FeatureSettings +from hathor.transaction import Block from hathor.transaction.storage import TransactionStorage -from hathor.utils.api import Response +from hathor.utils.api import ErrorResponse, QueryParams, Response @register_resource @@ -48,6 +49,45 @@ def render_GET(self, request: Request) -> bytes: request.setHeader(b'content-type', b'application/json; charset=utf-8') set_cors(request, 'GET') + if request.args: + return self.get_block_features(request) + + return self.get_features() + + def get_block_features(self, request: Request) -> bytes: + params = GetBlockFeaturesParams.from_request(request) + + if isinstance(params, ErrorResponse): + return params.json_dumpb() + + block_hash = bytes.fromhex(params.block) + block = self.tx_storage.get_transaction(block_hash) + + if not isinstance(block, Block): + error = ErrorResponse(error=f"Hash '{params.block}' is not a Block.") + return error.json_dumpb() + + signal_bits = [] + 
feature_descriptions = self._feature_service.get_bits_description(block=block) + + for feature, description in feature_descriptions.items(): + if description.state not in FeatureState.get_signaling_states(): + continue + + block_feature = GetBlockFeatureResponse( + bit=description.criteria.bit, + signal=block.get_feature_activation_bit_value(description.criteria.bit), + feature=feature, + feature_state=description.state.name + ) + + signal_bits.append(block_feature) + + response = GetBlockFeaturesResponse(signal_bits=signal_bits) + + return response.json_dumpb() + + def get_features(self) -> bytes: best_block = self.tx_storage.get_best_block() bit_counts = best_block.get_feature_activation_bit_counts() features = [] @@ -85,6 +125,21 @@ def render_GET(self, request: Request) -> bytes: return response.json_dumpb() +class GetBlockFeaturesParams(QueryParams): + block: str + + +class GetBlockFeatureResponse(Response, use_enum_values=True): + bit: int + signal: int + feature: Feature + feature_state: str + + +class GetBlockFeaturesResponse(Response): + signal_bits: list[GetBlockFeatureResponse] + + class GetFeatureResponse(Response, use_enum_values=True): name: Feature state: str diff --git a/hathor/feature_activation/settings.py b/hathor/feature_activation/settings.py index be6a407bb..f9505db12 100644 --- a/hathor/feature_activation/settings.py +++ b/hathor/feature_activation/settings.py @@ -41,18 +41,6 @@ class Settings(BaseModel, validate_all=True): # neither their values changed, to preserve history. 
features: dict[Feature, Criteria] = {} - @validator('evaluation_interval') - def _process_evaluation_interval(cls, evaluation_interval: int) -> int: - """Sets the evaluation_interval on Criteria.""" - Criteria.evaluation_interval = evaluation_interval - return evaluation_interval - - @validator('max_signal_bits') - def _process_max_signal_bits(cls, max_signal_bits: int) -> int: - """Sets the max_signal_bits on Criteria.""" - Criteria.max_signal_bits = max_signal_bits - return max_signal_bits - @validator('default_threshold') def _validate_default_threshold(cls, default_threshold: int, values: dict[str, Any]) -> int: """Validates that the default_threshold is not greater than the evaluation_interval.""" @@ -67,6 +55,19 @@ def _validate_default_threshold(cls, default_threshold: int, values: dict[str, A return default_threshold + @validator('features') + def _validate_features(cls, features: dict[Feature, Criteria], values: dict[str, Any]) -> dict[Feature, Criteria]: + """Validate Criteria by calling its to_validated() method, injecting the necessary attributes.""" + evaluation_interval = values.get('evaluation_interval') + max_signal_bits = values.get('max_signal_bits') + assert evaluation_interval is not None, 'evaluation_interval must be set' + assert max_signal_bits is not None, 'max_signal_bits must be set' + + return { + feature: criteria.to_validated(evaluation_interval, max_signal_bits) + for feature, criteria in features.items() + } + @validator('features') def _validate_conflicting_bits(cls, features: dict[Feature, Criteria]) -> dict[Feature, Criteria]: """ diff --git a/hathor/indexes/deps_index.py b/hathor/indexes/deps_index.py index ef2654fe3..fc8d56a87 100644 --- a/hathor/indexes/deps_index.py +++ b/hathor/indexes/deps_index.py @@ -118,10 +118,7 @@ def get_scope(self) -> Scope: return SCOPE def init_loop_step(self, tx: BaseTransaction) -> None: - tx_meta = tx.get_metadata() - if tx_meta.voided_by: - return - self.add_tx(tx, partial=False) + 
self.add_tx(tx) def update(self, tx: BaseTransaction) -> None: assert tx.hash is not None @@ -193,6 +190,6 @@ def remove_from_needed_index(self, tx: bytes) -> None: raise NotImplementedError @abstractmethod - def get_next_needed_tx(self) -> bytes: - """Choose the start hash for downloading the needed txs""" + def iter_next_needed_txs(self) -> Iterator[bytes]: + """Iterate over the next needed transactions.""" raise NotImplementedError diff --git a/hathor/indexes/height_index.py b/hathor/indexes/height_index.py index 34b775497..167787e69 100644 --- a/hathor/indexes/height_index.py +++ b/hathor/indexes/height_index.py @@ -19,6 +19,7 @@ from hathor.indexes.scope import Scope from hathor.transaction import BaseTransaction, Block from hathor.transaction.genesis import BLOCK_GENESIS +from hathor.types import VertexId from hathor.util import not_none SCOPE = Scope( @@ -34,6 +35,12 @@ class IndexEntry(NamedTuple): timestamp: int +class HeightInfo(NamedTuple): + """Used by a few methods to represent a (height, hash) tuple.""" + height: int + id: VertexId + + BLOCK_GENESIS_ENTRY: IndexEntry = IndexEntry(not_none(BLOCK_GENESIS.hash), BLOCK_GENESIS.timestamp) @@ -84,11 +91,19 @@ def get_tip(self) -> bytes: raise NotImplementedError @abstractmethod - def get_height_tip(self) -> tuple[int, bytes]: + def get_height_tip(self) -> HeightInfo: """ Return the best block height and hash, it returns the genesis when there is no other block """ raise NotImplementedError + @abstractmethod + def get_n_height_tips(self, n_blocks: int) -> list[HeightInfo]: + """ Return the n best block height and hash list, it returns the genesis when there is no other block + + The returned list starts at the highest block and goes down in reverse height order. 
+ """ + raise NotImplementedError + def update_new_chain(self, height: int, block: Block) -> None: """ When we have a new winner chain we must update all the height index until the first height with a common block diff --git a/hathor/indexes/memory_deps_index.py b/hathor/indexes/memory_deps_index.py index b596ef98a..8d9d74a9b 100644 --- a/hathor/indexes/memory_deps_index.py +++ b/hathor/indexes/memory_deps_index.py @@ -34,6 +34,9 @@ class MemoryDepsIndex(DepsIndex): _txs_with_deps_ready: set[bytes] # Next to be downloaded + # - Key: hash of the tx to be downloaded + # - Value[0]: height + # - Value[1]: hash of the tx waiting for the download _needed_txs_index: dict[bytes, tuple[int, bytes]] def __init__(self): @@ -49,10 +52,11 @@ def force_clear(self) -> None: self._needed_txs_index = {} def add_tx(self, tx: BaseTransaction, partial: bool = True) -> None: - assert tx.hash is not None - assert tx.storage is not None validation = tx.get_metadata().validation if validation.is_fully_connected(): + # discover if new txs are ready because of this tx + self._update_new_deps_ready(tx) + # finally remove from rev deps self._del_from_deps_index(tx) elif not partial: raise ValueError('partial=False will only accept fully connected transactions') @@ -63,6 +67,19 @@ def add_tx(self, tx: BaseTransaction, partial: bool = True) -> None: def del_tx(self, tx: BaseTransaction) -> None: self._del_from_deps_index(tx) + def _update_new_deps_ready(self, tx: BaseTransaction) -> None: + """Go over the reverse dependencies of tx and check if any of them are now ready to be validated. + + This is also idempotent. 
+ """ + assert tx.hash is not None + assert tx.storage is not None + for candidate_hash in self._rev_dep_index.get(tx.hash, []): + with tx.storage.allow_partially_validated_context(): + candidate_tx = tx.storage.get_transaction(candidate_hash) + if candidate_tx.is_ready_for_validation(): + self._txs_with_deps_ready.add(candidate_hash) + def _add_deps(self, tx: BaseTransaction) -> None: """This method is idempotent, because self.update needs it to be idempotent.""" assert tx.hash is not None @@ -94,7 +111,9 @@ def next_ready_for_validation(self, tx_storage: 'TransactionStorage', *, dry_run else: cur_ready, self._txs_with_deps_ready = self._txs_with_deps_ready, set() while cur_ready: - yield from sorted(cur_ready, key=lambda tx_hash: tx_storage.get_transaction(tx_hash).timestamp) + with tx_storage.allow_partially_validated_context(): + sorted_cur_ready = sorted(cur_ready, key=lambda tx_hash: tx_storage.get_transaction(tx_hash).timestamp) + yield from sorted_cur_ready if dry_run: cur_ready = self._txs_with_deps_ready - cur_ready else: @@ -113,7 +132,8 @@ def _get_rev_deps(self, tx: bytes) -> frozenset[bytes]: def known_children(self, tx: BaseTransaction) -> list[bytes]: assert tx.hash is not None assert tx.storage is not None - it_rev_deps = map(tx.storage.get_transaction, self._get_rev_deps(tx.hash)) + with tx.storage.allow_partially_validated_context(): + it_rev_deps = map(tx.storage.get_transaction, self._get_rev_deps(tx.hash)) return [not_none(rev.hash) for rev in it_rev_deps if tx.hash in rev.parents] # needed-txs-index methods: @@ -127,18 +147,13 @@ def is_tx_needed(self, tx: bytes) -> bool: def remove_from_needed_index(self, tx: bytes) -> None: self._needed_txs_index.pop(tx, None) - def get_next_needed_tx(self) -> bytes: - # This strategy maximizes the chance to download multiple txs on the same stream - # find the tx with highest "height" - # XXX: we could cache this onto `needed_txs` so we don't have to fetch txs every time - # TODO: improve this by using
some sorted data structure to make this better than O(n) - height, start_hash, tx = max((h, s, t) for t, (h, s) in self._needed_txs_index.items()) - self.log.debug('next needed tx start', needed=len(self._needed_txs_index), start=start_hash.hex(), - height=height, needed_tx=tx.hex()) - return start_hash + def iter_next_needed_txs(self) -> Iterator[bytes]: + for tx_hash, _ in self._needed_txs_index.items(): + yield tx_hash def _add_needed(self, tx: BaseTransaction) -> None: """This method is idempotent, because self.update needs it to be idempotent.""" + assert tx.hash is not None assert tx.storage is not None tx_storage = tx.storage @@ -147,9 +162,14 @@ def _add_needed(self, tx: BaseTransaction) -> None: # get_all_dependencies is needed to ensure that we get the inputs that aren't reachable through parents alone, # this can happen for inputs that have not been confirmed as of the block that confirms the block or transaction # that we're adding the dependencies of - for tx_hash in tx.get_all_dependencies(): # It may happen that we have one of the dependencies already, so just add the ones we don't # have.
We should add at least one dependency, otherwise this tx should be full validated - if not tx_storage.transaction_exists(tx_hash): - self.log.debug('tx parent is needed', tx=tx_hash.hex()) - self._needed_txs_index[tx_hash] = (height, not_none(tx.hash)) + with tx_storage.allow_partially_validated_context(): + tx_exists = tx_storage.transaction_exists(dep_hash) + if not tx_exists: + self.log.debug('tx parent is needed', tx=dep_hash.hex()) + self._needed_txs_index[dep_hash] = (height, not_none(tx.hash)) + + # also, remove the given transaction from needed, because we already have it + self._needed_txs_index.pop(tx.hash, None) diff --git a/hathor/indexes/memory_height_index.py b/hathor/indexes/memory_height_index.py index 7040ce10d..db1ec4cc9 100644 --- a/hathor/indexes/memory_height_index.py +++ b/hathor/indexes/memory_height_index.py @@ -14,7 +14,7 @@ from typing import Optional -from hathor.indexes.height_index import BLOCK_GENESIS_ENTRY, HeightIndex, IndexEntry +from hathor.indexes.height_index import BLOCK_GENESIS_ENTRY, HeightIndex, HeightInfo, IndexEntry class MemoryHeightIndex(HeightIndex): @@ -68,6 +68,15 @@ def get(self, height: int) -> Optional[bytes]: def get_tip(self) -> bytes: return self._index[-1].hash - def get_height_tip(self) -> tuple[int, bytes]: + def get_height_tip(self) -> HeightInfo: height = len(self._index) - 1 - return height, self._index[height].hash + return HeightInfo(height, self._index[height].hash) + + def get_n_height_tips(self, n_blocks: int) -> list[HeightInfo]: + if n_blocks < 1: + raise ValueError('n_blocks must be a positive, non-zero, integer') + # highest height that is included, will be the first element + h_high = len(self._index) - 1 + # lowest height that is not included, -1 if it reaches the genesis + h_low = max(h_high - n_blocks, -1) + return [HeightInfo(h, self._index[h].hash) for h in range(h_high, h_low, -1)] diff --git a/hathor/indexes/memory_mempool_tips_index.py b/hathor/indexes/memory_mempool_tips_index.py index 
9a8fd7c9f..564ad3bf6 100644 --- a/hathor/indexes/memory_mempool_tips_index.py +++ b/hathor/indexes/memory_mempool_tips_index.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterable, Optional, Set +from typing import Iterable, Optional from structlog import get_logger @@ -22,7 +22,7 @@ class MemoryMempoolTipsIndex(ByteCollectionMempoolTipsIndex): - _index: Set[bytes] + _index: set[bytes] def __init__(self): self.log = logger.new() diff --git a/hathor/indexes/memory_tx_group_index.py b/hathor/indexes/memory_tx_group_index.py index 39c0f9470..5b8415905 100644 --- a/hathor/indexes/memory_tx_group_index.py +++ b/hathor/indexes/memory_tx_group_index.py @@ -14,7 +14,7 @@ from abc import abstractmethod from collections import defaultdict -from typing import Iterable, Set, Sized, TypeVar +from typing import Iterable, Sized, TypeVar from structlog import get_logger @@ -31,7 +31,7 @@ class MemoryTxGroupIndex(TxGroupIndex[KT]): """Memory implementation of the TxGroupIndex. This class is abstract and cannot be used directly. """ - index: defaultdict[KT, Set[bytes]] + index: defaultdict[KT, set[bytes]] def __init__(self) -> None: self.force_clear() diff --git a/hathor/indexes/mempool_tips_index.py b/hathor/indexes/mempool_tips_index.py index 08c79dd46..460764239 100644 --- a/hathor/indexes/mempool_tips_index.py +++ b/hathor/indexes/mempool_tips_index.py @@ -14,7 +14,7 @@ from abc import abstractmethod from collections.abc import Collection -from typing import TYPE_CHECKING, Iterable, Iterator, Optional, Set, cast +from typing import TYPE_CHECKING, Iterable, Iterator, Optional, cast import structlog @@ -70,7 +70,7 @@ def iter_all(self, tx_storage: 'TransactionStorage') -> Iterator[Transaction]: # originally tx_storage.get_mempool_tips_index @abstractmethod - def get(self) -> Set[bytes]: + def get(self) -> set[bytes]: """ Get the set of mempool tips indexed. 
@@ -107,8 +107,8 @@ def update(self, tx: BaseTransaction, *, remove: Optional[bool] = None) -> None: assert tx.hash is not None assert tx.storage is not None tx_meta = tx.get_metadata() - to_remove: Set[bytes] = set() - to_remove_parents: Set[bytes] = set() + to_remove: set[bytes] = set() + to_remove_parents: set[bytes] = set() tx_storage = tx.storage for tip_tx in self.iter(tx_storage): assert tip_tx.hash is not None @@ -194,5 +194,5 @@ def iter_all(self, tx_storage: 'TransactionStorage') -> Iterator[Transaction]: else: yield tx - def get(self) -> Set[bytes]: + def get(self) -> set[bytes]: return set(iter(self._index)) diff --git a/hathor/indexes/rocksdb_deps_index.py b/hathor/indexes/rocksdb_deps_index.py index fee70fb99..780299ee8 100644 --- a/hathor/indexes/rocksdb_deps_index.py +++ b/hathor/indexes/rocksdb_deps_index.py @@ -351,11 +351,6 @@ def remove_from_needed_index(self, tx: bytes) -> None: key_needed = self._to_key_needed(tx) self._db.delete((self._cf, key_needed)) - def get_next_needed_tx(self) -> bytes: - # This strategy maximizes the chance to download multiple txs on the same stream - # Find the tx with highest "height" - # XXX: we could cache this onto `needed_txs` so we don't have to fetch txs every time - # TODO: improve this by using some sorted data structure to make this better than O(n) - height, start_hash, tx = max((h, s, t) for t, h, s in self._iter_needed()) - self.log.debug('next needed tx start', start=start_hash.hex(), height=height, needed_tx=tx.hex()) - return start_hash + def iter_next_needed_txs(self) -> Iterator[bytes]: + for tx_hash, _, __ in self._iter_needed(): + yield tx_hash diff --git a/hathor/indexes/rocksdb_height_index.py b/hathor/indexes/rocksdb_height_index.py index 022f60b0c..72964f754 100644 --- a/hathor/indexes/rocksdb_height_index.py +++ b/hathor/indexes/rocksdb_height_index.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Any, Optional from structlog import get_logger from hathor.conf import HathorSettings -from hathor.indexes.height_index import BLOCK_GENESIS_ENTRY, HeightIndex, IndexEntry +from hathor.indexes.height_index import BLOCK_GENESIS_ENTRY, HeightIndex, HeightInfo, IndexEntry from hathor.indexes.rocksdb_utils import RocksDBIndexUtils if TYPE_CHECKING: # pragma: no cover @@ -111,7 +111,7 @@ def _add(self, height: int, entry: IndexEntry, *, can_reorg: bool) -> None: raise ValueError(f'parent hash required (current height: {cur_height}, new height: {height})') elif height == cur_height + 1: self._db.put((self._cf, key), value) - elif cur_tip != entry.hash: + elif self.get(height) != entry.hash: if can_reorg: self._del_from_height(height) self._db.put((self._cf, key), value) @@ -119,7 +119,7 @@ def _add(self, height: int, entry: IndexEntry, *, can_reorg: bool) -> None: raise ValueError('adding would cause a re-org, use can_reorg=True to accept re-orgs') else: # nothing to do (there are more blocks, but the block at height currently matches the added block) - assert cur_tip == entry.hash + pass def add_new(self, height: int, block_hash: bytes, timestamp: int) -> None: self._add(height, IndexEntry(block_hash, timestamp), can_reorg=False) @@ -141,11 +141,28 @@ def get_tip(self) -> bytes: assert value is not None # must never be empty, at least genesis has been added return self._from_value(value).hash - def get_height_tip(self) -> tuple[int, bytes]: + def get_height_tip(self) -> HeightInfo: it = self._db.iteritems(self._cf) it.seek_to_last() (_, key), value = it.get() assert key is not None and value is not None # must never be empty, at least genesis has been added height = self._from_key(key) entry = self._from_value(value) - return height, entry.hash + return HeightInfo(height, entry.hash) + + def get_n_height_tips(self, n_blocks: int) -> list[HeightInfo]: + if n_blocks < 1: + raise 
ValueError('n_blocks must be a positive, non-zero, integer') + info_list: list[HeightInfo] = [] + # we need to iterate in reverse order + it: Any = reversed(self._db.iteritems(self._cf)) # XXX: mypy doesn't know what reversed does to this iterator + it.seek_to_last() + for (_, key), value in it: + # stop when we have enough elements, otherwise the iterator will stop naturally when it reaches the genesis + if len(info_list) == n_blocks: + break + assert key is not None and value is not None # must never be empty, at least genesis has been added + height = self._from_key(key) + entry = self._from_value(value) + info_list.append(HeightInfo(height, entry.hash)) + return info_list diff --git a/hathor/indexes/tips_index.py b/hathor/indexes/tips_index.py index f9fe09c67..992745b52 100644 --- a/hathor/indexes/tips_index.py +++ b/hathor/indexes/tips_index.py @@ -14,7 +14,6 @@ from abc import abstractmethod from enum import Enum -from typing import Set from intervaltree import Interval from structlog import get_logger @@ -88,5 +87,5 @@ def update_tx(self, tx: BaseTransaction, *, relax_assert: bool = False) -> None: raise NotImplementedError @abstractmethod - def __getitem__(self, index: float) -> Set[Interval]: + def __getitem__(self, index: float) -> set[Interval]: raise NotImplementedError diff --git a/hathor/manager.py b/hathor/manager.py index b1effa871..2d0b14577 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -51,6 +51,7 @@ from hathor.transaction.exceptions import TxValidationError from hathor.transaction.storage import TransactionStorage from hathor.transaction.storage.exceptions import TransactionDoesNotExist +from hathor.transaction.storage.tx_allow_scope import TxAllowScope from hathor.types import Address, VertexId from hathor.util import EnvironmentInfo, LogDuration, Random, Reactor, calculate_min_significant_weight, not_none from hathor.wallet import BaseWallet @@ -60,7 +61,11 @@ cpu = get_cpu_profiler() -DEFAULT_CAPABILITIES = 
[settings.CAPABILITY_WHITELIST, settings.CAPABILITY_SYNC_VERSION] +DEFAULT_CAPABILITIES = [ + settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION, + settings.CAPABILITY_GET_BEST_BLOCKCHAIN +] class HathorManager: @@ -264,6 +269,8 @@ def start(self) -> None: # Disable get transaction lock when initializing components self.tx_storage.disable_lock() + # Open scope for initialization. + self.tx_storage.set_allow_scope(TxAllowScope.VALID | TxAllowScope.PARTIAL | TxAllowScope.INVALID) # Initialize manager's components. if self._full_verification: self.tx_storage.reset_indexes() @@ -274,6 +281,7 @@ def start(self) -> None: self.tx_storage.finish_full_verification() else: self._initialize_components_new() + self.tx_storage.set_allow_scope(TxAllowScope.VALID) self.tx_storage.enable_lock() # Metric starts to capture data @@ -394,14 +402,11 @@ def _initialize_components_full_verification(self) -> None: # self.start_profiler() self.log.debug('reset all metadata') - with self.tx_storage.allow_partially_validated_context(): - for tx in self.tx_storage.get_all_transactions(): - tx.reset_metadata() + for tx in self.tx_storage.get_all_transactions(): + tx.reset_metadata() self.log.debug('load blocks and transactions') for tx in self.tx_storage._topological_sort_dfs(): - tx.update_initial_metadata() - assert tx.hash is not None tx_meta = tx.get_metadata() @@ -430,10 +435,18 @@ def _initialize_components_full_verification(self) -> None: try: # TODO: deal with invalid tx + tx.calculate_height() + tx._update_parents_children_metadata() + if tx.can_validate_full(): - self.tx_storage.add_to_indexes(tx) + tx.update_initial_metadata() + tx.calculate_min_height() + if tx.is_genesis: + assert tx.validate_checkpoint(self.checkpoints) assert tx.validate_full(skip_block_weight_verification=skip_block_weight_verification) - self.consensus_algorithm.update(tx) + self.tx_storage.add_to_indexes(tx) + with self.tx_storage.allow_only_valid_context(): + 
self.consensus_algorithm.update(tx) self.tx_storage.indexes.update(tx) if self.tx_storage.indexes.mempool_tips is not None: self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update @@ -442,8 +455,7 @@ def _initialize_components_full_verification(self) -> None: self.tx_storage.save_transaction(tx, only_metadata=True) else: assert tx.validate_basic(skip_block_weight_verification=skip_block_weight_verification) - with self.tx_storage.allow_partially_validated_context(): - self.tx_storage.save_transaction(tx, only_metadata=True) + self.tx_storage.save_transaction(tx, only_metadata=True) except (InvalidNewTransaction, TxValidationError): self.log.error('unexpected error when initializing', tx=tx, exc_info=True) raise @@ -478,6 +490,8 @@ def _initialize_components_full_verification(self) -> None: # we have to have a best_block by now # assert best_block is not None + self.tx_storage.indexes._manually_initialize(self.tx_storage) + self.log.debug('done loading transactions') # Check if all checkpoints in database are ok @@ -585,7 +599,8 @@ def _verify_soft_voided_txs(self) -> None: # that already has the soft voided transactions marked for soft_voided_id in self.consensus_algorithm.soft_voided_tx_ids: try: - soft_voided_tx = self.tx_storage.get_transaction(soft_voided_id) + with self.tx_storage.allow_only_valid_context(): + soft_voided_tx = self.tx_storage.get_transaction(soft_voided_id) except TransactionDoesNotExist: # This database does not have this tx that should be soft voided # so it's fine, we will mark it as soft voided when we get it through sync @@ -654,10 +669,11 @@ def _sync_v2_resume_validations(self) -> None: for tx_hash in self.tx_storage.indexes.deps.iter(): if not self.tx_storage.transaction_exists(tx_hash): continue - tx = self.tx_storage.get_transaction(tx_hash) + with self.tx_storage.allow_partially_validated_context(): + tx = self.tx_storage.get_transaction(tx_hash) if tx.get_metadata().validation.is_final(): 
depended_final_txs.append(tx) - self.sync_v2_step_validations(depended_final_txs, quiet=False) + self.sync_v2_step_validations(depended_final_txs, quiet=True) self.log.debug('pending validations finished') def add_listen_address(self, addr: str) -> None: @@ -927,12 +943,11 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non """ assert self.tx_storage.is_only_valid_allowed() assert tx.hash is not None + + already_exists = False if self.tx_storage.transaction_exists(tx.hash): self.tx_storage.compare_bytes_with_local_tx(tx) - if not fails_silently: - raise InvalidNewTransaction('Transaction already exists {}'.format(tx.hash_hex)) - self.log.warn('on_new_tx(): Transaction already exists', tx=tx.hash_hex) - return False + already_exists = True if tx.timestamp - self.reactor.seconds() > settings.MAX_FUTURE_TIMESTAMP_ALLOWED: if not fails_silently: @@ -949,8 +964,14 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non metadata = tx.get_metadata() except TransactionDoesNotExist: if not fails_silently: - raise InvalidNewTransaction('missing parent') - self.log.warn('on_new_tx(): missing parent', tx=tx.hash_hex) + raise InvalidNewTransaction('cannot get metadata') + self.log.warn('on_new_tx(): cannot get metadata', tx=tx.hash_hex) + return False + + if already_exists and metadata.validation.is_fully_connected(): + if not fails_silently: + raise InvalidNewTransaction('Transaction already exists {}'.format(tx.hash_hex)) + self.log.warn('on_new_tx(): Transaction already exists', tx=tx.hash_hex) return False if metadata.validation.is_invalid(): @@ -980,7 +1001,7 @@ def on_new_tx(self, tx: BaseTransaction, *, conn: Optional[HathorProtocol] = Non except HathorError as e: if not fails_silently: raise InvalidNewTransaction('consensus update failed') from e - self.log.warn('on_new_tx(): consensus update failed', tx=tx.hash_hex) + self.log.warn('on_new_tx(): consensus update failed', tx=tx.hash_hex, exc_info=True) return 
False assert tx.validate_full(skip_block_weight_verification=True, reject_locked_reward=reject_locked_reward) @@ -1028,24 +1049,26 @@ def sync_v2_step_validations(self, txs: Iterable[BaseTransaction], *, quiet: boo for ready_tx in txs: assert ready_tx.hash is not None self.tx_storage.indexes.deps.remove_ready_for_validation(ready_tx.hash) - it_next_ready = self.tx_storage.indexes.deps.next_ready_for_validation(self.tx_storage) - for tx in map(self.tx_storage.get_transaction, it_next_ready): - assert tx.hash is not None - tx.update_initial_metadata() - try: - # XXX: `reject_locked_reward` might not apply, partial validation is only used on sync-v2 - # TODO: deal with `reject_locked_reward` on sync-v2 - assert tx.validate_full(reject_locked_reward=True) - except (AssertionError, HathorError): - # TODO - raise - else: - self.tx_storage.add_to_indexes(tx) - self.consensus_algorithm.update(tx) - self.tx_storage.indexes.update(tx) - if self.tx_storage.indexes.mempool_tips: - self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update - self.tx_fully_validated(tx, quiet=quiet) + with self.tx_storage.allow_partially_validated_context(): + for tx in map(self.tx_storage.get_transaction, + self.tx_storage.indexes.deps.next_ready_for_validation(self.tx_storage)): + assert tx.hash is not None + tx.update_initial_metadata() + with self.tx_storage.allow_only_valid_context(): + try: + # XXX: `reject_locked_reward` might not apply, partial validation is only used on sync-v2 + # TODO: deal with `reject_locked_reward` on sync-v2 + assert tx.validate_full(reject_locked_reward=False) + except (AssertionError, HathorError): + # TODO + raise + else: + self.tx_storage.add_to_indexes(tx) + self.consensus_algorithm.update(tx) + self.tx_storage.indexes.update(tx) + if self.tx_storage.indexes.mempool_tips: + self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update + self.tx_fully_validated(tx, quiet=quiet) def tx_fully_validated(self, tx: 
BaseTransaction, *, quiet: bool) -> None: """ Handle operations that need to happen once the tx becomes fully validated. diff --git a/hathor/p2p/manager.py b/hathor/p2p/manager.py index 37a912897..007851ff6 100644 --- a/hathor/p2p/manager.py +++ b/hathor/p2p/manager.py @@ -29,7 +29,7 @@ from hathor.p2p.protocol import HathorProtocol from hathor.p2p.rate_limiter import RateLimiter from hathor.p2p.states.ready import ReadyState -from hathor.p2p.sync_factory import SyncManagerFactory +from hathor.p2p.sync_factory import SyncAgentFactory from hathor.p2p.sync_version import SyncVersion from hathor.p2p.utils import description_to_connection_string, parse_whitelist from hathor.pubsub import HathorEvents, PubSubManager @@ -83,7 +83,7 @@ class GlobalRateLimiter: connecting_peers: dict[IStreamClientEndpoint, _ConnectingPeer] handshaking_peers: set[HathorProtocol] whitelist_only: bool - _sync_factories: dict[SyncVersion, SyncManagerFactory] + _sync_factories: dict[SyncVersion, SyncAgentFactory] rate_limiter: RateLimiter @@ -100,6 +100,7 @@ def __init__(self, enable_sync_v1_1: bool) -> None: from hathor.p2p.sync_v1.factory_v1_0 import SyncV10Factory from hathor.p2p.sync_v1.factory_v1_1 import SyncV11Factory + from hathor.p2p.sync_v2.factory import SyncV2Factory if not (enable_sync_v1 or enable_sync_v1_1 or enable_sync_v2): raise TypeError(f'{type(self).__name__}() at least one sync version is required') @@ -185,7 +186,7 @@ def __init__(self, if enable_sync_v1_1: self._sync_factories[SyncVersion.V1_1] = SyncV11Factory(self) if enable_sync_v2: - self._sync_factories[SyncVersion.V2] = SyncV10Factory(self) + self._sync_factories[SyncVersion.V2] = SyncV2Factory(self) def set_manager(self, manager: 'HathorManager') -> None: """Set the manager. 
This method must be called before start().""" @@ -257,7 +258,7 @@ def get_sync_versions(self) -> set[SyncVersion]: # XXX: this is to make it easy to simulate old behavior if we disable the sync-version capability return {SyncVersion.V1} - def get_sync_factory(self, sync_version: SyncVersion) -> SyncManagerFactory: + def get_sync_factory(self, sync_version: SyncVersion) -> SyncAgentFactory: """Get the sync factory for a given version, support MUST be checked beforehand or it will raise an assert.""" assert sync_version in self._sync_factories, 'get_sync_factory must be called for a supported version' return self._sync_factories[sync_version] diff --git a/hathor/p2p/messages.py b/hathor/p2p/messages.py index 507acab23..91b20d8e6 100644 --- a/hathor/p2p/messages.py +++ b/hathor/p2p/messages.py @@ -77,6 +77,12 @@ class ProtocolMessages(Enum): # Pong is a response to a PING command. PONG = 'PONG' + # Request a list of blocks from the best blockchain + GET_BEST_BLOCKCHAIN = 'GET-BEST-BLOCKCHAIN' + + # Send back the blockchain requested + BEST_BLOCKCHAIN = 'BEST-BLOCKCHAIN' + # --- # Hathor Specific Messages # --- @@ -103,14 +109,12 @@ class ProtocolMessages(Enum): GET_BEST_BLOCK = 'GET-BEST-BLOCK' # Request the best block of the peer BEST_BLOCK = 'BEST-BLOCK' # Send the best block to your peer - GET_BLOCK_TXS = 'GET-BLOCK-TXS' # TODO: rename, maybe GET-TX-RANGE or repurpose GET-TRANSACTIONS above + GET_TRANSACTIONS_BFS = 'GET-TRANSACTIONS-BFS' TRANSACTION = 'TRANSACTION' + TRANSACTIONS_END = 'TRANSACTIONS-END' - GET_MEMPOOL = 'GET-MEMPOOL' # TODO: rename, maybe GET-TX-RANGE or repurpose GET-TRANSACTIONS above - MEMPOOL_END = 'MEMPOOL-END' # End of mempool sync - - GET_COMMON_CHAIN = 'GET-COMMON-CHAIN' - COMMON_CHAIN = 'COMMON-CHAIN' + GET_MEMPOOL = 'GET-MEMPOOL' + MEMPOOL_END = 'MEMPOOL-END' GET_PEER_BLOCK_HASHES = 'GET-PEER-BLOCK-HASHES' PEER_BLOCK_HASHES = 'PEER-BLOCK-HASHES' diff --git a/hathor/p2p/protocol.py b/hathor/p2p/protocol.py index 651442b50..f2d729c87 100644 
--- a/hathor/p2p/protocol.py +++ b/hathor/p2p/protocol.py @@ -90,6 +90,7 @@ class WarningFlags(str, Enum): diff_timestamp: Optional[int] idle_timeout: int sync_version: Optional[SyncVersion] # version chosen to be used on this connection + capabilities: set[str] # capabilities received from the peer in HelloState def __init__(self, network: str, my_peer: PeerId, p2p_manager: 'ConnectionsManager', *, use_ssl: bool, inbound: bool) -> None: @@ -157,6 +158,8 @@ def __init__(self, network: str, my_peer: PeerId, p2p_manager: 'ConnectionsManag self.log = logger.new() + self.capabilities = set() + def change_state(self, state_enum: PeerState) -> None: """Called to change the state of the connection.""" if state_enum not in self._state_instances: @@ -363,21 +366,21 @@ def is_sync_enabled(self) -> bool: if not self.is_state(self.PeerState.READY): return False assert isinstance(self.state, ReadyState) - return self.state.sync_manager.is_sync_enabled() + return self.state.sync_agent.is_sync_enabled() def enable_sync(self) -> None: """Enable sync for this connection.""" assert self.is_state(self.PeerState.READY) assert isinstance(self.state, ReadyState) self.log.info('enable sync') - self.state.sync_manager.enable_sync() + self.state.sync_agent.enable_sync() def disable_sync(self) -> None: """Disable sync for this connection.""" assert self.is_state(self.PeerState.READY) assert isinstance(self.state, ReadyState) self.log.info('disable sync') - self.state.sync_manager.disable_sync() + self.state.sync_agent.disable_sync() class HathorLineReceiver(LineReceiver, HathorProtocol): diff --git a/hathor/p2p/resources/status.py b/hathor/p2p/resources/status.py index fdefb58a5..bda60cc1c 100644 --- a/hathor/p2p/resources/status.py +++ b/hathor/p2p/resources/status.py @@ -58,7 +58,7 @@ def render_GET(self, request): for conn in self.manager.connections.iter_ready_connections(): remote = conn.transport.getPeer() status = {} - status[conn.state.sync_manager.name] = 
conn.state.sync_manager.get_status() + status[conn.state.sync_agent.name] = conn.state.sync_agent.get_status() connected_peers.append({ 'id': conn.peer.id, 'app_version': conn.app_version, diff --git a/hathor/p2p/states/hello.py b/hathor/p2p/states/hello.py index bf91756eb..ee42c5e4f 100644 --- a/hathor/p2p/states/hello.py +++ b/hathor/p2p/states/hello.py @@ -108,6 +108,9 @@ def handle_hello(self, payload: str) -> None: protocol.send_error_and_close_connection('Must have whitelist capability.') return + # another status can use the informed capabilities + protocol.capabilities = set(data['capabilities']) + my_sync_versions = self._get_sync_versions() try: remote_sync_versions = _parse_sync_versions(data) diff --git a/hathor/p2p/states/peer_id.py b/hathor/p2p/states/peer_id.py index 73ac42768..9b91b5b62 100644 --- a/hathor/p2p/states/peer_id.py +++ b/hathor/p2p/states/peer_id.py @@ -156,8 +156,11 @@ def _should_block_peer(self, peer_id: str) -> bool: # when ENABLE_PEER_WHITELIST is set, we check if we're on sync-v1 to block non-whitelisted peers if settings.ENABLE_PEER_WHITELIST: assert self.protocol.sync_version is not None - if self.protocol.sync_version.is_v1() and not peer_is_whitelisted: - return True + if not peer_is_whitelisted: + if self.protocol.sync_version.is_v1(): + return True + elif settings.USE_PEER_WHITELIST_ON_SYNC_V2: + return True # otherwise we block non-whitelisted peers when on "whitelist-only mode" if self.protocol.connections is not None: diff --git a/hathor/p2p/states/ready.py b/hathor/p2p/states/ready.py index f035cf241..b6813c9c4 100644 --- a/hathor/p2p/states/ready.py +++ b/hathor/p2p/states/ready.py @@ -18,10 +18,13 @@ from structlog import get_logger from twisted.internet.task import LoopingCall +from hathor.conf import HathorSettings +from hathor.indexes.height_index import HeightInfo from hathor.p2p.messages import ProtocolMessages from hathor.p2p.peer_id import PeerId from hathor.p2p.states.base import BaseState -from 
hathor.p2p.sync_manager import SyncManager +from hathor.p2p.states.utils import to_height_info +from hathor.p2p.sync_agent import SyncAgent from hathor.transaction import BaseTransaction from hathor.util import json_dumps, json_loads @@ -30,10 +33,13 @@ logger = get_logger() +settings = HathorSettings() + class ReadyState(BaseState): def __init__(self, protocol: 'HathorProtocol') -> None: super().__init__(protocol) + self.log = logger.new(**self.protocol.get_logger_context()) self.reactor = self.protocol.node.reactor @@ -57,16 +63,32 @@ def __init__(self, protocol: 'HathorProtocol') -> None: # Minimum round-trip time among PING/PONG. self.ping_min_rtt: float = inf + # The last blocks from the best blockchain in the peer + self.peer_best_blockchain: list[HeightInfo] = [] + self.cmd_map.update({ # p2p control messages ProtocolMessages.PING: self.handle_ping, ProtocolMessages.PONG: self.handle_pong, ProtocolMessages.GET_PEERS: self.handle_get_peers, ProtocolMessages.PEERS: self.handle_peers, - # Other messages are added by the sync manager. }) + self.lc_get_best_blockchain: Optional[LoopingCall] = None + + # if the peer has the GET-BEST-BLOCKCHAIN capability + common_capabilities = protocol.capabilities & set(protocol.node.capabilities) + if (settings.CAPABILITY_GET_BEST_BLOCKCHAIN in common_capabilities): + # set the loop to get the best blockchain from the peer + self.lc_get_best_blockchain = LoopingCall(self.send_get_best_blockchain) + self.lc_get_best_blockchain.clock = self.reactor + self.cmd_map.update({ + # extend the p2p control messages + ProtocolMessages.GET_BEST_BLOCKCHAIN: self.handle_get_best_blockchain, + ProtocolMessages.BEST_BLOCKCHAIN: self.handle_best_blockchain, + }) + # Initialize sync manager and add its commands to the list of available commands. 
connections = self.protocol.connections assert connections is not None @@ -77,8 +99,8 @@ def __init__(self, protocol: 'HathorProtocol') -> None: self.log.debug(f'loading {sync_version}') sync_factory = connections.get_sync_factory(sync_version) - self.sync_manager: SyncManager = sync_factory.create_sync_manager(self.protocol, reactor=self.reactor) - self.cmd_map.update(self.sync_manager.get_cmd_dict()) + self.sync_agent: SyncAgent = sync_factory.create_sync_agent(self.protocol, reactor=self.reactor) + self.cmd_map.update(self.sync_agent.get_cmd_dict()) def on_enter(self) -> None: if self.protocol.connections: @@ -87,24 +109,30 @@ def on_enter(self) -> None: self.lc_ping.start(1, now=False) self.send_get_peers() - self.sync_manager.start() + if self.lc_get_best_blockchain is not None: + self.lc_get_best_blockchain.start(settings.BEST_BLOCKCHAIN_INTERVAL, now=False) + + self.sync_agent.start() def on_exit(self) -> None: if self.lc_ping.running: self.lc_ping.stop() - if self.sync_manager.is_started(): - self.sync_manager.stop() + if self.lc_get_best_blockchain is not None and self.lc_get_best_blockchain.running: + self.lc_get_best_blockchain.stop() + + if self.sync_agent.is_started(): + self.sync_agent.stop() def prepare_to_disconnect(self) -> None: - if self.sync_manager.is_started(): - self.sync_manager.stop() + if self.sync_agent.is_started(): + self.sync_agent.stop() def send_tx_to_peer(self, tx: BaseTransaction) -> None: - self.sync_manager.send_tx_to_peer_if_possible(tx) + self.sync_agent.send_tx_to_peer_if_possible(tx) def is_synced(self) -> bool: - return self.sync_manager.is_synced() + return self.sync_agent.is_synced() def send_get_peers(self) -> None: """ Send a GET-PEERS command, requesting a list of nodes. 
@@ -180,3 +208,52 @@ def handle_pong(self, payload: str) -> None: self.ping_min_rtt = min(self.ping_min_rtt, self.ping_rtt) self.ping_start_time = None self.log.debug('rtt updated', rtt=self.ping_rtt, min_rtt=self.ping_min_rtt) + + def send_get_best_blockchain(self, n_blocks: int = settings.DEFAULT_BEST_BLOCKCHAIN_BLOCKS) -> None: + """ Send a GET-BEST-BLOCKCHAIN command, requesting a list of the latest + N blocks from the best blockchain. + """ + self.send_message(ProtocolMessages.GET_BEST_BLOCKCHAIN, str(n_blocks)) + + def handle_get_best_blockchain(self, payload: str) -> None: + """ Executed when a GET-BEST-BLOCKCHAIN command is received. + It just responds with a list with N blocks from the best blockchain + in descending order. + """ + try: + n_blocks = int(payload) + except ValueError: + self.protocol.send_error_and_close_connection( + f'Invalid param type. \'payload\' should be an int but we got {payload}.' + ) + return + + if not (0 < n_blocks <= settings.MAX_BEST_BLOCKCHAIN_BLOCKS): + self.protocol.send_error_and_close_connection( + f'N out of bounds. Valid range: [1, {settings.MAX_BEST_BLOCKCHAIN_BLOCKS}].' + ) + return + self.protocol.my_peer + + best_blockchain = self.protocol.node.tx_storage.get_n_height_tips(n_blocks) + self.send_best_blockchain(best_blockchain) + + def send_best_blockchain(self, best_blockchain: list[HeightInfo]) -> None: + """ Send a BEST-BLOCKCHAIN command with a best blockchain of N blocks. + """ + serialiable_best_blockchain = [(hi.height, hi.id.hex()) for hi in best_blockchain] + self.send_message(ProtocolMessages.BEST_BLOCKCHAIN, json_dumps(serialiable_best_blockchain)) + + def handle_best_blockchain(self, payload: str) -> None: + """ Executed when a BEST-BLOCKCHAIN command is received. It updates + the best blockchain. 
+ """ + restored_blocks = json_loads(payload) + try: + best_blockchain = [to_height_info(raw) for raw in restored_blocks] + except Exception: + self.protocol.send_error_and_close_connection( + 'Invalid HeightInfo while handling best_blockchain response.' + ) + return + self.peer_best_blockchain = best_blockchain diff --git a/hathor/p2p/states/utils.py b/hathor/p2p/states/utils.py new file mode 100644 index 000000000..317077f3b --- /dev/null +++ b/hathor/p2p/states/utils.py @@ -0,0 +1,24 @@ +import re + +from hathor.indexes.height_index import HeightInfo + + +def to_height_info(raw: tuple[int, str]) -> HeightInfo: + """ Instantiate HeightInfo from a literal tuple. + """ + if not (isinstance(raw, list) and len(raw) == 2): + raise ValueError(f"block_info_raw must be a tuple with length 3. We got {raw}.") + + height, id = raw + + if not isinstance(id, str): + raise ValueError(f"hash_hex must be a string. We got {id}.") + hash_pattern = r'[a-fA-F\d]{64}' + if not re.match(hash_pattern, id): + raise ValueError(f"hash_hex must be valid. We got {id}.") + if not isinstance(height, int): + raise ValueError(f"height must be an integer. We got {height}.") + if height < 0: + raise ValueError(f"height must greater than or equal to 0. 
We got {height}.") + + return HeightInfo(height, bytes.fromhex(id)) diff --git a/hathor/p2p/sync_manager.py b/hathor/p2p/sync_agent.py similarity index 96% rename from hathor/p2p/sync_manager.py rename to hathor/p2p/sync_agent.py index cc6f2b141..a700335ed 100644 --- a/hathor/p2p/sync_manager.py +++ b/hathor/p2p/sync_agent.py @@ -19,7 +19,7 @@ from hathor.transaction import BaseTransaction -class SyncManager(ABC): +class SyncAgent(ABC): @abstractmethod def is_started(self) -> bool: """Whether the manager started running""" @@ -55,14 +55,17 @@ def is_errored(self) -> bool: """Whether the manager entered an error state""" raise NotImplementedError + @abstractmethod def is_sync_enabled(self) -> bool: """Return true if the sync is enabled.""" raise NotImplementedError + @abstractmethod def enable_sync(self) -> None: """Enable sync.""" raise NotImplementedError + @abstractmethod def disable_sync(self) -> None: """Disable sync.""" raise NotImplementedError diff --git a/hathor/p2p/sync_factory.py b/hathor/p2p/sync_factory.py index f171f432b..4f04a734b 100644 --- a/hathor/p2p/sync_factory.py +++ b/hathor/p2p/sync_factory.py @@ -15,14 +15,14 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional -from hathor.p2p.sync_manager import SyncManager +from hathor.p2p.sync_agent import SyncAgent from hathor.util import Reactor if TYPE_CHECKING: from hathor.p2p.protocol import HathorProtocol -class SyncManagerFactory(ABC): +class SyncAgentFactory(ABC): @abstractmethod - def create_sync_manager(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncManager: + def create_sync_agent(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncAgent: pass diff --git a/hathor/p2p/sync_v1/agent.py b/hathor/p2p/sync_v1/agent.py index 3cdc8965f..7ec670065 100644 --- a/hathor/p2p/sync_v1/agent.py +++ b/hathor/p2p/sync_v1/agent.py @@ -26,7 +26,7 @@ from hathor.conf import HathorSettings from hathor.p2p.messages import 
GetNextPayload, GetTipsPayload, NextPayload, ProtocolMessages, TipsPayload -from hathor.p2p.sync_manager import SyncManager +from hathor.p2p.sync_agent import SyncAgent from hathor.p2p.sync_v1.downloader import Downloader from hathor.transaction import BaseTransaction from hathor.transaction.base_transaction import tx_or_block_from_bytes @@ -172,7 +172,7 @@ def stopProducing(self) -> None: self.priority_queue.clear() -class NodeSyncTimestamp(SyncManager): +class NodeSyncTimestamp(SyncAgent): """ An algorithm to sync the DAG between two peers using the timestamp of the transactions. This algorithm must assume that a new item may arrive while it is running. The item's timestamp diff --git a/hathor/p2p/sync_v1/factory_v1_0.py b/hathor/p2p/sync_v1/factory_v1_0.py index 5b618a7a1..acd430474 100644 --- a/hathor/p2p/sync_v1/factory_v1_0.py +++ b/hathor/p2p/sync_v1/factory_v1_0.py @@ -15,8 +15,8 @@ from typing import TYPE_CHECKING, Optional from hathor.p2p.manager import ConnectionsManager -from hathor.p2p.sync_factory import SyncManagerFactory -from hathor.p2p.sync_manager import SyncManager +from hathor.p2p.sync_agent import SyncAgent +from hathor.p2p.sync_factory import SyncAgentFactory from hathor.p2p.sync_v1.agent import NodeSyncTimestamp from hathor.p2p.sync_v1.downloader import Downloader from hathor.util import Reactor @@ -25,7 +25,7 @@ from hathor.p2p.protocol import HathorProtocol -class SyncV10Factory(SyncManagerFactory): +class SyncV10Factory(SyncAgentFactory): def __init__(self, connections: ConnectionsManager): self.connections = connections self._downloader: Optional[Downloader] = None @@ -36,5 +36,5 @@ def get_downloader(self) -> Downloader: self._downloader = Downloader(self.connections.manager) return self._downloader - def create_sync_manager(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncManager: + def create_sync_agent(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncAgent: return 
NodeSyncTimestamp(protocol, downloader=self.get_downloader(), reactor=reactor) diff --git a/hathor/p2p/sync_v1/factory_v1_1.py b/hathor/p2p/sync_v1/factory_v1_1.py index 43c7e11a8..57d8819ae 100644 --- a/hathor/p2p/sync_v1/factory_v1_1.py +++ b/hathor/p2p/sync_v1/factory_v1_1.py @@ -15,8 +15,8 @@ from typing import TYPE_CHECKING, Optional from hathor.p2p.manager import ConnectionsManager -from hathor.p2p.sync_factory import SyncManagerFactory -from hathor.p2p.sync_manager import SyncManager +from hathor.p2p.sync_agent import SyncAgent +from hathor.p2p.sync_factory import SyncAgentFactory from hathor.p2p.sync_v1.agent import NodeSyncTimestamp from hathor.p2p.sync_v1.downloader import Downloader from hathor.util import Reactor @@ -25,7 +25,7 @@ from hathor.p2p.protocol import HathorProtocol -class SyncV11Factory(SyncManagerFactory): +class SyncV11Factory(SyncAgentFactory): def __init__(self, connections: ConnectionsManager): self.connections = connections self._downloader: Optional[Downloader] = None @@ -36,5 +36,5 @@ def get_downloader(self) -> Downloader: self._downloader = Downloader(self.connections.manager) return self._downloader - def create_sync_manager(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncManager: + def create_sync_agent(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncAgent: return NodeSyncTimestamp(protocol, downloader=self.get_downloader(), reactor=reactor) diff --git a/hathor/p2p/sync_v2/__init__.py b/hathor/p2p/sync_v2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/hathor/p2p/sync_v2/factory.py b/hathor/p2p/sync_v2/factory.py new file mode 100644 index 000000000..40b2b8294 --- /dev/null +++ b/hathor/p2p/sync_v2/factory.py @@ -0,0 +1,32 @@ +# Copyright 2021 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Optional + +from hathor.p2p.manager import ConnectionsManager +from hathor.p2p.sync_agent import SyncAgent +from hathor.p2p.sync_factory import SyncAgentFactory +from hathor.p2p.sync_v2.manager import NodeBlockSync +from hathor.util import Reactor + +if TYPE_CHECKING: + from hathor.p2p.protocol import HathorProtocol + + +class SyncV2Factory(SyncAgentFactory): + def __init__(self, connections: ConnectionsManager): + self.connections = connections + + def create_sync_agent(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncAgent: + return NodeBlockSync(protocol, reactor=reactor) diff --git a/hathor/p2p/sync_v2/manager.py b/hathor/p2p/sync_v2/manager.py new file mode 100644 index 000000000..3fa948823 --- /dev/null +++ b/hathor/p2p/sync_v2/manager.py @@ -0,0 +1,1179 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import base64 +import json +import math +import struct +from collections import OrderedDict +from enum import Enum +from typing import TYPE_CHECKING, Any, Callable, Generator, Optional, cast + +from structlog import get_logger +from twisted.internet.defer import Deferred, inlineCallbacks +from twisted.internet.task import LoopingCall + +from hathor.conf import HathorSettings +from hathor.p2p.messages import ProtocolMessages +from hathor.p2p.sync_agent import SyncAgent +from hathor.p2p.sync_v2.mempool import SyncMempoolManager +from hathor.p2p.sync_v2.streamers import DEFAULT_STREAMING_LIMIT, BlockchainStreaming, StreamEnd, TransactionsStreaming +from hathor.transaction import BaseTransaction, Block, Transaction +from hathor.transaction.base_transaction import tx_or_block_from_bytes +from hathor.transaction.exceptions import HathorError +from hathor.transaction.storage.exceptions import TransactionDoesNotExist +from hathor.types import VertexId +from hathor.util import Reactor, collect_n + +if TYPE_CHECKING: + from hathor.p2p.protocol import HathorProtocol + +settings = HathorSettings() +logger = get_logger() + +MAX_GET_TRANSACTIONS_BFS_LEN: int = 8 + + +class PeerState(Enum): + ERROR = 'error' + UNKNOWN = 'unknown' + SYNCING_BLOCKS = 'syncing-blocks' + SYNCING_TRANSACTIONS = 'syncing-transactions' + SYNCING_MEMPOOL = 'syncing-mempool' + + +class NodeBlockSync(SyncAgent): + """ An algorithm to sync two peers based on their blockchain. + """ + name: str = 'node-block-sync' + + def __init__(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> None: + """ + :param protocol: Protocol of the connection. + :type protocol: HathorProtocol + + :param reactor: Reactor to schedule later calls. 
(default=twisted.internet.reactor) + :type reactor: Reactor + """ + self.protocol = protocol + self.manager = protocol.node + self.tx_storage = protocol.node.tx_storage + self.state = PeerState.UNKNOWN + + self.DEFAULT_STREAMING_LIMIT = DEFAULT_STREAMING_LIMIT + + if reactor is None: + from hathor.util import reactor as twisted_reactor + reactor = twisted_reactor + assert reactor is not None + self.reactor: Reactor = reactor + self._is_streaming: bool = False + + # Create logger with context + self.log = logger.new(peer=self.protocol.get_short_peer_id()) + + # Extra + self._blk_size = 0 + self._blk_end_hash = settings.GENESIS_BLOCK_HASH + self._blk_max_quantity = 0 + + # indicates whether we're receiving a stream from the peer + self.receiving_stream = False + + # highest block where we are synced + self.synced_height = 0 + + # highest block peer has + self.peer_height = 0 + + # Latest deferred waiting for a reply. + self._deferred_txs: dict[VertexId, Deferred[BaseTransaction]] = {} + self._deferred_tips: Optional[Deferred[list[bytes]]] = None + self._deferred_best_block: Optional[Deferred[dict[str, Any]]] = None + self._deferred_peer_block_hashes: Optional[Deferred[list[tuple[int, bytes]]]] = None + + # When syncing blocks we start streaming with all peers + # so the moment I get some repeated blocks, I stop the download + # because it's probably a streaming that I've just received + self.max_repeated_blocks = 10 + + # Streaming objects + self.blockchain_streaming: Optional[BlockchainStreaming] = None + self.transactions_streaming: Optional[TransactionsStreaming] = None + + # Whether the peers are synced, i.e. our best height and best block are the same + self._synced = False + + # Indicate whether the sync manager has been started. + self._started: bool = False + + # Saves the last received block from the block streaming # this is useful to be used when running the sync of + # transactions in the case when I am downloading a side chain. 
Starts at the genesis, which is common to all + # peers on the network + self._last_received_block: Optional[Block] = None + + # Saves if I am in the middle of a mempool sync + # we don't execute any sync while in the middle of it + self.mempool_manager = SyncMempoolManager(self) + self._receiving_tips: Optional[list[bytes]] = None + + # Cache for get_tx calls + self._get_tx_cache: OrderedDict[bytes, BaseTransaction] = OrderedDict() + self._get_tx_cache_maxsize = 1000 + + # Looping call of the main method + self._lc_run = LoopingCall(self.run_sync) + self._lc_run.clock = self.reactor + self._is_running = False + + # Whether we propagate transactions or not + self._is_relaying = False + + # This stores the final height that we expect the last "get blocks" stream to end on + self._blk_end_height: Optional[int] = None + + # Whether to sync with this peer + self._is_enabled: bool = False + + def get_status(self) -> dict[str, Any]: + """ Return the status of the sync. + """ + res = { + 'is_enabled': self.is_sync_enabled(), + 'peer_height': self.peer_height, + 'synced_height': self.synced_height, + 'synced': self._synced, + 'state': self.state.value, + } + return res + + def is_synced(self) -> bool: + return self._synced + + def is_errored(self) -> bool: + return self.state is PeerState.ERROR + + def is_sync_enabled(self) -> bool: + return self._is_enabled + + def enable_sync(self) -> None: + self._is_enabled = True + + def disable_sync(self) -> None: + self._is_enabled = False + + def send_tx_to_peer_if_possible(self, tx: BaseTransaction) -> None: + if not self._is_enabled: + self.log.debug('sync is disabled') + return + if not self.is_synced(): + # XXX Should we accept any tx while I am not synced? 
+ return + + # XXX When we start having many txs/s this become a performance issue + # Then we could change this to be a streaming of real time data with + # blocks as priorities to help miners get the blocks as fast as we can + # We decided not to implement this right now because we already have some producers + # being used in the sync algorithm and the code was becoming a bit too complex + if self._is_relaying: + self.send_data(tx) + + def is_started(self) -> bool: + return self._started + + def start(self) -> None: + if self._started: + raise Exception('NodeSyncBlock is already running') + self._started = True + self._lc_run.start(5) + + def stop(self) -> None: + if not self._started: + raise Exception('NodeSyncBlock is already stopped') + self._started = False + self._lc_run.stop() + + def get_cmd_dict(self) -> dict[ProtocolMessages, Callable[[str], None]]: + """ Return a dict of messages of the plugin. + + For further information about each message, see the RFC. + Link: https://github.com/HathorNetwork/rfcs/blob/master/text/0025-p2p-sync-v2.md#p2p-sync-protocol-messages + """ + return { + ProtocolMessages.GET_NEXT_BLOCKS: self.handle_get_next_blocks, + ProtocolMessages.BLOCKS: self.handle_blocks, + ProtocolMessages.BLOCKS_END: self.handle_blocks_end, + ProtocolMessages.GET_BEST_BLOCK: self.handle_get_best_block, + ProtocolMessages.BEST_BLOCK: self.handle_best_block, + ProtocolMessages.GET_TRANSACTIONS_BFS: self.handle_get_transactions_bfs, + ProtocolMessages.TRANSACTION: self.handle_transaction, + ProtocolMessages.TRANSACTIONS_END: self.handle_transactions_end, + ProtocolMessages.GET_PEER_BLOCK_HASHES: self.handle_get_peer_block_hashes, + ProtocolMessages.PEER_BLOCK_HASHES: self.handle_peer_block_hashes, + ProtocolMessages.STOP_BLOCK_STREAMING: self.handle_stop_block_streaming, + ProtocolMessages.GET_TIPS: self.handle_get_tips, + ProtocolMessages.TIPS: self.handle_tips, + ProtocolMessages.TIPS_END: self.handle_tips_end, + # XXX: overriding 
ReadyState.handle_error + ProtocolMessages.ERROR: self.handle_error, + ProtocolMessages.GET_DATA: self.handle_get_data, + ProtocolMessages.DATA: self.handle_data, + ProtocolMessages.RELAY: self.handle_relay, + ProtocolMessages.NOT_FOUND: self.handle_not_found, + } + + def handle_not_found(self, payload: str) -> None: + """ Handle a received NOT-FOUND message. + """ + # XXX: NOT_FOUND is a valid message, but we shouldn't ever receive it unless the other peer is running with a + # modified code or if there is a bug + self.log.warn('not found? close connection', payload=payload) + self.protocol.send_error_and_close_connection('Unexpected NOT_FOUND') + + def handle_error(self, payload: str) -> None: + """ Override protocols original handle_error so we can recover a sync in progress. + """ + assert self.protocol.connections is not None + # forward message to overloaded handle_error: + self.protocol.handle_error(payload) + + def update_synced(self, synced: bool) -> None: + self._synced = synced + + @inlineCallbacks + def run_sync(self) -> Generator[Any, Any, None]: + """ Async step of the sync algorithm. + + This is the entrypoint for the sync. It is always safe to call this method. + """ + if not self._is_enabled: + self.log.debug('sync is disabled') + return + if self._is_running: + # Already running... + self.log.debug('already running') + return + self._is_running = True + try: + yield self._run_sync() + finally: + self._is_running = False + + @inlineCallbacks + def _run_sync(self) -> Generator[Any, Any, None]: + """ Actual implementation of the sync step logic in run_sync. + """ + if self.receiving_stream: + # If we're receiving a stream, wait for it to finish before running sync. 
+ # If we're sending a stream, do the sync to update the peer's synced block + self.log.debug('receiving stream, try again later') + return + + if self.mempool_manager.is_running(): + # It's running a mempool sync, so we wait until it finishes + self.log.debug('running mempool sync, try again later') + return + + bestblock = self.tx_storage.get_best_block() + meta = bestblock.get_metadata() + + self.log.debug('run sync', height=meta.height) + + assert self.protocol.connections is not None + assert self.tx_storage.indexes is not None + assert self.tx_storage.indexes.deps is not None + + if self.tx_storage.indexes.deps.has_needed_tx(): + self.log.debug('needed tx exist, sync transactions') + self.update_synced(False) + # TODO: find out whether we can sync transactions from this peer to speed things up + self.run_sync_transactions() + else: + # I am already in sync with all checkpoints, sync next blocks + yield self.run_sync_blocks() + + def run_sync_transactions(self) -> None: + """ Run a step of the transaction syncing phase. 
+ """ + self.state = PeerState.SYNCING_TRANSACTIONS + + assert self.protocol.connections is not None + assert self.tx_storage.indexes is not None + assert self.tx_storage.indexes.deps is not None + + # start_hash = self.tx_storage.indexes.deps.get_next_needed_tx() + needed_txs, _ = collect_n(self.tx_storage.indexes.deps.iter_next_needed_txs(), + MAX_GET_TRANSACTIONS_BFS_LEN) + + # Start with the last received block and find the best block full validated in its chain + block = self._last_received_block + if block is None: + block = cast(Block, self.tx_storage.get_genesis(settings.GENESIS_BLOCK_HASH)) + else: + with self.tx_storage.allow_partially_validated_context(): + while not block.get_metadata().validation.is_valid(): + block = block.get_block_parent() + assert block is not None + assert block.hash is not None + block_height = block.get_height() + + self.log.info('run sync transactions', start=[i.hex() for i in needed_txs], end_block_hash=block.hash.hex(), + end_block_height=block_height) + self.send_get_transactions_bfs(needed_txs, block.hash) + + @inlineCallbacks + def run_sync_blocks(self) -> Generator[Any, Any, None]: + """ Async step of the block syncing phase. 
+ """ + assert self.tx_storage.indexes is not None + self.state = PeerState.SYNCING_BLOCKS + + # Find my height + bestblock = self.tx_storage.get_best_block() + assert bestblock.hash is not None + meta = bestblock.get_metadata() + my_height = meta.height + + self.log.debug('run sync blocks', my_height=my_height) + + # Find best block + data = yield self.get_peer_best_block() + peer_best_block = data['block'] + peer_best_height = data['height'] + self.peer_height = peer_best_height + + # find best common block + yield self.find_best_common_block(peer_best_height, peer_best_block) + self.log.debug('run_sync_blocks', peer_height=self.peer_height, synced_height=self.synced_height) + + if self.synced_height < self.peer_height: + # sync from common block + peer_block_at_height = yield self.get_peer_block_hashes([self.synced_height]) + self.run_block_sync(peer_block_at_height[0][1], self.synced_height, peer_best_block, peer_best_height) + elif my_height == self.synced_height == self.peer_height: + # we're synced and on the same height, get their mempool + self.state = PeerState.SYNCING_MEMPOOL + self.mempool_manager.run() + elif self._is_relaying: + # TODO: validate if this is when we should disable relaying + self.send_relay(enable=False) + else: + # we got all the peer's blocks but aren't on the same height, nothing to do + pass + + def get_tips(self) -> Deferred[list[bytes]]: + """ Async method to request the remote peer's tips. + """ + if self._deferred_tips is None: + self._deferred_tips = Deferred() + self.send_get_tips() + else: + assert self._receiving_tips is not None + return self._deferred_tips + + def send_get_tips(self) -> None: + """ Send a GET-TIPS message. + """ + self.log.debug('get tips') + self.send_message(ProtocolMessages.GET_TIPS) + self._receiving_tips = [] + + def handle_get_tips(self, _payload: str) -> None: + """ Handle a GET-TIPS message. 
+ """ + assert self.tx_storage.indexes is not None + assert self.tx_storage.indexes.mempool_tips is not None + if self._is_streaming: + self.log.warn('can\'t send while streaming') # XXX: or can we? + self.send_message(ProtocolMessages.MEMPOOL_END) + return + self.log.debug('handle_get_tips') + # TODO Use a streaming of tips + for txid in self.tx_storage.indexes.mempool_tips.get(): + self.send_tips(txid) + self.send_message(ProtocolMessages.TIPS_END) + + def send_tips(self, tx_id: bytes) -> None: + """ Send a TIPS message. + """ + self.send_message(ProtocolMessages.TIPS, json.dumps([tx_id.hex()])) + + def handle_tips(self, payload: str) -> None: + """ Handle a TIPS message. + """ + self.log.debug('tips', receiving_tips=self._receiving_tips) + if self._receiving_tips is None: + self.protocol.send_error_and_close_connection('TIPS not expected') + return + data = json.loads(payload) + data = [bytes.fromhex(x) for x in data] + # filter-out txs we already have + self._receiving_tips.extend(tx_id for tx_id in data if not self.partial_vertex_exists(tx_id)) + + def handle_tips_end(self, _payload: str) -> None: + """ Handle a TIPS-END message. + """ + assert self._receiving_tips is not None + deferred = self._deferred_tips + self._deferred_tips = None + if deferred is None: + self.protocol.send_error_and_close_connection('TIPS-END not expected') + return + deferred.callback(self._receiving_tips) + self._receiving_tips = None + + def send_relay(self, *, enable: bool = True) -> None: + """ Send a RELAY message. + """ + self.log.debug('send_relay', enable=enable) + self.send_message(ProtocolMessages.RELAY, json.dumps(enable)) + + def handle_relay(self, payload: str) -> None: + """ Handle a RELAY message. 
+ """ + if not payload: + # XXX: "legacy" nothing means enable + self._is_relaying = True + else: + val = json.loads(payload) + if isinstance(val, bool): + self._is_relaying = val + else: + self.protocol.send_error_and_close_connection('RELAY: invalid value') + return + + def _setup_block_streaming(self, start_hash: bytes, start_height: int, end_hash: bytes, end_height: int, + reverse: bool) -> None: + """ Common setup before starting an outgoing block stream. + """ + self._blk_start_hash = start_hash + self._blk_start_height = start_height + self._blk_end_hash = end_hash + self._blk_end_height = end_height + self._blk_received = 0 + self._blk_repeated = 0 + raw_quantity = end_height - start_height + 1 + self._blk_max_quantity = -raw_quantity if reverse else raw_quantity + self._blk_prev_hash: Optional[bytes] = None + self._blk_stream_reverse = reverse + self._last_received_block = None + + def run_block_sync(self, start_hash: bytes, start_height: int, end_hash: bytes, end_height: int) -> None: + """ Called when the bestblock is after all checkpoints. + + It must syncs to the left until it reaches the remote's best block or the max stream limit. + """ + self._setup_block_streaming(start_hash, start_height, end_hash, end_height, False) + quantity = end_height - start_height + self.log.info('get next blocks', start_height=start_height, end_height=end_height, quantity=quantity, + start_hash=start_hash.hex(), end_hash=end_hash.hex()) + self.send_get_next_blocks(start_hash, end_hash) + + def send_message(self, cmd: ProtocolMessages, payload: Optional[str] = None) -> None: + """ Helper to send a message. + """ + assert self.protocol.state is not None + self.protocol.state.send_message(cmd, payload) + + def partial_vertex_exists(self, vertex_id: VertexId) -> bool: + """ Return true if the vertex exists no matter its validation state. 
+ """ + with self.tx_storage.allow_partially_validated_context(): + return self.tx_storage.transaction_exists(vertex_id) + + @inlineCallbacks + def find_best_common_block(self, peer_best_height: int, peer_best_block: bytes) -> Generator[Any, Any, None]: + """ Search for the highest block/height where we're synced. + """ + assert self.tx_storage.indexes is not None + my_best_height = self.tx_storage.get_height_best_block() + + self.log.debug('find common chain', peer_height=peer_best_height, my_height=my_best_height) + + if peer_best_height <= my_best_height: + my_block = self.tx_storage.indexes.height.get(peer_best_height) + if my_block == peer_best_block: + # we have all the peer's blocks + if peer_best_height == my_best_height: + # We are in sync, ask for relay so the remote sends transactions in real time + self.update_synced(True) + self.send_relay() + else: + self.update_synced(False) + + self.log.debug('synced to the latest peer block', height=peer_best_height) + self.synced_height = peer_best_height + return + else: + # TODO peer is on a different best chain + self.log.warn('peer on different chain', peer_height=peer_best_height, + peer_block=peer_best_block.hex(), my_block=(my_block.hex() if my_block is not None else + None)) + + self.update_synced(False) + not_synced = min(peer_best_height, my_best_height) + synced = self.synced_height + + while not_synced - synced > 1: + self.log.debug('find_best_common_block synced not_synced', synced=synced, not_synced=not_synced) + step = math.ceil((not_synced - synced)/10) + heights = [] + height = synced + while height < not_synced: + heights.append(height) + height += step + heights.append(not_synced) + block_height_list = yield self.get_peer_block_hashes(heights) + block_height_list.reverse() + for height, block_hash in block_height_list: + try: + # We must check only fully validated transactions. 
+ blk = self.tx_storage.get_transaction(block_hash) + assert blk.get_metadata().validation.is_fully_connected() + assert isinstance(blk, Block) + if height != blk.get_height(): + # WTF?! It should never happen. + self.state = PeerState.ERROR + return + synced = height + break + except TransactionDoesNotExist: + not_synced = height + + self.log.debug('find_best_common_block finished synced not_synced', synced=synced, not_synced=not_synced) + self.synced_height = synced + + def get_peer_block_hashes(self, heights: list[int]) -> Deferred[list[tuple[int, bytes]]]: + """ Returns the peer's block hashes in the given heights. + """ + if self._deferred_peer_block_hashes is not None: + raise Exception('latest_deferred is not None') + self.send_get_peer_block_hashes(heights) + self._deferred_peer_block_hashes = Deferred() + return self._deferred_peer_block_hashes + + def send_get_peer_block_hashes(self, heights: list[int]) -> None: + """ Send a GET-PEER-BLOCK-HASHES message. + """ + payload = json.dumps(heights) + self.send_message(ProtocolMessages.GET_PEER_BLOCK_HASHES, payload) + + def handle_get_peer_block_hashes(self, payload: str) -> None: + """ Handle a GET-PEER-BLOCK-HASHES message. + """ + assert self.tx_storage.indexes is not None + heights = json.loads(payload) + if len(heights) > 20: + self.protocol.send_error_and_close_connection('GET-PEER-BLOCK-HASHES: too many heights') + return + data = [] + for h in heights: + blk_hash = self.tx_storage.indexes.height.get(h) + if blk_hash is None: + break + blk = self.tx_storage.get_transaction(blk_hash) + if blk.get_metadata().voided_by: + # The height index might have voided blocks when there is a draw. + # Let's try again soon. 
+ self.reactor.callLater(3, self.handle_get_peer_block_hashes, payload) + return + data.append((h, blk_hash.hex())) + payload = json.dumps(data) + self.send_message(ProtocolMessages.PEER_BLOCK_HASHES, payload) + + def handle_peer_block_hashes(self, payload: str) -> None: + """ Handle a PEER-BLOCK-HASHES message. + """ + data = json.loads(payload) + data = [(h, bytes.fromhex(block_hash)) for (h, block_hash) in data] + deferred = self._deferred_peer_block_hashes + self._deferred_peer_block_hashes = None + if deferred: + deferred.callback(data) + + def send_get_next_blocks(self, start_hash: bytes, end_hash: bytes) -> None: + """ Send a PEER-BLOCK-HASHES message. + """ + payload = json.dumps(dict( + start_hash=start_hash.hex(), + end_hash=end_hash.hex(), + )) + self.send_message(ProtocolMessages.GET_NEXT_BLOCKS, payload) + self.receiving_stream = True + + def handle_get_next_blocks(self, payload: str) -> None: + """ Handle a GET-NEXT-BLOCKS message. + """ + self.log.debug('handle GET-NEXT-BLOCKS') + if self._is_streaming: + self.protocol.send_error_and_close_connection('GET-NEXT-BLOCKS received before previous one finished') + return + data = json.loads(payload) + self.send_next_blocks( + start_hash=bytes.fromhex(data['start_hash']), + end_hash=bytes.fromhex(data['end_hash']), + ) + + def send_next_blocks(self, start_hash: bytes, end_hash: bytes) -> None: + """ Send a NEXT-BLOCKS message. 
+ """ + self.log.debug('start NEXT-BLOCKS stream') + try: + blk = self.tx_storage.get_transaction(start_hash) + except TransactionDoesNotExist: + # In case the tx does not exist we send a NOT-FOUND message + self.log.debug('requested start_hash not found', start_hash=start_hash.hex()) + self.send_message(ProtocolMessages.NOT_FOUND, start_hash.hex()) + return + assert isinstance(blk, Block) + assert blk.hash is not None + # XXX: it is not an error for the other peer to request a voided block, we'll pretend it doesn't exist, butf + blk_meta = blk.get_metadata() + if blk_meta.voided_by: + # In case the tx does not exist we send a NOT-FOUND message + self.log.debug('requested start_hash is voided, continue anyway', start_hash=start_hash.hex(), + voided_by=[i.hex() for i in blk_meta.voided_by]) + # XXX: we want to be able to not send this, but we do because the remote node could get stuck otherwise + # (tracked by issue #711) + # self.send_message(ProtocolMessages.NOT_FOUND, start_hash.hex()) + # return + if self.blockchain_streaming is not None and self.blockchain_streaming.is_running: + self.blockchain_streaming.stop() + self.blockchain_streaming = BlockchainStreaming(self, blk, end_hash, limit=self.DEFAULT_STREAMING_LIMIT) + self.blockchain_streaming.start() + + def send_blocks(self, blk: Block) -> None: + """ Send a BLOCKS message. + + This message is called from a streamer for each block to being sent. + """ + payload = base64.b64encode(bytes(blk)).decode('ascii') + self.send_message(ProtocolMessages.BLOCKS, payload) + + def send_blocks_end(self, response_code: StreamEnd) -> None: + """ Send a BLOCKS-END message. + + This message marks the end of a stream of BLOCKS messages. It is mandatory to send any BLOCKS messages before, + in which case it would be an "empty" stream. 
+ """ + payload = str(int(response_code)) + self.log.debug('send BLOCKS-END', payload=payload) + self.send_message(ProtocolMessages.BLOCKS_END, payload) + + def handle_blocks_end(self, payload: str) -> None: + """ Handle a BLOCKS-END message. + + This is important to know that the other peer will not send any BLOCKS messages anymore as a response to a + previous command. + """ + self.log.debug('recv BLOCKS-END', payload=payload, size=self._blk_size) + + response_code = StreamEnd(int(payload)) + self.receiving_stream = False + assert self.protocol.connections is not None + + if self.state is not PeerState.SYNCING_BLOCKS: + self.log.error('unexpected BLOCKS-END', state=self.state) + self.protocol.send_error_and_close_connection('Not expecting to receive BLOCKS-END message') + return + + self.log.debug('block streaming ended', reason=str(response_code)) + + def handle_blocks(self, payload: str) -> None: + """ Handle a BLOCKS message. + """ + if self.state is not PeerState.SYNCING_BLOCKS: + self.log.error('unexpected BLOCK', state=self.state) + self.protocol.send_error_and_close_connection('Not expecting to receive BLOCK message') + return + + assert self.protocol.connections is not None + + blk_bytes = base64.b64decode(payload) + blk = tx_or_block_from_bytes(blk_bytes) + if not isinstance(blk, Block): + # Not a block. Punish peer? + return + blk.storage = self.tx_storage + + assert blk.hash is not None + + self._blk_received += 1 + if self._blk_received > self._blk_max_quantity + 1: + self.log.warn('too many blocks received', last_block=blk.hash_hex) + # Too many blocks. Punish peer? + self.state = PeerState.ERROR + return + + if self.partial_vertex_exists(blk.hash): + # We reached a block we already have. Skip it. 
+ self._blk_prev_hash = blk.hash + self._blk_repeated += 1 + if self.receiving_stream and self._blk_repeated > self.max_repeated_blocks: + self.log.debug('repeated block received', total_repeated=self._blk_repeated) + self.handle_many_repeated_blocks() + + # basic linearity validation, crucial for correctly predicting the next block's height + if self._blk_stream_reverse: + if self._last_received_block and blk.hash != self._last_received_block.get_block_parent_hash(): + self.handle_invalid_block('received block is not parent of previous block') + return + else: + if self._last_received_block and blk.get_block_parent_hash() != self._last_received_block.hash: + self.handle_invalid_block('received block is not child of previous block') + return + + try: + # this methods takes care of checking if the block already exists, + # it will take care of doing at least a basic validation + # self.log.debug('add new block', block=blk.hash_hex) + if self.partial_vertex_exists(blk.hash): + # XXX: early terminate? + self.log.debug('block early terminate?', blk_id=blk.hash.hex()) + else: + self.log.debug('block received', blk_id=blk.hash.hex()) + self.on_new_tx(blk, propagate_to_peers=False, quiet=True) + except HathorError: + self.handle_invalid_block(exc_info=True) + return + else: + self._last_received_block = blk + self._blk_repeated = 0 + # XXX: debugging log, maybe add timing info + if self._blk_received % 500 == 0: + self.log.debug('block streaming in progress', blocks_received=self._blk_received) + + def handle_invalid_block(self, msg: Optional[str] = None, *, exc_info: bool = False) -> None: + """ Call this method when receiving an invalid block. + """ + kwargs: dict[str, Any] = {} + if msg is not None: + kwargs['error'] = msg + if exc_info: + kwargs['exc_info'] = True + self.log.warn('invalid new block', **kwargs) + # Invalid block?! 
+ self.state = PeerState.ERROR + + def handle_many_repeated_blocks(self) -> None: + """ Call this when a stream sends too many blocks in sequence that we already have. + """ + self.send_stop_block_streaming() + self.receiving_stream = False + + def send_stop_block_streaming(self) -> None: + """ Send a STOP-BLOCK-STREAMING message. + + This asks the other peer to stop a running block stream. + """ + self.send_message(ProtocolMessages.STOP_BLOCK_STREAMING) + + def handle_stop_block_streaming(self, payload: str) -> None: + """ Handle a STOP-BLOCK-STREAMING message. + + This means the remote peer wants to stop the current block stream. + """ + if not self.blockchain_streaming or not self._is_streaming: + self.log.debug('got stop streaming message with no streaming running') + return + + self.log.debug('got stop streaming message') + self.blockchain_streaming.stop() + self.blockchain_streaming = None + + def get_peer_best_block(self) -> Deferred[dict[str, Any]]: + """ Async call to get the remote peer's best block. + """ + if self._deferred_best_block is not None: + raise Exception('latest_deferred is not None') + + self.send_get_best_block() + self._deferred_best_block = Deferred() + return self._deferred_best_block + + def send_get_best_block(self) -> None: + """ Send a GET-BEST-BLOCK messsage. + """ + self.send_message(ProtocolMessages.GET_BEST_BLOCK) + + def handle_get_best_block(self, payload: str) -> None: + """ Handle a GET-BEST-BLOCK message. + """ + best_block = self.tx_storage.get_best_block() + meta = best_block.get_metadata() + data = {'block': best_block.hash_hex, 'height': meta.height} + self.send_message(ProtocolMessages.BEST_BLOCK, json.dumps(data)) + + def handle_best_block(self, payload: str) -> None: + """ Handle a BEST-BLOCK message. 
+ """ + data = json.loads(payload) + assert self.protocol.connections is not None + self.log.debug('got best block', **data) + data['block'] = bytes.fromhex(data['block']) + + deferred = self._deferred_best_block + self._deferred_best_block = None + if deferred: + deferred.callback(data) + + def _setup_tx_streaming(self): + """ Common setup before starting an outgoing transaction stream. + """ + self._tx_received = 0 + self._tx_max_quantity = DEFAULT_STREAMING_LIMIT # XXX: maybe this is redundant + # XXX: what else can we add for checking if everything is going well? + + def send_get_transactions_bfs(self, start_from: list[bytes], until_first_block: bytes) -> None: + """ Send a GET-TRANSACTIONS-BFS message. + + This will request a BFS of all transactions starting from start_from list and walking back into parents/inputs. + + The start_from list can contain blocks, but they won't be sent. For example if a block B1 has T1 and T2 as + transaction parents, start_from=[B1] and start_from=[T1, T2] will have the same result. + + The stop condition is reaching transactions/inputs that have a first_block of height less or equal than the + height of until_first_block. The other peer will return an empty response if it doesn't have any of the + transactions in start_from or if it doesn't have the until_first_block block. + """ + self._setup_tx_streaming() + start_from_hexlist = [tx.hex() for tx in start_from] + until_first_block_hex = until_first_block.hex() + self.log.debug('send_get_transactions_bfs', start_from=start_from_hexlist, last_block=until_first_block_hex) + payload = json.dumps(dict( + start_from=start_from_hexlist, + until_first_block=until_first_block_hex, + )) + self.send_message(ProtocolMessages.GET_TRANSACTIONS_BFS, payload) + self.receiving_stream = True + + def handle_get_transactions_bfs(self, payload: str) -> None: + """ Handle a GET-TRANSACTIONS-BFS message. 
+ """ + if self._is_streaming: + self.log.warn('ignore GET-TRANSACTIONS-BFS, already streaming') + return + data = json.loads(payload) + # XXX: todo verify this limit while parsing the payload. + start_from = data['start_from'] + if len(start_from) > MAX_GET_TRANSACTIONS_BFS_LEN: + self.log.error('too many transactions in GET-TRANSACTIONS-BFS', state=self.state) + self.protocol.send_error_and_close_connection('Too many transactions in GET-TRANSACTIONS-BFS') + return + self.log.debug('handle_get_transactions_bfs', **data) + start_from = [bytes.fromhex(tx_hash_hex) for tx_hash_hex in start_from] + until_first_block = bytes.fromhex(data['until_first_block']) + self.send_transactions_bfs(start_from, until_first_block) + + def send_transactions_bfs(self, start_from: list[bytes], until_first_block: bytes) -> None: + """ Start a transactions BFS stream. + """ + start_from_txs = [] + for start_from_hash in start_from: + try: + start_from_txs.append(self.tx_storage.get_transaction(start_from_hash)) + except TransactionDoesNotExist: + # In case the tx does not exist we send a NOT-FOUND message + self.log.debug('requested start_from_hash not found', start_from_hash=start_from_hash.hex()) + self.send_message(ProtocolMessages.NOT_FOUND, start_from_hash.hex()) + return + if not self.tx_storage.transaction_exists(until_first_block): + # In case the tx does not exist we send a NOT-FOUND message + self.log.debug('requested until_first_block not found', until_first_block=until_first_block.hex()) + self.send_message(ProtocolMessages.NOT_FOUND, until_first_block.hex()) + return + if self.transactions_streaming is not None and self.transactions_streaming.is_running: + self.transactions_streaming.stop() + self.transactions_streaming = TransactionsStreaming(self, start_from_txs, until_first_block, + limit=self.DEFAULT_STREAMING_LIMIT) + self.transactions_streaming.start() + + def send_transaction(self, tx: Transaction) -> None: + """ Send a TRANSACTION message. 
+ """ + # payload = bytes(tx).hex() # fails for big transactions + payload = base64.b64encode(bytes(tx)).decode('ascii') + self.send_message(ProtocolMessages.TRANSACTION, payload) + + def send_transactions_end(self, response_code: StreamEnd) -> None: + """ Send a TRANSACTIONS-END message. + """ + payload = str(int(response_code)) + self.log.debug('send TRANSACTIONS-END', payload=payload) + self.send_message(ProtocolMessages.TRANSACTIONS_END, payload) + + def handle_transactions_end(self, payload: str) -> None: + """ Handle a TRANSACTIONS-END message. + """ + self.log.debug('recv TRANSACTIONS-END', payload=payload, size=self._blk_size) + + response_code = StreamEnd(int(payload)) + self.receiving_stream = False + assert self.protocol.connections is not None + + if self.state is not PeerState.SYNCING_TRANSACTIONS: + self.log.error('unexpected TRANSACTIONS-END', state=self.state) + self.protocol.send_error_and_close_connection('Not expecting to receive TRANSACTIONS-END message') + return + + self.log.debug('transaction streaming ended', reason=str(response_code)) + + def handle_transaction(self, payload: str) -> None: + """ Handle a TRANSACTION message. + """ + assert self.protocol.connections is not None + + # tx_bytes = bytes.fromhex(payload) + tx_bytes = base64.b64decode(payload) + tx = tx_or_block_from_bytes(tx_bytes) + assert tx.hash is not None + if not isinstance(tx, Transaction): + self.log.warn('not a transaction', hash=tx.hash_hex) + # Not a transaction. Punish peer? + return + + self._tx_received += 1 + if self._tx_received > self._tx_max_quantity + 1: + self.log.warn('too many txs received') + self.state = PeerState.ERROR + return + + try: + # this methods takes care of checking if the tx already exists, it will take care of doing at least + # a basic validation + # self.log.debug('add new tx', tx=tx.hash_hex) + if self.partial_vertex_exists(tx.hash): + # XXX: early terminate? 
+ self.log.debug('tx early terminate?', tx_id=tx.hash.hex()) + else: + self.log.debug('tx received', tx_id=tx.hash.hex()) + self.on_new_tx(tx, propagate_to_peers=False, quiet=True, reject_locked_reward=True) + except HathorError: + self.log.warn('invalid new tx', exc_info=True) + # Invalid block?! + # Invalid transaction?! + # Maybe stop syncing and punish peer. + self.state = PeerState.ERROR + return + else: + # XXX: debugging log, maybe add timing info + if self._tx_received % 100 == 0: + self.log.debug('tx streaming in progress', txs_received=self._tx_received) + + @inlineCallbacks + def get_tx(self, tx_id: bytes) -> Generator[Deferred, Any, BaseTransaction]: + """ Async method to get a transaction from the db/cache or to download it. + """ + tx = self._get_tx_cache.get(tx_id) + if tx is not None: + self.log.debug('tx in cache', tx=tx_id.hex()) + return tx + try: + tx = self.tx_storage.get_transaction(tx_id) + except TransactionDoesNotExist: + tx = yield self.get_data(tx_id, 'mempool') + assert tx is not None + if tx.hash != tx_id: + self.protocol.send_error_and_close_connection(f'DATA mempool {tx_id.hex()} hash mismatch') + raise + return tx + + def get_data(self, tx_id: bytes, origin: str) -> Deferred[BaseTransaction]: + """ Async method to request a tx by id. + """ + # TODO: deal with stale `get_data` calls + if origin != 'mempool': + raise ValueError(f'origin={origin} not supported, only origin=mempool is supported') + deferred = self._deferred_txs.get(tx_id, None) + if deferred is None: + deferred = self._deferred_txs[tx_id] = Deferred() + self.send_get_data(tx_id, origin=origin) + self.log.debug('get_data of new tx_id', deferred=deferred, key=tx_id.hex()) + else: + # XXX: can we re-use deferred objects like this? + self.log.debug('get_data of same tx_id, reusing deferred', deferred=deferred, key=tx_id.hex()) + return deferred + + def _on_get_data(self, tx: BaseTransaction, origin: str) -> None: + """ Called when a requested tx is received. 
+ """ + assert tx.hash is not None + deferred = self._deferred_txs.pop(tx.hash, None) + if deferred is None: + # Peer sent the wrong transaction?! + # XXX: ban peer? + self.protocol.send_error_and_close_connection(f'DATA {origin}: with tx that was not requested') + return + self.log.debug('get_data fulfilled', deferred=deferred, key=tx.hash.hex()) + self._get_tx_cache[tx.hash] = tx + if len(self._get_tx_cache) > self._get_tx_cache_maxsize: + self._get_tx_cache.popitem(last=False) + deferred.callback(tx) + + def send_data(self, tx: BaseTransaction, *, origin: str = '') -> None: + """ Send a DATA message. + """ + self.log.debug('send tx', tx=tx.hash_hex) + tx_payload = base64.b64encode(tx.get_struct()).decode('ascii') + if not origin: + payload = tx_payload + else: + payload = ' '.join([origin, tx_payload]) + self.send_message(ProtocolMessages.DATA, payload) + + def send_get_data(self, txid: bytes, *, origin: Optional[str] = None) -> None: + """ Send a GET-DATA message for a given txid. + """ + data = { + 'txid': txid.hex(), + } + if origin is not None: + data['origin'] = origin + payload = json.dumps(data) + self.send_message(ProtocolMessages.GET_DATA, payload) + + def handle_get_data(self, payload: str) -> None: + """ Handle a GET-DATA message. + """ + data = json.loads(payload) + txid_hex = data['txid'] + origin = data.get('origin', '') + # self.log.debug('handle_get_data', payload=hash_hex) + try: + tx = self.protocol.node.tx_storage.get_transaction(bytes.fromhex(txid_hex)) + self.send_data(tx, origin=origin) + except TransactionDoesNotExist: + # In case the tx does not exist we send a NOT-FOUND message + self.send_message(ProtocolMessages.NOT_FOUND, txid_hex) + + def handle_data(self, payload: str) -> None: + """ Handle a DATA message. 
+ """ + if not payload: + return + part1, _, part2 = payload.partition(' ') + if not part2: + origin = None + data = base64.b64decode(part1) + else: + origin = part1 + data = base64.b64decode(part2) + + try: + tx = tx_or_block_from_bytes(data) + except struct.error: + # Invalid data for tx decode + return + + if origin: + if origin != 'mempool': + # XXX: ban peer? + self.protocol.send_error_and_close_connection(f'DATA {origin}: unsupported origin') + return + assert tx is not None + self._on_get_data(tx, origin) + return + + assert tx is not None + assert tx.hash is not None + if self.protocol.node.tx_storage.get_genesis(tx.hash): + # We just got the data of a genesis tx/block. What should we do? + # Will it reduce peer reputation score? + return + + tx.storage = self.protocol.node.tx_storage + assert tx.hash is not None + + if self.partial_vertex_exists(tx.hash): + # transaction already added to the storage, ignore it + # XXX: maybe we could add a hash blacklist and punish peers propagating known bad txs + self.manager.tx_storage.compare_bytes_with_local_tx(tx) + return + else: + # If we have not requested the data, it is a new transaction being propagated + # in the network, thus, we propagate it as well. + if tx.can_validate_full(): + self.log.info('tx received in real time from peer', tx=tx.hash_hex, peer=self.protocol.get_peer_id()) + self.on_new_tx(tx, propagate_to_peers=True) + else: + self.log.info('skipping tx received in real time from peer', + tx=tx.hash_hex, peer=self.protocol.get_peer_id()) + + def on_new_tx(self, tx: BaseTransaction, *, quiet: bool = False, propagate_to_peers: bool = True, + sync_checkpoints: bool = False, reject_locked_reward: bool = True) -> bool: + """ This method handle everything related to adding potentially partially validated transactions. + + Call this instead of HathorManager.on_new_tx, unless `tx` must be fully validated (for example when receiving + realtime DATA pushes). 
+ """ + + assert self.tx_storage.indexes is not None + assert tx.hash is not None + + # XXX: "refresh" the transaction so there isn't a duplicate in memory + if self.partial_vertex_exists(tx.hash): + with self.tx_storage.allow_partially_validated_context(): + self.tx_storage.compare_bytes_with_local_tx(tx) + tx = self.tx_storage.get_transaction(tx.hash) + assert tx.hash is not None + + tx.storage = self.tx_storage + + with self.tx_storage.allow_partially_validated_context(): + metadata = tx.get_metadata() + + if metadata.validation.is_fully_connected() or tx.can_validate_full(): + if not self.manager.on_new_tx(tx): + return False + elif sync_checkpoints: + assert self.tx_storage.indexes.deps is not None + with self.tx_storage.allow_partially_validated_context(): + metadata.children = self.tx_storage.indexes.deps.known_children(tx) + try: + tx.validate_checkpoint(self.manager.checkpoints) + except HathorError: + self.log.warn('on_new_tx(): checkpoint validation failed', tx=tx.hash_hex, exc_info=True) + return False + self.tx_storage.save_transaction(tx) + self.tx_storage.indexes.deps.add_tx(tx) + self.manager.log_new_object(tx, 'new {} partially accepted while syncing checkpoints', quiet=quiet) + else: + assert self.tx_storage.indexes.deps is not None + with self.tx_storage.allow_partially_validated_context(): + if isinstance(tx, Block) and not tx.has_basic_block_parent(): + self.log.warn('on_new_tx(): block parent needs to be at least basic-valid', tx=tx.hash_hex) + return False + if not tx.validate_basic(): + self.log.warn('on_new_tx(): basic validation failed', tx=tx.hash_hex) + return False + + # The method below adds the tx as a child of the parents + # This needs to be called right before the save because we were adding the children + # in the tx parents even if the tx was invalid (failing the verifications above) + # then I would have a children that was not in the storage + self.tx_storage.save_transaction(tx) + self.tx_storage.indexes.deps.add_tx(tx) + 
self.manager.log_new_object(tx, 'new {} partially accepted', quiet=quiet) + + if self.tx_storage.indexes.deps is not None: + self.tx_storage.indexes.deps.remove_from_needed_index(tx.hash) + + if self.tx_storage.indexes.deps is not None: + try: + self.manager.sync_v2_step_validations([tx], quiet=quiet) + except (AssertionError, HathorError): + self.log.warn('on_new_tx(): step validations failed', tx=tx.hash_hex, exc_info=True) + return False + + return True diff --git a/hathor/p2p/sync_v2/mempool.py b/hathor/p2p/sync_v2/mempool.py new file mode 100644 index 000000000..da7f5d040 --- /dev/null +++ b/hathor/p2p/sync_v2/mempool.py @@ -0,0 +1,121 @@ +# Copyright 2020 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import deque +from typing import TYPE_CHECKING, Any, Generator, Optional + +from structlog import get_logger +from twisted.internet.defer import Deferred, inlineCallbacks + +from hathor.transaction import BaseTransaction + +if TYPE_CHECKING: + from hathor.p2p.sync_v2.manager import NodeBlockSync + +logger = get_logger() + + +class SyncMempoolManager: + """Manage the sync-v2 mempool with one peer. + """ + def __init__(self, sync_agent: 'NodeBlockSync'): + """Initialize the sync-v2 mempool manager.""" + self.log = logger.new(peer=sync_agent.protocol.get_short_peer_id()) + + # Shortcuts. 
+ self.sync_agent = sync_agent + self.manager = self.sync_agent.manager + self.tx_storage = self.manager.tx_storage + self.reactor = self.sync_agent.reactor + + # Set of tips we know but couldn't add to the DAG yet. + self.missing_tips: set[bytes] = set() + + # Maximum number of items in the DFS. + self.MAX_STACK_LENGTH: int = 1000 + + # Whether the mempool algorithm is running + self._is_running = False + + def is_running(self) -> bool: + """Whether the sync-mempool is currently running.""" + return self._is_running + + def run(self) -> None: + """Starts _run in, won't start again if already running.""" + if self.is_running(): + self.log.warn('already started') + return + self._is_running = True + self.reactor.callLater(0, self._run) + + @inlineCallbacks + def _run(self) -> Generator[Deferred, Any, None]: + try: + yield self._unsafe_run() + finally: + # sync_agent.run_sync will start it again when needed + self._is_running = False + + @inlineCallbacks + def _unsafe_run(self) -> Generator[Deferred, Any, None]: + """Run a single loop of the sync-v2 mempool.""" + if not self.missing_tips: + # No missing tips? Let's get them! + tx_hashes: list[bytes] = yield self.sync_agent.get_tips() + self.missing_tips.update(h for h in tx_hashes if not self.tx_storage.transaction_exists(h)) + + while self.missing_tips: + self.log.debug('We have missing tips! Let\'s start!', missing_tips=[x.hex() for x in self.missing_tips]) + tx_id = next(iter(self.missing_tips)) + tx: BaseTransaction = yield self.sync_agent.get_tx(tx_id) + # Stack used by the DFS in the dependencies. + # We use a deque for performance reasons. 
+ self.log.debug('start mempool DFS', tx=tx.hash_hex) + yield self._dfs(deque([tx])) + + @inlineCallbacks + def _dfs(self, stack: deque[BaseTransaction]) -> Generator[Deferred, Any, None]: + """DFS method.""" + while stack: + tx = stack[-1] + self.log.debug('step mempool DFS', tx=tx.hash_hex, stack_len=len(stack)) + missing_dep = self._next_missing_dep(tx) + if missing_dep is None: + self.log.debug(r'No dependencies missing! \o/') + self._add_tx(tx) + assert tx == stack.pop() + else: + self.log.debug('Iterate in the DFS.', missing_dep=missing_dep.hex()) + tx_dep = yield self.sync_agent.get_tx(missing_dep) + stack.append(tx_dep) + if len(stack) > self.MAX_STACK_LENGTH: + stack.popleft() + + def _next_missing_dep(self, tx: BaseTransaction) -> Optional[bytes]: + """Get the first missing dependency found of tx.""" + assert not tx.is_block + for txin in tx.inputs: + if not self.tx_storage.transaction_exists(txin.tx_id): + return txin.tx_id + for parent in tx.parents: + if not self.tx_storage.transaction_exists(parent): + return parent + return None + + def _add_tx(self, tx: BaseTransaction) -> None: + """Add tx to the DAG.""" + assert tx.hash is not None + self.missing_tips.discard(tx.hash) + self.manager.on_new_tx(tx) diff --git a/hathor/p2p/sync_v2/streamers.py b/hathor/p2p/sync_v2/streamers.py new file mode 100644 index 000000000..968741c65 --- /dev/null +++ b/hathor/p2p/sync_v2/streamers.py @@ -0,0 +1,260 @@ +# Copyright 2021 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import IntFlag +from typing import TYPE_CHECKING, Optional + +from structlog import get_logger +from twisted.internet.interfaces import IConsumer, IDelayedCall, IPushProducer +from zope.interface import implementer + +from hathor.transaction import BaseTransaction, Block, Transaction +from hathor.transaction.storage.traversal import BFSOrderWalk +from hathor.util import verified_cast + +if TYPE_CHECKING: + from hathor.p2p.protocol import HathorProtocol + from hathor.p2p.sync_v2.manager import NodeBlockSync + +logger = get_logger() + +DEFAULT_STREAMING_LIMIT = 1000 + + +class StreamEnd(IntFlag): + END_HASH_REACHED = 0 + NO_MORE_BLOCKS = 1 + LIMIT_EXCEEDED = 2 + STREAM_BECAME_VOIDED = 3 # this will happen when the current chain becomes voided while it is being sent + TX_NOT_CONFIRMED = 4 + + def __str__(self): + if self is StreamEnd.END_HASH_REACHED: + return 'end hash reached' + elif self is StreamEnd.NO_MORE_BLOCKS: + return 'end of blocks, no more blocks to download from this peer' + elif self is StreamEnd.LIMIT_EXCEEDED: + return 'streaming limit exceeded' + elif self is StreamEnd.STREAM_BECAME_VOIDED: + return 'streamed block chain became voided' + elif self is StreamEnd.TX_NOT_CONFIRMED: + return 'streamed reached a tx that is not confirmed' + else: + raise ValueError(f'invalid StreamEnd value: {self.value}') + + +@implementer(IPushProducer) +class _StreamingBase: + def __init__(self, node_sync: 'NodeBlockSync', *, limit: int = DEFAULT_STREAMING_LIMIT): + self.node_sync = node_sync + self.protocol: 'HathorProtocol' = node_sync.protocol + assert self.protocol.transport is not None + self.consumer = verified_cast(IConsumer, self.protocol.transport) + + self.counter = 0 + self.limit = limit + + self.is_running: bool = False + self.is_producing: bool = False + + self.delayed_call: Optional[IDelayedCall] = None + self.log = 
logger.new(peer=node_sync.protocol.get_short_peer_id()) + + def schedule_if_needed(self) -> None: + """Schedule `send_next` if needed.""" + if not self.is_running: + return + + if not self.is_producing: + return + + if self.delayed_call and self.delayed_call.active(): + return + + self.delayed_call = self.node_sync.reactor.callLater(0, self.send_next) + + def start(self) -> None: + """Start pushing.""" + self.log.debug('start streaming') + assert not self.node_sync._is_streaming + self.node_sync._is_streaming = True + self.is_running = True + self.consumer.registerProducer(self, True) + self.resumeProducing() + + def stop(self) -> None: + """Stop pushing.""" + self.log.debug('stop streaming') + assert self.node_sync._is_streaming + self.is_running = False + self.pauseProducing() + self.consumer.unregisterProducer() + self.node_sync._is_streaming = False + + def send_next(self) -> None: + """Push next block to peer.""" + raise NotImplementedError + + def resumeProducing(self) -> None: + """This method is automatically called to resume pushing data.""" + self.is_producing = True + self.schedule_if_needed() + + def pauseProducing(self) -> None: + """This method is automatically called to pause pushing data.""" + self.is_producing = False + if self.delayed_call and self.delayed_call.active(): + self.delayed_call.cancel() + + def stopProducing(self) -> None: + """This method is automatically called to stop pushing data.""" + self.pauseProducing() + + +class BlockchainStreaming(_StreamingBase): + def __init__(self, node_sync: 'NodeBlockSync', start_block: Block, end_hash: bytes, + *, limit: int = DEFAULT_STREAMING_LIMIT, reverse: bool = False): + super().__init__(node_sync, limit=limit) + + self.start_block = start_block + self.current_block: Optional[Block] = start_block + self.end_hash = end_hash + self.reverse = reverse + + def send_next(self) -> None: + """Push next block to peer.""" + assert self.is_running + assert self.is_producing + assert self.current_block is 
not None + + cur = self.current_block + assert cur is not None + assert cur.hash is not None + + if cur.hash == self.end_hash: + # only send the last when not reverse + if not self.reverse: + self.log.debug('send next block', blk_id=cur.hash.hex()) + self.node_sync.send_blocks(cur) + self.stop() + self.node_sync.send_blocks_end(StreamEnd.END_HASH_REACHED) + return + + if self.counter >= self.limit: + # only send the last when not reverse + if not self.reverse: + self.log.debug('send next block', blk_id=cur.hash.hex()) + self.node_sync.send_blocks(cur) + self.stop() + self.node_sync.send_blocks_end(StreamEnd.LIMIT_EXCEEDED) + return + + self.counter += 1 + + self.log.debug('send next block', blk_id=cur.hash.hex()) + self.node_sync.send_blocks(cur) + + if self.reverse: + self.current_block = cur.get_block_parent() + else: + self.current_block = cur.get_next_block_best_chain() + + # XXX: don't send the genesis or the current block + if self.current_block is None or self.current_block.is_genesis: + self.stop() + self.node_sync.send_blocks_end(StreamEnd.NO_MORE_BLOCKS) + return + + self.schedule_if_needed() + + +class TransactionsStreaming(_StreamingBase): + """Streams all transactions confirmed by the given block, from right to left (decreasing timestamp). + """ + + def __init__(self, node_sync: 'NodeBlockSync', start_from: list[BaseTransaction], last_block_hash: bytes, + *, limit: int = DEFAULT_STREAMING_LIMIT): + # XXX: is limit needed for tx streaming? Or let's always send all txs for + # a block? 
Very unlikely we'll reach this limit + super().__init__(node_sync, limit=limit) + + assert len(start_from) > 0 + assert start_from[0].storage is not None + self.storage = start_from[0].storage + self.last_block_hash = last_block_hash + self.last_block_height = 0 + + self.bfs = BFSOrderWalk(self.storage, is_dag_verifications=True, is_dag_funds=True, is_left_to_right=False) + self.iter = self.bfs.run(start_from, skip_root=False) + + def start(self) -> None: + super().start() + last_blk = self.storage.get_transaction(self.last_block_hash) + assert isinstance(last_blk, Block) + self.last_block_height = last_blk.get_height() + + # TODO: make this generic too? + def send_next(self) -> None: + """Push next transaction to peer.""" + assert self.is_running + assert self.is_producing + + try: + cur = next(self.iter) + except StopIteration: + # nothing more to send + self.stop() + self.node_sync.send_transactions_end(StreamEnd.END_HASH_REACHED) + return + + if cur.is_block: + if cur.hash == self.last_block_hash: + self.bfs.skip_neighbors(cur) + self.schedule_if_needed() + return + + assert isinstance(cur, Transaction) + assert cur.hash is not None + + cur_metadata = cur.get_metadata() + if cur_metadata.first_block is None: + self.log.debug('reached a tx that is not confirming, continuing anyway') + # XXX: related to issue #711 + # self.stop() + # self.node_sync.send_transactions_end(StreamEnd.TX_NOT_CONFIRMED) + # return + else: + assert cur_metadata.first_block is not None + first_blk_meta = self.storage.get_metadata(cur_metadata.first_block) + assert first_blk_meta is not None + confirmed_by_height = first_blk_meta.height + assert confirmed_by_height is not None + if confirmed_by_height <= self.last_block_height: + # got to a tx that is confirmed by the given last-block or an older block + self.log.debug('tx confirmed by block older than last_block', tx=cur.hash_hex, + confirmed_by_height=confirmed_by_height, last_block_height=self.last_block_height) + 
self.bfs.skip_neighbors(cur) + self.schedule_if_needed() + return + + self.log.debug('send next transaction', tx_id=cur.hash.hex()) + self.node_sync.send_transaction(cur) + + self.counter += 1 + if self.counter >= self.limit: + self.stop() + self.node_sync.send_transactions_end(StreamEnd.LIMIT_EXCEEDED) + return + + self.schedule_if_needed() diff --git a/hathor/p2p/sync_version.py b/hathor/p2p/sync_version.py index 8c9ab7ee6..8db49918a 100644 --- a/hathor/p2p/sync_version.py +++ b/hathor/p2p/sync_version.py @@ -24,7 +24,7 @@ class SyncVersion(Enum): # on. V1 = 'v1' V1_1 = 'v1.1' - V2 = 'v2-fake' # uses sync-v1 to mock sync-v2 + V2 = 'v2' def __str__(self): return f'sync-{self.value}' diff --git a/hathor/simulator/fake_connection.py b/hathor/simulator/fake_connection.py index 2bb061a0d..9db893559 100644 --- a/hathor/simulator/fake_connection.py +++ b/hathor/simulator/fake_connection.py @@ -41,7 +41,8 @@ def getPeerCertificate(self) -> X509: class FakeConnection: - def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, latency: float = 0): + def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, latency: float = 0, + autoreconnect: bool = False): """ :param: latency: Latency between nodes in seconds """ @@ -51,20 +52,14 @@ def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, late self.manager2 = manager2 self.latency = latency - self.is_connected = True - - self._proto1 = manager1.connections.server_factory.buildProtocol(HostnameAddress(b'fake', 0)) - self._proto2 = manager2.connections.client_factory.buildProtocol(HostnameAddress(b'fake', 0)) - - self.tr1 = HathorStringTransport(self._proto2.my_peer) - self.tr2 = HathorStringTransport(self._proto1.my_peer) + self.autoreconnect = autoreconnect + self.is_connected = False self._do_buffering = True self._buf1: deque[str] = deque() self._buf2: deque[str] = deque() - self._proto1.makeConnection(self.tr1) - self._proto2.makeConnection(self.tr2) + 
self.reconnect() @property def proto1(self): @@ -79,6 +74,35 @@ def disable_idle_timeout(self): self._proto1.disable_idle_timeout() self._proto2.disable_idle_timeout() + def is_both_synced(self) -> bool: + """Short-hand check that can be used to make "step loops" without having to guess the number of iterations.""" + from hathor.p2p.states.ready import ReadyState + conn1_aborting = self._proto1.aborting + conn2_aborting = self._proto2.aborting + if conn1_aborting or conn2_aborting: + self.log.debug('conn aborting', conn1_aborting=conn1_aborting, conn2_aborting=conn2_aborting) + return False + state1 = self._proto1.state + state2 = self._proto2.state + state1_is_ready = isinstance(state1, ReadyState) + state2_is_ready = isinstance(state2, ReadyState) + if not state1_is_ready or not state2_is_ready: + self.log.debug('peer not ready', peer1_ready=state1_is_ready, peer2_ready=state2_is_ready) + return False + assert isinstance(state1, ReadyState) # mypy can't infer this from the above + assert isinstance(state2, ReadyState) # mypy can't infer this from the above + state1_is_errored = state1.sync_agent.is_errored() + state2_is_errored = state2.sync_agent.is_errored() + if state1_is_errored or state2_is_errored: + self.log.debug('peer errored', peer1_errored=state1_is_errored, peer2_errored=state2_is_errored) + return False + state1_is_synced = state1.sync_agent.is_synced() + state2_is_synced = state2.sync_agent.is_synced() + if not state1_is_synced or not state2_is_synced: + self.log.debug('peer not synced', peer1_synced=state1_is_synced, peer2_synced=state2_is_synced) + return False + return True + def can_step(self) -> bool: """Short-hand check that can be used to make "step loops" without having to guess the number of iterations.""" from hathor.p2p.states.ready import ReadyState @@ -96,13 +120,13 @@ def can_step(self) -> bool: return True assert isinstance(state1, ReadyState) # mypy can't infer this from the above assert isinstance(state2, ReadyState) # mypy can't 
infer this from the above - state1_is_errored = state1.sync_manager.is_errored() - state2_is_errored = state2.sync_manager.is_errored() + state1_is_errored = state1.sync_agent.is_errored() + state2_is_errored = state2.sync_agent.is_errored() if state1_is_errored or state2_is_errored: self.log.debug('peer errored', peer1_errored=state1_is_errored, peer2_errored=state2_is_errored) return False - state1_is_synced = state1.sync_manager.is_synced() - state2_is_synced = state2.sync_manager.is_synced() + state1_is_synced = state1.sync_agent.is_synced() + state2_is_synced = state2.sync_agent.is_synced() if not state1_is_synced or not state2_is_synced: self.log.debug('peer not synced', peer1_synced=state1_is_synced, peer2_synced=state2_is_synced) return True @@ -155,6 +179,9 @@ def run_one_step(self, debug=False, force=False): if debug: self.log.debug('[2->1] delivered', line=line2) + if self.autoreconnect and self._proto1.aborting and self._proto2.aborting: + self.reconnect() + return True def run_until_empty(self, max_steps: Optional[int] = None, debug: bool = False, force: bool = False) -> None: @@ -178,6 +205,20 @@ def disconnect(self, reason): self._proto2.connectionLost(reason) self.is_connected = False + def reconnect(self) -> None: + from twisted.python.failure import Failure + if self.is_connected: + self.disconnect(Failure(Exception('forced reconnection'))) + self._buf1.clear() + self._buf2.clear() + self._proto1 = self.manager1.connections.server_factory.buildProtocol(HostnameAddress(b'fake', 0)) + self._proto2 = self.manager2.connections.client_factory.buildProtocol(HostnameAddress(b'fake', 0)) + self.tr1 = HathorStringTransport(self._proto2.my_peer) + self.tr2 = HathorStringTransport(self._proto1.my_peer) + self._proto1.makeConnection(self.tr1) + self._proto2.makeConnection(self.tr2) + self.is_connected = True + def is_empty(self): if self._do_buffering and (self._buf1 or self._buf2): return False diff --git a/hathor/simulator/trigger.py 
b/hathor/simulator/trigger.py index 2df07a457..2d54831f5 100644 --- a/hathor/simulator/trigger.py +++ b/hathor/simulator/trigger.py @@ -13,9 +13,10 @@ # limitations under the License. from abc import ABC, abstractmethod -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Callable if TYPE_CHECKING: + from hathor.simulator.fake_connection import FakeConnection from hathor.simulator.miner import AbstractMiner from hathor.simulator.tx_generator import RandomTransactionGenerator from hathor.wallet import BaseWallet @@ -71,3 +72,33 @@ def reset(self) -> None: def should_stop(self) -> bool: diff = self.tx_generator.transactions_found - self.initial_counter return diff >= self.quantity + + +class StopWhenTrue(Trigger): + """Stop the simulation when a function returns true.""" + def __init__(self, fn: Callable[[], bool]) -> None: + self.fn = fn + + def should_stop(self) -> bool: + return self.fn() + + +class StopWhenSynced(Trigger): + """Stop the simulation when both agents running on a connection report that they have synced.""" + def __init__(self, connection: 'FakeConnection') -> None: + self.connection = connection + + def should_stop(self) -> bool: + return self.connection.is_both_synced() + + +class All(Trigger): + """Aggregator that returns True when all sub-triggers return True. 
+ + XXX: note that not all sub-triggers will be called, this will short-circuit, in order, if one sub-trigger returns + False, which follows the same behavior as builtins.all""" + def __init__(self, sub_triggers: list[Trigger]) -> None: + self._sub_triggers = sub_triggers + + def should_stop(self) -> bool: + return all(trigger.should_stop() for trigger in self._sub_triggers) diff --git a/hathor/transaction/base_transaction.py b/hathor/transaction/base_transaction.py index f87f1193e..10c4dfd0c 100644 --- a/hathor/transaction/base_transaction.py +++ b/hathor/transaction/base_transaction.py @@ -534,6 +534,7 @@ def validate_full(self, skip_block_weight_verification: bool = False, sync_check from hathor.transaction.transaction_metadata import ValidationState meta = self.get_metadata() + # skip full validation when it is a checkpoint if meta.validation.is_checkpoint(): self.set_validation(ValidationState.CHECKPOINT_FULL) diff --git a/hathor/transaction/block.py b/hathor/transaction/block.py index 51ffacc55..699c6e763 100644 --- a/hathor/transaction/block.py +++ b/hathor/transaction/block.py @@ -147,7 +147,18 @@ def _get_previous_feature_activation_bit_counts(self) -> list[int]: return parent_block.get_feature_activation_bit_counts() def get_next_block_best_chain_hash(self) -> Optional[bytes]: - """Return the hash of the next (child/left-to-right) block in the best blockchain. + """Return the hash of the next block in the best blockchain. The blockchain is + written from left-to-right (->), meaning the next block has a greater height. + In a timeline, the parent block (left) comes before the child (right). 
+ + +-----------+ +-----------+ +-----------+ + --->| height: 1 |------>| height: 2 |------>| height: 3 |---> + | parent | | current | | child | + +-----------+ +-----------+ +-----------+ + left right + past future + + "left-to-right" """ assert self.storage is not None meta = self.get_metadata() @@ -168,7 +179,18 @@ def get_next_block_best_chain_hash(self) -> Optional[bytes]: return candidates[0] def get_next_block_best_chain(self) -> Optional['Block']: - """Return the next (child/left-to-right)block in the best blockchain. + """Return the next block in the best blockchain. The blockchain is written + from left-to-righ (->), meaning the next block has a greater height. + In a timeline, the parent block (left) comes first of the child (right). + + +-----------+ +-----------+ +-----------+ + --->| height: 1 |------>| height: 2 |------>| height: 3 |---> + | parent | | current | | child | + +-----------+ +-----------+ +-----------+ + left right + past future + + "left-to-right" """ assert self.storage is not None h = self.get_next_block_best_chain_hash() @@ -291,7 +313,8 @@ def has_basic_block_parent(self) -> bool: if not self.storage.transaction_exists(parent_block_hash): return False metadata = self.storage.get_metadata(parent_block_hash) - assert metadata is not None + if metadata is None: + return False return metadata.validation.is_at_least_basic() def verify_basic(self, skip_block_weight_verification: bool = False) -> None: @@ -314,7 +337,7 @@ def verify_checkpoint(self, checkpoints: list[Checkpoint]) -> None: raise CheckpointError(f'Invalid new block {self.hash_hex}: checkpoint hash does not match') else: # TODO: check whether self is a parent of any checkpoint-valid block, this is left for a future PR - raise NotImplementedError + pass def verify_weight(self) -> None: """Validate minimum block difficulty.""" @@ -439,3 +462,9 @@ def update_feature_state(self, *, feature: Feature, state: FeatureState) -> None metadata.feature_states = feature_states 
self.storage.save_transaction(self, only_metadata=True) + + def get_feature_activation_bit_value(self, bit: int) -> int: + """Get the feature activation bit value for a specific bit position.""" + bit_list = self._get_feature_activation_bit_list() + + return bit_list[bit] diff --git a/hathor/transaction/storage/cache_storage.py b/hathor/transaction/storage/cache_storage.py index 8a49b4938..0cc41d359 100644 --- a/hathor/transaction/storage/cache_storage.py +++ b/hathor/transaction/storage/cache_storage.py @@ -13,7 +13,7 @@ # limitations under the License. from collections import OrderedDict -from typing import Any, Iterator, Optional, Set +from typing import Any, Iterator, Optional from twisted.internet import threads @@ -29,8 +29,8 @@ class TransactionCacheStorage(BaseTransactionStorage): """Caching storage to be used 'on top' of other storages. """ - cache: 'OrderedDict[bytes, BaseTransaction]' - dirty_txs: Set[bytes] + cache: OrderedDict[bytes, BaseTransaction] + dirty_txs: set[bytes] def __init__(self, store: 'BaseTransactionStorage', reactor: Reactor, interval: int = 5, capacity: int = 10000, *, indexes: Optional[IndexesManager], _clone_if_needed: bool = False): @@ -63,7 +63,7 @@ def __init__(self, store: 'BaseTransactionStorage', reactor: Reactor, interval: self._clone_if_needed = _clone_if_needed self.cache = OrderedDict() # dirty_txs has the txs that have been modified but are not persisted yet - self.dirty_txs = set() # Set[bytes(hash)] + self.dirty_txs = set() self.stats = dict(hit=0, miss=0) # we need to use only one weakref dict, so we must first initialize super, and then @@ -120,7 +120,7 @@ def _start_flush_thread(self) -> None: deferred.addErrback(self._err_flush_thread) self.flush_deferred = deferred - def _cb_flush_thread(self, flushed_txs: Set[bytes]) -> None: + def _cb_flush_thread(self, flushed_txs: set[bytes]) -> None: self.reactor.callLater(self.interval, self._start_flush_thread) self.flush_deferred = None @@ -129,7 +129,7 @@ def 
_err_flush_thread(self, reason: Any) -> None: self.reactor.callLater(self.interval, self._start_flush_thread) self.flush_deferred = None - def _flush_to_storage(self, dirty_txs_copy: Set[bytes]) -> None: + def _flush_to_storage(self, dirty_txs_copy: set[bytes]) -> None: """Write dirty pages to disk.""" for tx_hash in dirty_txs_copy: # a dirty tx might be removed from self.cache outside this thread: if _update_cache is called @@ -155,7 +155,7 @@ def save_transaction(self, tx: 'BaseTransaction', *, only_metadata: bool = False # call super which adds to index if needed super().save_transaction(tx, only_metadata=only_metadata) - def get_all_genesis(self) -> Set[BaseTransaction]: + def get_all_genesis(self) -> set[BaseTransaction]: return self.store.get_all_genesis() def _save_transaction(self, tx: BaseTransaction, *, only_metadata: bool = False) -> None: diff --git a/hathor/transaction/storage/rocksdb_storage.py b/hathor/transaction/storage/rocksdb_storage.py index e2bbd7b84..ec74e6227 100644 --- a/hathor/transaction/storage/rocksdb_storage.py +++ b/hathor/transaction/storage/rocksdb_storage.py @@ -49,6 +49,7 @@ def __init__(self, rocksdb_storage: RocksDBStorage, indexes: Optional[IndexesMan self._cf_attr = rocksdb_storage.get_or_create_column_family(_CF_NAME_ATTR) self._cf_migrations = rocksdb_storage.get_or_create_column_family(_CF_NAME_MIGRATIONS) + self._rocksdb_storage = rocksdb_storage self._db = rocksdb_storage.get_db() super().__init__(indexes=indexes) diff --git a/hathor/transaction/storage/transaction_storage.py b/hathor/transaction/storage/transaction_storage.py index 485483732..bd089dcb1 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -25,6 +25,7 @@ from hathor.conf import HathorSettings from hathor.indexes import IndexesManager +from hathor.indexes.height_index import HeightInfo from hathor.profiler import get_cpu_profiler from hathor.pubsub import PubSubManager from 
hathor.transaction.base_transaction import BaseTransaction @@ -590,8 +591,12 @@ def get_metadata(self, hash_bytes: bytes) -> Optional[TransactionMetadata]: def get_all_transactions(self) -> Iterator[BaseTransaction]: """Return all vertices (transactions and blocks) within the allowed scope. """ + # It is necessary to retain a copy of the current scope because this method will yield + # and the scope may undergo changes. By doing so, we ensure the usage of the scope at the + # time of iterator creation. + scope = self.get_allow_scope() for tx in self._get_all_transactions(): - if self.get_allow_scope().is_allowed(tx): + if scope.is_allowed(tx): yield tx @abstractmethod @@ -645,6 +650,11 @@ def get_best_block_tips(self, timestamp: Optional[float] = None, *, skip_cache: self._best_block_tips_cache = best_tip_blocks[:] return best_tip_blocks + @abstractmethod + def get_n_height_tips(self, n_blocks: int) -> list[HeightInfo]: + assert self.indexes is not None + return self.indexes.height.get_n_height_tips(n_blocks) + def get_weight_best_block(self) -> float: heads = [self.get_transaction(h) for h in self.get_best_block_tips()] highest_weight = 0.0 @@ -1039,6 +1049,14 @@ def iter_mempool_from_tx_tips(self) -> Iterator[Transaction]: assert isinstance(tx, Transaction) yield tx + def iter_mempool_tips_from_best_index(self) -> Iterator[Transaction]: + """Get tx tips in the mempool, using the best available index (mempool_tips or tx_tips)""" + assert self.indexes is not None + if self.indexes.mempool_tips is not None: + yield from self.indexes.mempool_tips.iter(self) + else: + yield from self.iter_mempool_tips_from_tx_tips() + def iter_mempool_from_best_index(self) -> Iterator[Transaction]: """Get all transactions in the mempool, using the best available index (mempool_tips or tx_tips)""" assert self.indexes is not None @@ -1074,6 +1092,8 @@ def __init__(self, indexes: Optional[IndexesManager] = None, pubsub: Optional[An # Either save or verify all genesis. 
self._save_or_verify_genesis() + self._latest_n_height_tips: list[HeightInfo] = [] + @property def latest_timestamp(self) -> int: assert self.indexes is not None @@ -1102,6 +1122,16 @@ def remove_cache(self) -> None: def get_best_block_tips(self, timestamp: Optional[float] = None, *, skip_cache: bool = False) -> list[bytes]: return super().get_best_block_tips(timestamp, skip_cache=skip_cache) + def get_n_height_tips(self, n_blocks: int) -> list[HeightInfo]: + block = self.get_best_block() + if self._latest_n_height_tips: + best_block = self._latest_n_height_tips[0] + if block.hash == best_block.id and n_blocks <= len(self._latest_n_height_tips): + return self._latest_n_height_tips[:n_blocks] + + self._latest_n_height_tips = super().get_n_height_tips(n_blocks) + return self._latest_n_height_tips[:n_blocks] + def get_weight_best_block(self) -> float: return super().get_weight_best_block() diff --git a/hathor/types.py b/hathor/types.py index a77a774fc..07c42194a 100644 --- a/hathor/types.py +++ b/hathor/types.py @@ -15,7 +15,7 @@ # XXX There is a lot of refactor to be done before we can use `NewType`. # So, let's skip using NewType until everything is refactored. 
-VertexId = bytes # NewType('TxId', bytes) +VertexId = bytes # NewType('TxId', bytes) Address = bytes # NewType('Address', bytes) TxOutputScript = bytes # NewType('TxOutputScript', bytes) Timestamp = int # NewType('Timestamp', int) diff --git a/hathor/util.py b/hathor/util.py index 1d57fe16a..248ee465e 100644 --- a/hathor/util.py +++ b/hathor/util.py @@ -363,6 +363,11 @@ def collect_n(it: Iterator[_T], n: int) -> tuple[list[_T], bool]: >>> collect_n(iter(range(10)), 8) ([0, 1, 2, 3, 4, 5, 6, 7], True) + + # This also works for checking (albeit destructively, because it consumes from the iterator) if it is empty + + >>> collect_n(iter(range(10)), 0) + ([], True) """ if n < 0: raise ValueError(f'n must be non-negative, got {n}') diff --git a/hathor/utils/api.py b/hathor/utils/api.py index f614cdab3..52728c67a 100644 --- a/hathor/utils/api.py +++ b/hathor/utils/api.py @@ -11,8 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import cgi -from typing import Union +from typing import Type, TypeVar, Union from pydantic import Field, ValidationError, validator from twisted.web.http import Request @@ -21,6 +22,8 @@ from hathor.utils.list import single_or_none from hathor.utils.pydantic import BaseModel +T = TypeVar('T', bound='QueryParams') + class QueryParams(BaseModel): """Class used to parse Twisted HTTP Request query parameters. 
@@ -31,7 +34,7 @@ class QueryParams(BaseModel): _list_to_single_item_validator = validator('*', pre=True, allow_reuse=True)(single_or_none) @classmethod - def from_request(cls, request: Request) -> Union['QueryParams', 'ErrorResponse']: + def from_request(cls: Type[T], request: Request) -> Union[T, 'ErrorResponse']: """Creates an instance from a Twisted Request.""" encoding = 'utf8' diff --git a/hathor/websocket/protocol.py b/hathor/websocket/protocol.py index 993cc2200..5429b9506 100644 --- a/hathor/websocket/protocol.py +++ b/hathor/websocket/protocol.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Set, Union +from typing import TYPE_CHECKING, Union from autobahn.twisted.websocket import WebSocketServerProtocol from structlog import get_logger @@ -33,7 +33,7 @@ class HathorAdminWebsocketProtocol(WebSocketServerProtocol): def __init__(self, factory: 'HathorAdminWebsocketFactory') -> None: self.log = logger.new() self.factory = factory - self.subscribed_to: Set[str] = set() + self.subscribed_to: set[str] = set() super().__init__() def onConnect(self, request): diff --git a/poetry.lock b/poetry.lock index ee5f1d46e..b977d1f2d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
[[package]] name = "aiohttp" version = "3.8.3" description = "Async http client/server framework (asyncio)" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -113,7 +112,6 @@ speedups = ["Brotli", "aiodns", "cchardet"] name = "aiosignal" version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -128,7 +126,6 @@ frozenlist = ">=1.1.0" name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 10.9" -category = "main" optional = false python-versions = "*" files = [ @@ -140,7 +137,6 @@ files = [ name = "asttokens" version = "2.2.1" description = "Annotate AST trees with source code positions" -category = "main" optional = false python-versions = "*" files = [ @@ -158,7 +154,6 @@ test = ["astroid", "pytest"] name = "async-timeout" version = "4.0.2" description = "Timeout context manager for asyncio programs" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -170,7 +165,6 @@ files = [ name = "attrs" version = "22.1.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -186,13 +180,12 @@ tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy [[package]] name = "autobahn" -version = "22.7.1" +version = "23.6.2" description = "WebSocket client & server library, WAMP real-time framework" -category = "main" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "autobahn-22.7.1.tar.gz", hash = "sha256:8b462ea2e6aad6b4dc0ed45fb800b6cbfeb0325e7fe6983907f122f2be4a1fe9"}, + {file = "autobahn-23.6.2.tar.gz", hash = "sha256:ec9421c52a2103364d1ef0468036e6019ee84f71721e86b36fe19ad6966c1181"}, ] [package.dependencies] @@ -202,22 +195,21 @@ setuptools = "*" txaio = ">=21.2.1" [package.extras] -all = ["PyGObject (>=3.40.0)", "argon2_cffi (>=20.1.0)", "attrs (>=20.3.0)", "base58 
(>=2.1.0)", "cbor2 (>=5.2.0)", "cffi (>=1.14.5)", "click (>=8.1.2)", "ecdsa (>=0.16.1)", "eth-abi (>=2.1.1)", "flatbuffers (>=1.12)", "hkdf (>=0.0.3)", "jinja2 (>=2.11.3)", "mnemonic (>=0.19)", "msgpack (>=1.0.2)", "passlib (>=1.7.4)", "py-ecc (>=5.1.0)", "py-eth-sig-utils (>=0.4.0)", "py-multihash (>=2.0.1)", "py-ubjson (>=0.16.1)", "pynacl (>=1.4.0)", "pyopenssl (>=20.0.1)", "python-snappy (>=0.6.0)", "pytrie (>=0.4.0)", "qrcode (>=7.3.1)", "rlp (>=2.0.1)", "service_identity (>=18.1.0)", "spake2 (>=0.8)", "twisted (>=20.3.0)", "ujson (>=4.0.2)", "web3 (>=5.29.0)", "xbr (>=21.2.1)", "yapf (==0.29.0)", "zlmdb (>=21.2.1)", "zope.interface (>=5.2.0)"] +all = ["PyGObject (>=3.40.0)", "argon2_cffi (>=20.1.0)", "attrs (>=20.3.0)", "base58 (>=2.1.0)", "bitarray (>=2.7.5)", "cbor2 (>=5.2.0)", "cffi (>=1.14.5)", "click (>=8.1.2)", "ecdsa (>=0.16.1)", "eth-abi (>=4.0.0)", "flatbuffers (>=22.12.6)", "hkdf (>=0.0.3)", "jinja2 (>=2.11.3)", "mnemonic (>=0.19)", "msgpack (>=1.0.2)", "passlib (>=1.7.4)", "py-ecc (>=5.1.0)", "py-eth-sig-utils (>=0.4.0)", "py-multihash (>=2.0.1)", "py-ubjson (>=0.16.1)", "pynacl (>=1.4.0)", "pyopenssl (>=20.0.1)", "python-snappy (>=0.6.0)", "pytrie (>=0.4.0)", "qrcode (>=7.3.1)", "rlp (>=2.0.1)", "service_identity (>=18.1.0)", "spake2 (>=0.8)", "twisted (>=20.3.0)", "ujson (>=4.0.2)", "web3[ipfs] (>=6.0.0)", "xbr (>=21.2.1)", "yapf (==0.29.0)", "zlmdb (>=21.2.1)", "zope.interface (>=5.2.0)"] compress = ["python-snappy (>=0.6.0)"] -dev = ["awscli", "backports.tempfile (>=1.0)", "bumpversion (>=0.5.3)", "codecov (>=2.0.15)", "flake8 (<5)", "humanize (>=0.5.1)", "mypy (>=0.610)", "passlib", "pep8-naming (>=0.3.3)", "pip (>=9.0.1)", "pyenchant (>=1.6.6)", "pyflakes (>=1.0.0)", "pyinstaller (>=4.2)", "pylint (>=1.9.2)", "pytest (>=3.4.2)", "pytest-aiohttp", "pytest-asyncio (>=0.14.0)", "pytest-runner (>=2.11.1)", "pyyaml (>=4.2b4)", "qualname", "sphinx (>=1.7.1)", "sphinx-autoapi (>=1.7.0)", "sphinx_rtd_theme (>=0.1.9)", "sphinxcontrib-images 
(>=0.9.1)", "tox (>=2.9.1)", "tox-gh-actions (>=2.2.0)", "twine (>=3.3.0)", "twisted (>=18.7.0)", "txaio (>=20.4.1)", "watchdog (>=0.8.3)", "wheel (>=0.36.2)", "yapf (==0.29.0)"] +dev = ["backports.tempfile (>=1.0)", "bumpversion (>=0.5.3)", "codecov (>=2.0.15)", "flake8 (<5)", "humanize (>=0.5.1)", "mypy (>=0.610)", "passlib", "pep8-naming (>=0.3.3)", "pip (>=9.0.1)", "pyenchant (>=1.6.6)", "pyflakes (>=1.0.0)", "pyinstaller (>=4.2)", "pylint (>=1.9.2)", "pytest (>=3.4.2)", "pytest-aiohttp", "pytest-asyncio (>=0.14.0)", "pytest-runner (>=2.11.1)", "pyyaml (>=4.2b4)", "qualname", "sphinx (>=1.7.1)", "sphinx-autoapi (>=1.7.0)", "sphinx_rtd_theme (>=0.1.9)", "sphinxcontrib-images (>=0.9.1)", "tox (>=4.2.8)", "tox-gh-actions (>=2.2.0)", "twine (>=3.3.0)", "twisted (>=22.10.0)", "txaio (>=20.4.1)", "watchdog (>=0.8.3)", "wheel (>=0.36.2)", "yapf (==0.29.0)"] encryption = ["pynacl (>=1.4.0)", "pyopenssl (>=20.0.1)", "pytrie (>=0.4.0)", "qrcode (>=7.3.1)", "service_identity (>=18.1.0)"] nvx = ["cffi (>=1.14.5)"] scram = ["argon2_cffi (>=20.1.0)", "cffi (>=1.14.5)", "passlib (>=1.7.4)"] -serialization = ["cbor2 (>=5.2.0)", "flatbuffers (>=1.12)", "msgpack (>=1.0.2)", "py-ubjson (>=0.16.1)", "ujson (>=4.0.2)"] +serialization = ["cbor2 (>=5.2.0)", "flatbuffers (>=22.12.6)", "msgpack (>=1.0.2)", "py-ubjson (>=0.16.1)", "ujson (>=4.0.2)"] twisted = ["attrs (>=20.3.0)", "twisted (>=20.3.0)", "zope.interface (>=5.2.0)"] ui = ["PyGObject (>=3.40.0)"] -xbr = ["base58 (>=2.1.0)", "cbor2 (>=5.2.0)", "click (>=8.1.2)", "ecdsa (>=0.16.1)", "eth-abi (>=2.1.1)", "hkdf (>=0.0.3)", "jinja2 (>=2.11.3)", "mnemonic (>=0.19)", "py-ecc (>=5.1.0)", "py-eth-sig-utils (>=0.4.0)", "py-multihash (>=2.0.1)", "rlp (>=2.0.1)", "spake2 (>=0.8)", "twisted (>=20.3.0)", "web3 (>=5.29.0)", "xbr (>=21.2.1)", "yapf (==0.29.0)", "zlmdb (>=21.2.1)"] +xbr = ["base58 (>=2.1.0)", "bitarray (>=2.7.5)", "cbor2 (>=5.2.0)", "click (>=8.1.2)", "ecdsa (>=0.16.1)", "eth-abi (>=4.0.0)", "hkdf (>=0.0.3)", "jinja2 
(>=2.11.3)", "mnemonic (>=0.19)", "py-ecc (>=5.1.0)", "py-eth-sig-utils (>=0.4.0)", "py-multihash (>=2.0.1)", "rlp (>=2.0.1)", "spake2 (>=0.8)", "twisted (>=20.3.0)", "web3[ipfs] (>=6.0.0)", "xbr (>=21.2.1)", "yapf (==0.29.0)", "zlmdb (>=21.2.1)"] [[package]] name = "automat" version = "22.10.0" description = "Self-service finite-state machines for the programmer on the go." -category = "main" optional = false python-versions = "*" files = [ @@ -236,7 +228,6 @@ visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"] name = "backcall" version = "0.2.0" description = "Specifications for callback functions passed in to an API" -category = "main" optional = false python-versions = "*" files = [ @@ -248,7 +239,6 @@ files = [ name = "base58" version = "2.1.1" description = "Base58 and Base58Check implementation." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -263,7 +253,6 @@ tests = ["PyHamcrest (>=2.0.2)", "mypy", "pytest (>=4.6)", "pytest-benchmark", " name = "certifi" version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -275,7 +264,6 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." -category = "main" optional = false python-versions = "*" files = [ @@ -352,7 +340,6 @@ pycparser = "*" name = "charset-normalizer" version = "2.1.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" optional = false python-versions = ">=3.6.0" files = [ @@ -367,7 +354,6 @@ unicode-backport = ["unicodedata2"] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." 
-category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -379,7 +365,6 @@ files = [ name = "configargparse" version = "1.5.3" description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -395,7 +380,6 @@ yaml = ["PyYAML"] name = "constantly" version = "15.1.0" description = "Symbolic constants in Python" -category = "main" optional = false python-versions = "*" files = [ @@ -407,7 +391,6 @@ files = [ name = "coverage" version = "6.5.0" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -473,7 +456,6 @@ toml = ["tomli"] name = "cryptography" version = "38.0.4" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -520,7 +502,6 @@ test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0 name = "decorator" version = "5.1.1" description = "Decorators for Humans" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -532,7 +513,6 @@ files = [ name = "exceptiongroup" version = "1.0.4" description = "Backport of PEP 654 (exception groups)" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -547,7 +527,6 @@ test = ["pytest (>=6)"] name = "execnet" version = "1.9.0" description = "execnet: rapid multi-Python deployment" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -562,7 +541,6 @@ testing = ["pre-commit"] name = "executing" version = "1.2.0" description = "Get the currently executing AST node of a frame, and other information" -category = "main" optional = false python-versions = "*" files = [ @@ -577,7 +555,6 @@ tests = ["asttokens", "littleutils", "pytest", "rich"] name = "flake8" version = "6.0.0" description = "the modular source code checker: pep8 pyflakes and co" -category = "dev" optional = false python-versions = ">=3.8.1" files = [ @@ -594,7 +571,6 @@ pyflakes = ">=3.0.0,<3.1.0" name = "flaky" version = "3.7.0" description = "Plugin for nose or pytest that automatically reruns flaky tests." 
-category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -606,7 +582,6 @@ files = [ name = "frozenlist" version = "1.3.3" description = "A list-like structure which implements collections.abc.MutableSequence" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -690,7 +665,6 @@ files = [ name = "graphviz" version = "0.20.1" description = "Simple Python interface for Graphviz" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -707,7 +681,6 @@ test = ["coverage", "mock (>=4)", "pytest (>=7)", "pytest-cov", "pytest-mock (>= name = "hathorlib" version = "0.3.0" description = "Hathor Network base objects library" -category = "main" optional = false python-versions = ">=3.6,<4.0" files = [ @@ -727,7 +700,6 @@ client = ["aiohttp (>=3.7.0)", "structlog (>=20.0.0)"] name = "hyperlink" version = "21.0.0" description = "A featureful, immutable, and correct URL for Python." -category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -742,7 +714,6 @@ idna = ">=2.5" name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -754,7 +725,6 @@ files = [ name = "incremental" version = "22.10.0" description = "\"A small library that versions your Python projects.\"" -category = "main" optional = false python-versions = "*" files = [ @@ -770,7 +740,6 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] name = "iniconfig" version = "1.1.1" description = "iniconfig: brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = "*" files = [ @@ -782,7 +751,6 @@ files = [ name = "intervaltree" version = "3.1.0" description = "Editable interval tree data structure for Python 2 and 3" -category = "main" optional = false python-versions = "*" files = [ @@ -796,7 +764,6 @@ sortedcontainers = 
">=2.0,<3.0" name = "ipython" version = "8.7.0" description = "IPython: Productive Interactive Computing" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -835,7 +802,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.20)", "pa name = "isort" version = "5.10.1" description = "A Python utility / library to sort Python imports." -category = "dev" optional = false python-versions = ">=3.6.1,<4.0" files = [ @@ -856,7 +822,6 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "jedi" version = "0.18.2" description = "An autocompletion tool for Python that can be used for text editors." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -876,7 +841,6 @@ testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -891,7 +855,6 @@ traitlets = "*" name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -903,7 +866,6 @@ files = [ name = "mnemonic" version = "0.20" description = "Implementation of Bitcoin BIP-0039" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -915,7 +877,6 @@ files = [ name = "multidict" version = "6.0.3" description = "multidict implementation" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -999,7 +960,6 @@ files = [ name = "mypy" version = "1.4.1" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1046,7 +1006,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
-category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -1058,7 +1017,6 @@ files = [ name = "mypy-zope" version = "1.0.0" description = "Plugin for mypy to support zope interfaces" -category = "dev" optional = false python-versions = "*" files = [ @@ -1078,7 +1036,6 @@ test = ["lxml", "pytest (>=4.6)", "pytest-cov"] name = "packaging" version = "22.0" description = "Core utilities for Python packages" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1090,7 +1047,6 @@ files = [ name = "parso" version = "0.8.3" description = "A Python Parser" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1106,7 +1062,6 @@ testing = ["docopt", "pytest (<6.0.0)"] name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1118,7 +1073,6 @@ files = [ name = "pexpect" version = "4.8.0" description = "Pexpect allows easy control of interactive console applications." -category = "main" optional = false python-versions = "*" files = [ @@ -1133,7 +1087,6 @@ ptyprocess = ">=0.5" name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" -category = "main" optional = false python-versions = "*" files = [ @@ -1145,7 +1098,6 @@ files = [ name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1161,7 +1113,6 @@ testing = ["pytest", "pytest-benchmark"] name = "prometheus-client" version = "0.15.0" description = "Python client for the Prometheus monitoring system." 
-category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1176,7 +1127,6 @@ twisted = ["twisted"] name = "prompt-toolkit" version = "3.0.36" description = "Library for building powerful interactive command lines in Python" -category = "main" optional = false python-versions = ">=3.6.2" files = [ @@ -1191,7 +1141,6 @@ wcwidth = "*" name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -category = "main" optional = false python-versions = "*" files = [ @@ -1203,7 +1152,6 @@ files = [ name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" -category = "main" optional = false python-versions = "*" files = [ @@ -1218,7 +1166,6 @@ tests = ["pytest"] name = "pyasn1" version = "0.4.8" description = "ASN.1 types and codecs" -category = "main" optional = false python-versions = "*" files = [ @@ -1230,7 +1177,6 @@ files = [ name = "pyasn1-modules" version = "0.2.8" description = "A collection of ASN.1-based protocols modules." -category = "main" optional = false python-versions = "*" files = [ @@ -1245,7 +1191,6 @@ pyasn1 = ">=0.4.6,<0.5.0" name = "pycodestyle" version = "2.10.0" description = "Python style guide checker" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1257,7 +1202,6 @@ files = [ name = "pycoin" version = "0.92.20220529" description = "Utilities for Bitcoin and altcoin addresses and transaction manipulation." 
-category = "main" optional = false python-versions = "*" files = [ @@ -1268,7 +1212,6 @@ files = [ name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1280,7 +1223,6 @@ files = [ name = "pydantic" version = "1.10.11" description = "Data validation and settings management using python type hints" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1333,7 +1275,6 @@ email = ["email-validator (>=1.0.3)"] name = "pyflakes" version = "3.0.1" description = "passive checker of Python programs" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1345,7 +1286,6 @@ files = [ name = "pygments" version = "2.13.0" description = "Pygments is a syntax highlighting package written in Python." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1360,7 +1300,6 @@ plugins = ["importlib-metadata"] name = "pyopenssl" version = "22.1.0" description = "Python wrapper module around the OpenSSL library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1379,7 +1318,6 @@ test = ["flaky", "pretend", "pytest (>=3.0.1)"] name = "pytest" version = "7.2.0" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1403,7 +1341,6 @@ testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2. name = "pytest-cov" version = "4.0.0" description = "Pytest plugin for measuring coverage." 
-category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1422,7 +1359,6 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytest-xdist" version = "3.2.0" description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1443,7 +1379,6 @@ testing = ["filelock"] name = "pywin32" version = "305" description = "Python for Window Extensions" -category = "main" optional = false python-versions = "*" files = [ @@ -1465,59 +1400,57 @@ files = [ [[package]] name = "pyyaml" -version = "6.0" +version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash 
= "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = 
"sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] [[package]] name = "requests" version = "2.28.1" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7, <4" files = [ @@ -1539,7 +1472,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "rocksdb" version = "0.9.1" description = "Python bindings for RocksDB" -category = "main" optional = false python-versions = "*" files = [] @@ -1562,7 +1494,6 @@ resolved_reference = "72edcfbd22f4a3ca816f94096d3ec181da41031e" name = "sentry-sdk" version = "1.11.1" description = "Python client for Sentry (https://sentry.io)" -category = "main" optional = true python-versions = "*" files = [ @@ -1599,7 +1530,6 @@ tornado = ["tornado (>=5)"] name = "service-identity" version = "21.1.0" description = "Service identity verification for pyOpenSSL & cryptography." 
-category = "main" optional = false python-versions = "*" files = [ @@ -1624,7 +1554,6 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] name = "setproctitle" version = "1.3.2" description = "A Python module to customize the process title" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1709,7 +1638,6 @@ test = ["pytest"] name = "setuptools" version = "65.6.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1726,7 +1654,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -1738,7 +1665,6 @@ files = [ name = "sortedcontainers" version = "2.4.0" description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" -category = "main" optional = false python-versions = "*" files = [ @@ -1750,7 +1676,6 @@ files = [ name = "stack-data" version = "0.6.2" description = "Extract data from python stack frames and tracebacks for informative displays" -category = "main" optional = false python-versions = "*" files = [ @@ -1770,7 +1695,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "structlog" version = "22.3.0" description = "Structured Logging for Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1788,7 +1712,6 @@ typing = ["mypy", "rich", "twisted"] name = "structlog-sentry" version = "1.4.0" description = "Sentry integration for structlog" -category = "main" optional = true python-versions = ">=3.6,<4.0" files = [ @@ -1803,7 +1726,6 @@ sentry-sdk = "*" name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1815,7 +1737,6 @@ files = [ name = "traitlets" 
version = "5.7.0" description = "Traitlets Python configuration system" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1833,7 +1754,6 @@ typing = ["mypy (>=0.990)"] name = "twisted" version = "22.10.0" description = "An asynchronous networking framework written in Python" -category = "main" optional = false python-versions = ">=3.7.1" files = [ @@ -1872,7 +1792,6 @@ windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0. name = "twisted-iocpsupport" version = "1.0.2" description = "An extension for use in the twisted I/O Completion Ports reactor." -category = "main" optional = false python-versions = "*" files = [ @@ -1894,7 +1813,6 @@ files = [ name = "txaio" version = "22.2.1" description = "Compatibility API between asyncio/Twisted/Trollius" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1911,7 +1829,6 @@ twisted = ["twisted (>=20.3.0)", "zope.interface (>=5.2.0)"] name = "types-cryptography" version = "3.3.23.2" description = "Typing stubs for cryptography" -category = "dev" optional = false python-versions = "*" files = [ @@ -1923,7 +1840,6 @@ files = [ name = "types-pyopenssl" version = "22.1.0.2" description = "Typing stubs for pyOpenSSL" -category = "dev" optional = false python-versions = "*" files = [ @@ -1938,7 +1854,6 @@ types-cryptography = "*" name = "types-pyyaml" version = "6.0.12.9" description = "Typing stubs for PyYAML" -category = "dev" optional = false python-versions = "*" files = [ @@ -1950,7 +1865,6 @@ files = [ name = "types-requests" version = "2.28.11.4" description = "Typing stubs for requests" -category = "dev" optional = false python-versions = "*" files = [ @@ -1965,7 +1879,6 @@ types-urllib3 = "<1.27" name = "types-urllib3" version = "1.26.25.4" description = "Typing stubs for urllib3" -category = "dev" optional = false python-versions = "*" files = [ @@ -1977,7 +1890,6 @@ files = [ name = "typing-extensions" version = "4.4.0" description = "Backported 
and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1989,7 +1901,6 @@ files = [ name = "urllib3" version = "1.26.13" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ @@ -2006,7 +1917,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] name = "wcwidth" version = "0.2.5" description = "Measures the displayed width of unicode strings in a terminal" -category = "main" optional = false python-versions = "*" files = [ @@ -2018,7 +1928,6 @@ files = [ name = "yamllint" version = "1.31.0" description = "A linter for YAML files." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2037,7 +1946,6 @@ dev = ["doc8", "flake8", "flake8-import-order", "rstcheck[sphinx]", "sphinx"] name = "yarl" version = "1.8.2" description = "Yet another URL library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2125,7 +2033,6 @@ multidict = ">=4.0" name = "zope-event" version = "4.5.0" description = "Very basic event publishing system" -category = "dev" optional = false python-versions = "*" files = [ @@ -2144,7 +2051,6 @@ test = ["zope.testrunner"] name = "zope-interface" version = "5.5.2" description = "Interfaces for Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -2198,7 +2104,6 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] name = "zope-schema" version = "6.2.1" description = "zope.interface extension for defining data schemas" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -2221,4 +2126,4 @@ sentry = ["sentry-sdk", "structlog-sentry"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = 
"78fac4e595777b8ed2480214e6fb50e46215128c0847168bfc6b6983feff72bd" +content-hash = "6ff762cf3cfa31bece485b6db166cea7a2f08f16b1d9111a08607d3959b93ba4" diff --git a/pyproject.toml b/pyproject.toml index 5ba20bbd5..a5f5964ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,7 +53,7 @@ types-pyyaml = "=6.0.12.9" [tool.poetry.dependencies] python = ">=3.9,<4" twisted = "~22.10.0" -autobahn = "~22.7.1" +autobahn = "~23.6.2" base58 = "~2.1.1" colorama = "~0.4.6" configargparse = "~1.5.3" @@ -78,7 +78,7 @@ sentry-sdk = {version = "^1.5.11", optional = true} structlog-sentry = {version = "^1.4.0", optional = true} hathorlib = "0.3.0" pydantic = "~1.10.11" -pyyaml = "~6.0" +pyyaml = "^6.0.1" [tool.poetry.extras] sentry = ["sentry-sdk", "structlog-sentry"] diff --git a/tests/event/test_event_simulation_scenarios.py b/tests/event/test_event_simulation_scenarios.py index c5f8398f7..fea2548ec 100644 --- a/tests/event/test_event_simulation_scenarios.py +++ b/tests/event/test_event_simulation_scenarios.py @@ -243,30 +243,30 @@ def test_reorg(self): UnorderedList([ # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa E501 # Also one VERTEX_METADATA_CHANGED for the previous block, voiding it - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=9, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], 
accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=12, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=11, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', nonce=2246536493, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', spent_outputs=[], conflict_with=[], voided_by=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), 
latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=10, timestamp=1578878955.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=9, timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=12, 
timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], twins=[], accumulated_weight=2.0, score=2.0, first_block=None, height=0, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=11, timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', nonce=2246536493, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUPXOcGnrN0ZB2WrnPVcjdCCcacL+IrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', spent_outputs=[], conflict_with=[], voided_by=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=10, timestamp=1578878949.75, type=EventType.VERTEX_METADATA_CHANGED, 
data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 ]), [ # One NEW_VERTEX_ACCEPTED for a new block from manager2 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=13, timestamp=1578878955.75, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), 
latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=13, timestamp=1578878949.75, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7'], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=None), latest_event_id=20), # noqa E501 # REORG_STARTED caused by a new block from manager2 (below) - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=14, timestamp=1578878956.0, type=EventType.REORG_STARTED, data=ReorgData(reorg_size=1, previous_best_block='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', new_best_block='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', common_block='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792'), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=14, timestamp=1578878950.0, type=EventType.REORG_STARTED, data=ReorgData(reorg_size=1, previous_best_block='3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', new_best_block='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', 
common_block='339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792'), group_id=0), latest_event_id=20), # noqa E501 ], UnorderedList([ # One VERTEX_METADATA_CHANGED for a new block (below), and one VERTEX_METADATA_CHANGED for each genesis tx (2), adding the new block as their child # noqa E501 # Also one VERTEX_METADATA_CHANGED for the previous block, un-voiding it as it's now part of the best blockchain # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=15, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=18, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', 
'4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=17, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=16, timestamp=1578878956.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', 
'33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=15, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', nonce=2, timestamp=1572636345, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=18, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', nonce=6, timestamp=1572636344, version=1, weight=2.0, inputs=[], outputs=[], parents=[], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], 
children=['3cfde60f140d2838581d885e656af1049fa8eab964defc5bca3d883b83c9afc3', '4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 'a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=2.0, first_block='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', height=0, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=17, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', nonce=1279525218, timestamp=1578878940, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUfBo1MGBHkHtXDktO+BxtBdh5T5GIrA==', token_data=0)], parents=['339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=['a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4'], twins=[], accumulated_weight=2.0, score=4.0, first_block=None, height=1, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=16, timestamp=1578878950.0, type=EventType.VERTEX_METADATA_CHANGED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', 
'16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=0), latest_event_id=20), # noqa E501 ]), [ # REORG_FINISHED - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=19, timestamp=1578878956.0, type=EventType.REORG_FINISHED, data=EmptyData(), group_id=0), latest_event_id=20), # noqa E501 + EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=19, timestamp=1578878950.0, type=EventType.REORG_FINISHED, data=EmptyData(), group_id=0), latest_event_id=20), # noqa E501 # One NEW_VERTEX_ACCEPTED for a new block from manager2 - EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=20, timestamp=1578878956.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=20) # noqa E501 + 
EventResponse(type='EVENT', event=BaseEvent(peer_id=self.peer_id, id=20, timestamp=1578878950.0, type=EventType.NEW_VERTEX_ACCEPTED, data=TxData(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', nonce=4136633663, timestamp=1578878941, version=0, weight=2.0, inputs=[], outputs=[TxOutput(value=6400, script='dqkUgQrqLefPfPVpkXlfvvAp943epyOIrA==', token_data=0)], parents=['4f0b3d13966f95f461d4edc6389c8440955e13a75f87c6bc2a4b455d813fb2f7', '16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952', '33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869'], tokens=[], token_name=None, token_symbol=None, metadata=TxMetadata(hash='a729a7abb4248dffa492dd2c2634aa6d92b3e1e05bfa6614118b6ba97bfba5c4', spent_outputs=[], conflict_with=[], voided_by=[], received_by=[], children=[], twins=[], accumulated_weight=2.0, score=4.321928094887363, first_block=None, height=2, validation='full'), aux_pow=None), group_id=None), latest_event_id=20) # noqa E501 ] ] diff --git a/tests/feature_activation/test_bit_signaling_service.py b/tests/feature_activation/test_bit_signaling_service.py new file mode 100644 index 000000000..b46951d8b --- /dev/null +++ b/tests/feature_activation/test_bit_signaling_service.py @@ -0,0 +1,288 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest.mock import Mock + +import pytest +from structlog.testing import capture_logs + +from hathor.feature_activation.bit_signaling_service import BitSignalingService +from hathor.feature_activation.feature import Feature +from hathor.feature_activation.feature_service import FeatureService +from hathor.feature_activation.model.criteria import Criteria +from hathor.feature_activation.model.feature_description import FeatureDescription +from hathor.feature_activation.model.feature_state import FeatureState +from hathor.feature_activation.settings import Settings as FeatureSettings +from hathor.transaction import Block +from hathor.transaction.storage import TransactionStorage + + +@pytest.mark.parametrize( + 'features_description', + [ + {}, + { + Feature.NOP_FEATURE_1: FeatureDescription(state=FeatureState.DEFINED, criteria=Mock()) + }, + { + Feature.NOP_FEATURE_1: FeatureDescription(state=FeatureState.FAILED, criteria=Mock()), + Feature.NOP_FEATURE_2: FeatureDescription(state=FeatureState.ACTIVE, criteria=Mock()) + } + ] +) +@pytest.mark.parametrize( + ['support_features', 'not_support_features'], + [ + ({Feature.NOP_FEATURE_1}, set()), + (set(), {Feature.NOP_FEATURE_2}), + ({Feature.NOP_FEATURE_1}, {Feature.NOP_FEATURE_2}), + ({Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2}, set()), + ] +) +def test_generate_signal_bits_no_signaling_features( + features_description: dict[Feature, FeatureDescription], + support_features: set[Feature], + not_support_features: set[Feature] +) -> None: + signal_bits = _test_generate_signal_bits(features_description, support_features, not_support_features) + + assert signal_bits == 0 + + +@pytest.mark.parametrize( + ['support_features', 'not_support_features', 'expected_signal_bits'], + [ + ({Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_3}, set(), 0b1001), + (set(), {Feature.NOP_FEATURE_2}, 0b0000), + ({Feature.NOP_FEATURE_1}, {Feature.NOP_FEATURE_2, Feature.NOP_FEATURE_3}, 0b0001), + ({Feature.NOP_FEATURE_1, 
Feature.NOP_FEATURE_2, Feature.NOP_FEATURE_3}, set(), 0b1011), + ] +) +def test_generate_signal_bits_signaling_features( + support_features: set[Feature], + not_support_features: set[Feature], + expected_signal_bits: int, +) -> None: + features_description = { + Feature.NOP_FEATURE_1: FeatureDescription( + state=FeatureState.STARTED, + criteria=Criteria( + bit=0, + start_height=0, + timeout_height=2*40320, + version='0.0.0' + ) + ), + Feature.NOP_FEATURE_2: FeatureDescription( + state=FeatureState.MUST_SIGNAL, + criteria=Criteria( + bit=1, + start_height=0, + timeout_height=2*40320, + version='0.0.0' + ) + ), + Feature.NOP_FEATURE_3: FeatureDescription( + state=FeatureState.LOCKED_IN, + criteria=Criteria( + bit=3, + start_height=0, + timeout_height=2*40320, + version='0.0.0' + ) + ) + } + + signal_bits = _test_generate_signal_bits(features_description, support_features, not_support_features) + + assert signal_bits == expected_signal_bits + + +@pytest.mark.parametrize( + ['support_features', 'not_support_features', 'expected_signal_bits'], + [ + ({Feature.NOP_FEATURE_3, Feature.NOP_FEATURE_2}, set(), 0b1011), + (set(), {Feature.NOP_FEATURE_1}, 0b0010), + ({Feature.NOP_FEATURE_2}, {Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_3}, 0b0010), + ({Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2, Feature.NOP_FEATURE_3}, set(), 0b1011), + (set(), {Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2, Feature.NOP_FEATURE_3}, 0b0000), + ] +) +def test_generate_signal_bits_signaling_features_with_defaults( + support_features: set[Feature], + not_support_features: set[Feature], + expected_signal_bits: int, +) -> None: + features_description = { + Feature.NOP_FEATURE_1: FeatureDescription( + state=FeatureState.STARTED, + criteria=Criteria( + bit=0, + start_height=0, + timeout_height=2*40320, + version='0.0.0', + signal_support_by_default=True + ) + ), + Feature.NOP_FEATURE_2: FeatureDescription( + state=FeatureState.MUST_SIGNAL, + criteria=Criteria( + bit=1, + start_height=0, + 
timeout_height=2*40320, + version='0.0.0', + signal_support_by_default=True + ) + ), + Feature.NOP_FEATURE_3: FeatureDescription( + state=FeatureState.LOCKED_IN, + criteria=Criteria( + bit=3, + start_height=0, + timeout_height=2*40320, + version='0.0.0', + ) + ) + } + + signal_bits = _test_generate_signal_bits(features_description, support_features, not_support_features) + + assert signal_bits == expected_signal_bits + + +def _test_generate_signal_bits( + features_description: dict[Feature, FeatureDescription], + support_features: set[Feature], + not_support_features: set[Feature] +) -> int: + feature_service = Mock(spec_set=FeatureService) + feature_service.get_bits_description = lambda block: features_description + + service = BitSignalingService( + feature_settings=FeatureSettings(), + feature_service=feature_service, + tx_storage=Mock(), + support_features=support_features, + not_support_features=not_support_features + ) + + return service.generate_signal_bits(block=Mock()) + + +@pytest.mark.parametrize( + ['support_features', 'not_support_features', 'invalid_features'], + [ + ( + {Feature.NOP_FEATURE_2}, + {Feature.NOP_FEATURE_2}, + ['NOP_FEATURE_2'], + ), + ( + {Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2}, + {Feature.NOP_FEATURE_2}, + ['NOP_FEATURE_2'], + ), + ( + {Feature.NOP_FEATURE_1}, + {Feature.NOP_FEATURE_2, Feature.NOP_FEATURE_1}, + ['NOP_FEATURE_1'], + ), + ( + {Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2}, + {Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2}, + ['NOP_FEATURE_1', 'NOP_FEATURE_2'], + ) + ] +) +def test_support_intersection_validation( + support_features: set[Feature], + not_support_features: set[Feature], + invalid_features: list[str] +) -> None: + with pytest.raises(ValueError) as e: + BitSignalingService( + feature_settings=Mock(), + feature_service=Mock(), + tx_storage=Mock(), + support_features=support_features, + not_support_features=not_support_features, + ) + + message = str(e.value) + assert 'Cannot signal both "support" and 
"not support" for features' in message + + for feature in invalid_features: + assert feature in message + + +@pytest.mark.parametrize( + ['support_features', 'not_support_features', 'non_signaling_features'], + [ + ( + {Feature.NOP_FEATURE_1}, + set(), + {'NOP_FEATURE_1'} + ), + ( + set(), + {Feature.NOP_FEATURE_2}, + {'NOP_FEATURE_2'} + ), + ( + {Feature.NOP_FEATURE_1, Feature.NOP_FEATURE_2}, + set(), + {'NOP_FEATURE_1', 'NOP_FEATURE_2'} + ), + ] +) +def test_non_signaling_features_warning( + support_features: set[Feature], + not_support_features: set[Feature], + non_signaling_features: set[str], +) -> None: + best_block = Mock(spec_set=Block) + best_block.get_height = Mock(return_value=123) + best_block.hash_hex = 'abc' + tx_storage = Mock(spec_set=TransactionStorage) + tx_storage.get_best_block = lambda: best_block + + def get_bits_description_mock(block): + if block == best_block: + return {} + raise NotImplementedError + + feature_service = Mock(spec_set=FeatureService) + feature_service.get_bits_description = get_bits_description_mock + + service = BitSignalingService( + feature_settings=FeatureSettings(), + feature_service=feature_service, + tx_storage=tx_storage, + support_features=support_features, + not_support_features=not_support_features, + ) + + with capture_logs() as logs: + service.start() + + expected_log = dict( + log_level='warning', + best_block_height=123, + best_block_hash='abc', + non_signaling_features=non_signaling_features, + event='Considering the current best block, there are signaled features outside their signaling period. ' + 'Therefore, signaling for them has no effect. 
Make sure you are signaling for the desired features.', + ) + + assert expected_log in logs diff --git a/tests/feature_activation/test_criteria.py b/tests/feature_activation/test_criteria.py index 617a86dd9..2d8e5774a 100644 --- a/tests/feature_activation/test_criteria.py +++ b/tests/feature_activation/test_criteria.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest.mock import patch - import pytest from pydantic import ValidationError @@ -30,128 +28,131 @@ ) -@patch('hathor.feature_activation.model.criteria.Criteria.evaluation_interval', 1000) -@patch('hathor.feature_activation.model.criteria.Criteria.max_signal_bits', 2) -class TestCriteria: - @pytest.mark.parametrize( - 'criteria', - [ - VALID_CRITERIA, - dict( - bit=1, - start_height=100_000, - timeout_height=102_000, - threshold=1000, - minimum_activation_height=101_000, - lock_in_on_timeout=True, - version='0.52.3' - ) - ] - ) - def test_valid_criteria(self, criteria): - Criteria(**criteria) - - @pytest.mark.parametrize( - ['bit', 'error'], - [ - (-10, 'ensure this value is greater than or equal to 0'), - (-1, 'ensure this value is greater than or equal to 0'), - (2, 'bit must be lower than max_signal_bits: 2 >= 2'), - (10, 'bit must be lower than max_signal_bits: 10 >= 2') - ] - ) - def test_bit(self, bit, error): - criteria = VALID_CRITERIA | dict(bit=bit) - with pytest.raises(ValidationError) as e: - Criteria(**criteria) - - errors = e.value.errors() - assert errors[0]['msg'] == error - - @pytest.mark.parametrize( - ['start_height', 'error'], - [ - (-10, 'ensure this value is greater than or equal to 0'), - (-1, 'ensure this value is greater than or equal to 0'), - (1, 'Should be a multiple of evaluation_interval: 1 % 1000 != 0'), - (45, 'Should be a multiple of evaluation_interval: 45 % 1000 != 0'), - (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0') - ] - ) - def test_start_height(self, 
start_height, error): - criteria = VALID_CRITERIA | dict(start_height=start_height) - with pytest.raises(ValidationError) as e: - Criteria(**criteria) - - errors = e.value.errors() - assert errors[0]['msg'] == error - - @pytest.mark.parametrize( - ['timeout_height', 'error'], - [ - (-10, 'ensure this value is greater than or equal to 0'), - (-1, 'ensure this value is greater than or equal to 0'), - (1, 'timeout_height must be at least two evaluation intervals after the start_height: 1 < 3000'), - (45, 'timeout_height must be at least two evaluation intervals after the start_height: 45 < 3000'), - (100, 'timeout_height must be at least two evaluation intervals after the start_height: 100 < 3000'), - (3111, 'Should be a multiple of evaluation_interval: 3111 % 1000 != 0') - ] - ) - def test_timeout_height(self, timeout_height, error): - criteria = VALID_CRITERIA | dict(timeout_height=timeout_height) - with pytest.raises(ValidationError) as e: - Criteria(**criteria) - - errors = e.value.errors() - assert errors[0]['msg'] == error - - @pytest.mark.parametrize( - ['threshold', 'error'], - [ - (-10, 'ensure this value is greater than or equal to 0'), - (-1, 'ensure this value is greater than or equal to 0'), - (1001, 'threshold must not be greater than evaluation_interval: 1001 > 1000'), - (100000, 'threshold must not be greater than evaluation_interval: 100000 > 1000') - ] - ) - def test_threshold(self, threshold, error): - criteria = VALID_CRITERIA | dict(threshold=threshold) - with pytest.raises(ValidationError) as e: - Criteria(**criteria) - - errors = e.value.errors() - assert errors[0]['msg'] == error - - @pytest.mark.parametrize( - ['minimum_activation_height', 'error'], - [ - (-10, 'ensure this value is greater than or equal to 0'), - (-1, 'ensure this value is greater than or equal to 0'), - (1, 'Should be a multiple of evaluation_interval: 1 % 1000 != 0'), - (45, 'Should be a multiple of evaluation_interval: 45 % 1000 != 0'), - (100, 'Should be a multiple of 
evaluation_interval: 100 % 1000 != 0'), - ] - ) - def test_minimum_activation_height(self, minimum_activation_height, error): - criteria = VALID_CRITERIA | dict(minimum_activation_height=minimum_activation_height) - with pytest.raises(ValidationError) as e: - Criteria(**criteria) - - errors = e.value.errors() - assert errors[0]['msg'] == error - - @pytest.mark.parametrize( - ['version', 'error'], - [ - ('0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"'), - ('alpha', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"'), - ('0.0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"') - ] - ) - def test_version(self, version, error): - criteria = VALID_CRITERIA | dict(version=version) - with pytest.raises(ValidationError) as e: - Criteria(**criteria) - - errors = e.value.errors() - assert errors[0]['msg'] == error +@pytest.mark.parametrize( + 'criteria', + [ + VALID_CRITERIA, + dict( + bit=1, + start_height=100_000, + timeout_height=102_000, + threshold=1000, + minimum_activation_height=101_000, + lock_in_on_timeout=True, + version='0.52.3' + ) + ] +) +def test_valid_criteria(criteria): + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + + +@pytest.mark.parametrize( + ['bit', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (2, 'bit must be lower than max_signal_bits: 2 >= 2'), + (10, 'bit must be lower than max_signal_bits: 10 >= 2') + ] +) +def test_bit(bit, error): + criteria = VALID_CRITERIA | dict(bit=bit) + with pytest.raises(ValidationError) as e: + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize( + ['start_height', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure 
this value is greater than or equal to 0'), + (1, 'Should be a multiple of evaluation_interval: 1 % 1000 != 0'), + (45, 'Should be a multiple of evaluation_interval: 45 % 1000 != 0'), + (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0') + ] +) +def test_start_height(start_height, error): + criteria = VALID_CRITERIA | dict(start_height=start_height) + with pytest.raises(ValidationError) as e: + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize( + ['timeout_height', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (1, 'timeout_height must be at least two evaluation intervals after the start_height: 1 < 3000'), + (45, 'timeout_height must be at least two evaluation intervals after the start_height: 45 < 3000'), + (100, 'timeout_height must be at least two evaluation intervals after the start_height: 100 < 3000'), + (3111, 'Should be a multiple of evaluation_interval: 3111 % 1000 != 0') + ] +) +def test_timeout_height(timeout_height, error): + criteria = VALID_CRITERIA | dict(timeout_height=timeout_height) + with pytest.raises(ValidationError) as e: + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize( + ['threshold', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (1001, 'threshold must not be greater than evaluation_interval: 1001 > 1000'), + (100000, 'threshold must not be greater than evaluation_interval: 100000 > 1000') + ] +) +def test_threshold(threshold, error): + criteria = VALID_CRITERIA | dict(threshold=threshold) + with pytest.raises(ValidationError) as e: + Criteria(**criteria).to_validated(evaluation_interval=1000, 
max_signal_bits=2) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize( + ['minimum_activation_height', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (1, 'Should be a multiple of evaluation_interval: 1 % 1000 != 0'), + (45, 'Should be a multiple of evaluation_interval: 45 % 1000 != 0'), + (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0'), + ] +) +def test_minimum_activation_height(minimum_activation_height, error): + criteria = VALID_CRITERIA | dict(minimum_activation_height=minimum_activation_height) + with pytest.raises(ValidationError) as e: + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize( + ['version', 'error'], + [ + ('0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"'), + ('alpha', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"'), + ('0.0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"') + ] +) +def test_version(version, error): + criteria = VALID_CRITERIA | dict(version=version) + with pytest.raises(ValidationError) as e: + Criteria(**criteria).to_validated(evaluation_interval=1000, max_signal_bits=2) + + errors = e.value.errors() + assert errors[0]['msg'] == error diff --git a/tests/p2p/test_capabilities.py b/tests/p2p/test_capabilities.py index d2e4f5737..874267910 100644 --- a/tests/p2p/test_capabilities.py +++ b/tests/p2p/test_capabilities.py @@ -1,4 +1,6 @@ from hathor.conf import HathorSettings +from hathor.p2p.sync_v1.agent import NodeSyncTimestamp +from hathor.p2p.sync_v2.manager import NodeBlockSync from hathor.simulator import FakeConnection from tests import unittest @@ -21,6 +23,8 @@ def test_capabilities(self): # Even if we don't have the 
capability we must connect because the whitelist url conf is None self.assertEqual(conn._proto1.state.state_name, 'READY') self.assertEqual(conn._proto2.state.state_name, 'READY') + self.assertIsInstance(conn._proto1.state.sync_agent, NodeSyncTimestamp) + self.assertIsInstance(conn._proto2.state.sync_agent, NodeSyncTimestamp) manager3 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST]) manager4 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST]) @@ -34,10 +38,46 @@ def test_capabilities(self): self.assertEqual(conn2._proto1.state.state_name, 'READY') self.assertEqual(conn2._proto2.state.state_name, 'READY') + self.assertIsInstance(conn2._proto1.state.sync_agent, NodeSyncTimestamp) + self.assertIsInstance(conn2._proto2.state.sync_agent, NodeSyncTimestamp) class SyncV2HathorCapabilitiesTestCase(unittest.SyncV2Params, unittest.TestCase): - __test__ = True + def test_capabilities(self): + network = 'testnet' + manager1 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION]) + manager2 = self.create_peer(network, capabilities=[settings.CAPABILITY_SYNC_VERSION]) + + conn = FakeConnection(manager1, manager2) + + # Run the p2p protocol. 
+ for _ in range(100): + conn.run_one_step(debug=True) + self.clock.advance(0.1) + + # Even if we don't have the capability we must connect because the whitelist url conf is None + self.assertEqual(conn._proto1.state.state_name, 'READY') + self.assertEqual(conn._proto2.state.state_name, 'READY') + self.assertIsInstance(conn._proto1.state.sync_agent, NodeBlockSync) + self.assertIsInstance(conn._proto2.state.sync_agent, NodeBlockSync) + + manager3 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION]) + manager4 = self.create_peer(network, capabilities=[settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION]) + + conn2 = FakeConnection(manager3, manager4) + + # Run the p2p protocol. + for _ in range(100): + conn2.run_one_step(debug=True) + self.clock.advance(0.1) + + self.assertEqual(conn2._proto1.state.state_name, 'READY') + self.assertEqual(conn2._proto2.state.state_name, 'READY') + self.assertIsInstance(conn2._proto1.state.sync_agent, NodeBlockSync) + self.assertIsInstance(conn2._proto2.state.sync_agent, NodeBlockSync) # sync-bridge should behave like sync-v2 diff --git a/tests/p2p/test_double_spending.py b/tests/p2p/test_double_spending.py index e1e89b910..02c4b7441 100644 --- a/tests/p2p/test_double_spending.py +++ b/tests/p2p/test_double_spending.py @@ -1,5 +1,3 @@ -import random - from hathor.crypto.util import decode_address from tests import unittest from tests.utils import add_blocks_unlock_reward, add_new_blocks, add_new_tx @@ -21,7 +19,7 @@ def _add_new_transactions(self, manager, num_txs): txs = [] for _ in range(num_txs): address = self.get_address(0) - value = random.choice([5, 10, 15, 20]) + value = self.rng.choice([5, 10, 15, 20]) tx = add_new_tx(manager, address, value) txs.append(tx) return txs diff --git a/tests/p2p/test_get_best_blockchain.py b/tests/p2p/test_get_best_blockchain.py new file mode 100644 index 000000000..367fdc70e --- /dev/null +++ 
b/tests/p2p/test_get_best_blockchain.py @@ -0,0 +1,353 @@ +from hathor.conf import HathorSettings +from hathor.indexes.height_index import HeightInfo +from hathor.manager import DEFAULT_CAPABILITIES +from hathor.p2p.messages import ProtocolMessages +from hathor.p2p.states import ReadyState +from hathor.simulator import FakeConnection +from hathor.simulator.trigger import StopAfterNMinedBlocks +from hathor.util import json_dumps +from tests import unittest +from tests.simulation.base import SimulatorTestCase + +settings = HathorSettings() + + +class BaseGetBestBlockchainTestCase(SimulatorTestCase): + + def _send_cmd(self, proto, cmd, payload=None): + if not payload: + line = '{}\r\n'.format(cmd) + else: + line = '{} {}\r\n'.format(cmd, payload) + + if isinstance(line, str): + line = line.encode('utf-8') + + return proto.dataReceived(line) + + def test_get_best_blockchain(self): + manager1 = self.create_peer() + manager2 = self.create_peer() + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + self.simulator.run(3600) + + connected_peers1 = list(manager1.connections.connected_peers.values()) + connected_peers2 = list(manager2.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers1)) + self.assertEqual(1, len(connected_peers2)) + + # assert the protocol has capabilities + # HelloState is responsible to transmite to protocol the capabilities + protocol1 = connected_peers2[0] + protocol2 = connected_peers1[0] + self.assertIsNotNone(protocol1.capabilities) + self.assertIsNotNone(protocol2.capabilities) + + # assert the protocol has the GET_BEST_BLOCKCHAIN capability + self.assertIn(settings.CAPABILITY_GET_BEST_BLOCKCHAIN, protocol1.capabilities) + self.assertIn(settings.CAPABILITY_GET_BEST_BLOCKCHAIN, protocol2.capabilities) + + # assert the protocol is in ReadyState + state1 = protocol1.state + state2 = protocol2.state + self.assertIsInstance(state1, ReadyState) + self.assertIsInstance(state2, 
ReadyState) + + # assert ReadyState commands + self.assertIn(ProtocolMessages.GET_BEST_BLOCKCHAIN, state1.cmd_map) + self.assertIn(ProtocolMessages.BEST_BLOCKCHAIN, state1.cmd_map) + self.assertIn(ProtocolMessages.GET_BEST_BLOCKCHAIN, state2.cmd_map) + self.assertIn(ProtocolMessages.BEST_BLOCKCHAIN, state2.cmd_map) + + # assert best blockchain contains the genesis block + self.assertIsNotNone(state1.peer_best_blockchain) + self.assertIsNotNone(state2.peer_best_blockchain) + + # mine 20 blocks + miner = self.simulator.create_miner(manager1, hashpower=1e6) + miner.start() + trigger = StopAfterNMinedBlocks(miner, quantity=20) + self.assertTrue(self.simulator.run(7200, trigger=trigger)) + miner.stop() + + # assert best blockchain exchange + state1.send_get_best_blockchain() + state2.send_get_best_blockchain() + self.simulator.run(60) + self.assertEqual(settings.DEFAULT_BEST_BLOCKCHAIN_BLOCKS, len(state1.peer_best_blockchain)) + self.assertEqual(settings.DEFAULT_BEST_BLOCKCHAIN_BLOCKS, len(state2.peer_best_blockchain)) + + self.assertIsInstance(state1.peer_best_blockchain[0], HeightInfo) + self.assertIsInstance(state2.peer_best_blockchain[0], HeightInfo) + + def test_handle_get_best_blockchain(self): + manager1 = self.create_peer() + manager2 = self.create_peer() + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + + # mine 20 blocks + miner = self.simulator.create_miner(manager1, hashpower=1e6) + miner.start() + trigger = StopAfterNMinedBlocks(miner, quantity=20) + self.assertTrue(self.simulator.run(7200, trigger=trigger)) + miner.stop() + + connected_peers1 = list(manager1.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers1)) + protocol2 = connected_peers1[0] + state2 = protocol2.state + self.assertIsInstance(state2, ReadyState) + + connected_peers2 = list(manager2.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers2)) + protocol1 = connected_peers2[0] + state1 = 
protocol1.state + self.assertIsInstance(state1, ReadyState) + + # assert compliance with N blocks inside the boundaries + state1.send_get_best_blockchain(n_blocks=1) + self.simulator.run(60) + self.assertFalse(conn12.tr1.disconnecting) + + state2.send_get_best_blockchain(n_blocks=20) + self.simulator.run(60) + self.assertFalse(conn12.tr2.disconnecting) + + # assert compliance with N blocks under lower boundary + state1.send_get_best_blockchain(n_blocks=0) + self.simulator.run(60) + self.assertTrue(conn12.tr1.disconnecting) + + # assert compliance with N blocks beyond upper boundary + state2.send_get_best_blockchain(n_blocks=21) + self.simulator.run(60) + self.assertTrue(conn12.tr2.disconnecting) + + # prepare to assert param validation exception + manager1 = self.create_peer() + manager2 = self.create_peer() + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + self.simulator.run(60) + self.assertFalse(conn12.tr1.disconnecting) + self.assertFalse(conn12.tr2.disconnecting) + + connected_peers2 = list(manager2.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers2)) + protocol1 = connected_peers2[0] + state1 = protocol1.state + self.assertIsInstance(state1, ReadyState) + + # assert param validation exception closes connection + state1.handle_get_best_blockchain('invalid single value') + self.simulator.run(60) + # state1 is managed by manager2 + self.assertTrue(conn12.tr2.disconnecting) + + def test_handle_best_blockchain(self): + manager1 = self.create_peer() + manager2 = self.create_peer() + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + self.simulator.run(60) + + connected_peers1 = list(manager1.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers1)) + protocol2 = connected_peers1[0] + state2 = protocol2.state + self.assertIsInstance(state2, ReadyState) + + connected_peers2 = 
list(manager2.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers2)) + protocol1 = connected_peers2[0] + state1 = protocol1.state + self.assertIsInstance(state1, ReadyState) + + self.assertFalse(conn12.tr1.disconnecting) + self.simulator.run(60) + + # assert a valid blockchain keeps connections open + fake_blockchain = [ + (1, '0000000000000002eccfbca9bc06c449c01f37afb3cb49c04ee62921d9bcf9dc'), + (2, '00000000000000006c846e182462a2cc437070288a486dfa21aa64bb373b8507'), + ] + state1.handle_best_blockchain(json_dumps(fake_blockchain)) + state2.handle_best_blockchain(json_dumps(fake_blockchain)) + self.simulator.run(60) + self.assertFalse(conn12.tr1.disconnecting) + self.assertFalse(conn12.tr2.disconnecting) + + # assert an invalid HeightInfo closes connection + fake_blockchain = [ + # valid + (1, '0000000000000002eccfbca9bc06c449c01f37afb3cb49c04ee62921d9bcf9dc'), + # invalid because height is of float type + (3.1, '00000000000000006c846e182462a2cc437070288a486dfa21aa64bb373b8507'), + ] + state2.handle_best_blockchain(json_dumps(fake_blockchain)) + self.simulator.run(60) + self.assertTrue(conn12.tr1.disconnecting) + + fake_blockchain = [ + # valid + (1, '0000000000000002eccfbca9bc06c449c01f37afb3cb49c04ee62921d9bcf9dc'), + # invalid hash + (2, 'invalid hash'), + ] + state1.handle_best_blockchain(json_dumps(fake_blockchain)) + self.simulator.run(60) + self.assertTrue(conn12.tr2.disconnecting) + + def test_node_without_get_best_blockchain_capability(self): + manager1 = self.create_peer() + manager2 = self.create_peer() + + cababilities_without_get_best_blockchain = [ + settings.CAPABILITY_WHITELIST, + settings.CAPABILITY_SYNC_VERSION, + ] + manager2.capabilities = cababilities_without_get_best_blockchain + + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + self.simulator.run(60) + + # assert the nodes are connected + connected_peers1 = list(manager1.connections.connected_peers.values()) + 
self.assertEqual(1, len(connected_peers1)) + connected_peers2 = list(manager2.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers2)) + + # assert the peers have the proper capabilities + protocol2 = connected_peers1[0] + self.assertTrue(protocol2.capabilities.issuperset(set(cababilities_without_get_best_blockchain))) + protocol1 = connected_peers2[0] + self.assertTrue(protocol1.capabilities.issuperset(set(DEFAULT_CAPABILITIES))) + + # assert the peers don't engage in get_best_blockchain messages + state2 = protocol2.state + self.assertIsInstance(state2, ReadyState) + self.assertIsNone(state2.lc_get_best_blockchain) + state1 = protocol1.state + self.assertIsInstance(state1, ReadyState) + self.assertIsNone(state1.lc_get_best_blockchain) + + # assert the connections remains open + self.assertFalse(conn12.tr2.disconnecting) + self.assertFalse(conn12.tr1.disconnecting) + + # mine 10 blocks + miner = self.simulator.create_miner(manager1, hashpower=1e6) + miner.start() + trigger = StopAfterNMinedBlocks(miner, quantity=10) + self.assertTrue(self.simulator.run(720, trigger=trigger)) + miner.stop() + + # assert the best_blockchain remains empty even after mine + self.assertEqual([], state2.peer_best_blockchain) + self.assertEqual([], state1.peer_best_blockchain) + + # assert connections will close if force get_best_blockchain + state1.send_get_best_blockchain() + self.simulator.run(60) + self.assertTrue(conn12.tr1.disconnecting) + state2.send_get_best_blockchain() + self.simulator.run(60) + self.assertTrue(conn12.tr2.disconnecting) + + def test_best_blockchain_from_storage(self): + manager1 = self.create_peer() + manager2 = self.create_peer() + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + blocks = 10 + + # cache miss because the cache is empty + self.assertEqual(len(manager1.tx_storage._latest_n_height_tips), 0) + best_blockchain = manager1.tx_storage.get_n_height_tips(1) # there is only the 
genesis block + self.assertIsNotNone(manager1.tx_storage._latest_n_height_tips) + + # cache hit + block = best_blockchain[0] + best_blockchain = manager1.tx_storage.get_n_height_tips(1) # there is only the genesis block + memo_block = best_blockchain[0] + # can only produce the same object if use the memoized best_blockchain + self.assertTrue(block is memo_block) + + # cache miss if best block doesn't match + fake_block = HeightInfo(1, 'fake hash') + manager1._latest_n_height_tips = [fake_block] + best_blockchain = manager1.tx_storage.get_n_height_tips(1) # there is only the genesis block + block = best_blockchain[0] + # the memoized best_blockchain is skiped + # and a new best_blockchain object is generated + self.assertFalse(block is fake_block) + + # mine 10 blocks + miner = self.simulator.create_miner(manager1, hashpower=1e6) + miner.start() + trigger = StopAfterNMinedBlocks(miner, quantity=10) + self.assertTrue(self.simulator.run(720, trigger=trigger)) + miner.stop() + + # cache miss if n_blocks > cache length + manager1.tx_storage.get_n_height_tips(blocks) # update cache + memo_block = best_blockchain[0] + best_blockchain = manager1.tx_storage.get_n_height_tips(blocks+1) + block = best_blockchain[0] + self.assertFalse(block is memo_block) + + # cache hit if n_blocks <= cache length + memo_block = block + best_blockchain = manager1.tx_storage.get_n_height_tips(blocks-1) + block = best_blockchain[0] + self.assertTrue(block is memo_block) + + def test_stop_looping_on_exit(self): + manager1 = self.create_peer() + manager2 = self.create_peer() + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + self.simulator.run(60) + + connected_peers1 = list(manager1.connections.connected_peers.values()) + self.assertEqual(1, len(connected_peers1)) + protocol2 = connected_peers1[0] + state2 = protocol2.state + self.assertIsInstance(state2, ReadyState) + + connected_peers2 = list(manager2.connections.connected_peers.values()) + 
self.assertEqual(1, len(connected_peers2)) + protocol1 = connected_peers2[0] + state1 = protocol1.state + self.assertIsInstance(state1, ReadyState) + + self.assertIsNotNone(state1.lc_get_best_blockchain) + self.assertTrue(state1.lc_get_best_blockchain.running) + + self.assertIsNotNone(state2.lc_get_best_blockchain) + self.assertTrue(state2.lc_get_best_blockchain.running) + + state1.on_exit() + state2.on_exit() + + self.assertIsNotNone(state1.lc_get_best_blockchain) + self.assertFalse(state1.lc_get_best_blockchain.running) + + self.assertIsNotNone(state2.lc_get_best_blockchain) + self.assertFalse(state2.lc_get_best_blockchain.running) + + +class SyncV1GetBestBlockchainTestCase(unittest.SyncV1Params, BaseGetBestBlockchainTestCase): + __test__ = True + + +class SyncV2GetBestBlockchainTestCase(unittest.SyncV2Params, BaseGetBestBlockchainTestCase): + __test__ = True + + +# sync-bridge should behave like sync-v2 +class SyncBridgeGetBestBlockchainTestCase(unittest.SyncBridgeParams, BaseGetBestBlockchainTestCase): + __test__ = True diff --git a/tests/p2p/test_protocol.py b/tests/p2p/test_protocol.py index c661545b1..fc08d52cc 100644 --- a/tests/p2p/test_protocol.py +++ b/tests/p2p/test_protocol.py @@ -1,4 +1,5 @@ from json import JSONDecodeError +from typing import Optional from twisted.internet.defer import inlineCallbacks from twisted.python.failure import Failure @@ -25,6 +26,14 @@ def setUp(self): self.manager2 = self.create_peer(self.network, peer_id=self.peer_id2) self.conn = FakeConnection(self.manager1, self.manager2) + def assertAndStepConn(self, conn: FakeConnection, regex1: bytes, regex2: Optional[bytes] = None) -> None: + """If only one regex is given it is tested on both cons, if two are given they'll be used respectively.""" + if regex2 is None: + regex2 = regex1 + self.assertRegex(conn.peek_tr1_value(), regex1) + self.assertRegex(conn.peek_tr2_value(), regex2) + conn.run_one_step() + def assertIsConnected(self, conn=None): if conn is None: conn = self.conn 
@@ -157,20 +166,6 @@ def test_valid_hello(self): self.assertFalse(self.conn.tr1.disconnecting) self.assertFalse(self.conn.tr2.disconnecting) - @inlineCallbacks - def test_invalid_peer_id(self): - self.conn.run_one_step() # HELLO - self.conn.run_one_step() # PEER-ID - self.conn.run_one_step() # READY - self.conn.run_one_step() # GET-PEERS - self.conn.run_one_step() # GET-TIPS - self.conn.run_one_step() # PEERS - self.conn.run_one_step() # TIPS - invalid_payload = {'id': '123', 'entrypoints': ['tcp://localhost:1234']} - yield self._send_cmd(self.conn.proto1, 'PEER-ID', json_dumps(invalid_payload)) - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') - self.assertTrue(self.conn.tr1.disconnecting) - def test_invalid_same_peer_id(self): manager3 = self.create_peer(self.network, peer_id=self.peer_id1) conn = FakeConnection(self.manager1, manager3) @@ -218,14 +213,13 @@ def test_invalid_same_peer_id2(self): self.conn.run_until_empty() conn.run_until_empty() self.run_to_completion() - # one of the peers will close the connection. We don't know which on, as it depends + # one of the peers will close the connection. 
We don't know which one, as it depends # on the peer ids - conn1_value = self.conn.peek_tr1_value() + self.conn.peek_tr2_value() - conn2_value = conn.peek_tr1_value() + conn.peek_tr2_value() - if b'ERROR' in conn1_value: + + if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting: conn_dead = self.conn conn_alive = conn - elif b'ERROR' in conn2_value: + elif conn.tr1.disconnecting or conn.tr2.disconnecting: conn_dead = conn conn_alive = self.conn else: @@ -248,51 +242,6 @@ def test_invalid_different_network(self): self.assertTrue(conn.tr1.disconnecting) conn.run_one_step() # ERROR - def test_valid_hello_and_peer_id(self): - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'HELLO') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'HELLO') - self.conn.run_one_step() # HELLO - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEER-ID') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEER-ID') - self.conn.run_one_step() # PEER-ID - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'READY') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'READY') - self.conn.run_one_step() # READY - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-PEERS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-PEERS') - self.conn.run_one_step() # GET-PEERS - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-TIPS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-TIPS') - self.conn.run_one_step() # GET-TIPS - self.assertIsConnected() - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEERS') - self.conn.run_one_step() # PEERS - self._check_result_only_cmd(self.conn.peek_tr1_value(), b'TIPS') - self._check_result_only_cmd(self.conn.peek_tr2_value(), b'TIPS') - self.conn.run_one_step() # TIPS - self.assertIsConnected() - - def test_send_ping(self): - self.conn.run_one_step() # HELLO - 
self.conn.run_one_step() # PEER-ID - self.conn.run_one_step() # READY - self.conn.run_one_step() # GET-PEERS - self.conn.run_one_step() # GET-TIPS - self.conn.run_one_step() # PEERS - self.conn.run_one_step() # TIPS - self.assertIsConnected() - self.clock.advance(5) - self.assertEqual(b'PING\r\n', self.conn.peek_tr1_value()) - self.assertEqual(b'PING\r\n', self.conn.peek_tr2_value()) - self.conn.run_one_step() # PING - self.conn.run_one_step() # GET-TIPS - self.assertEqual(b'PONG\r\n', self.conn.peek_tr1_value()) - self.assertEqual(b'PONG\r\n', self.conn.peek_tr2_value()) - while b'PONG\r\n' in self.conn.peek_tr1_value(): - self.conn.run_one_step() - self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) - def test_send_invalid_unicode(self): # \xff is an invalid unicode. self.conn.proto1.dataReceived(b'\xff\r\n') @@ -330,6 +279,16 @@ def test_on_disconnect_after_peer_id(self): # Peer id 2 removed from peer_storage (known_peers) after disconnection and after looping call self.assertNotIn(self.peer_id2.id, self.manager1.connections.peer_storage) + def test_idle_connection(self): + self.clock.advance(settings.PEER_IDLE_TIMEOUT - 10) + self.assertIsConnected(self.conn) + self.clock.advance(15) + self.assertIsNotConnected(self.conn) + + +class SyncV1HathorProtocolTestCase(unittest.SyncV1Params, BaseHathorProtocolTestCase): + __test__ = True + def test_two_connections(self): self.conn.run_one_step() # HELLO self.conn.run_one_step() # PEER-ID @@ -347,12 +306,6 @@ def test_two_connections(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') self.conn.run_one_step() - def test_idle_connection(self): - self.clock.advance(settings.PEER_IDLE_TIMEOUT - 10) - self.assertIsConnected(self.conn) - self.clock.advance(15) - self.assertIsNotConnected(self.conn) - @inlineCallbacks def test_get_data(self): self.conn.run_one_step() # HELLO @@ -368,14 +321,180 @@ def test_get_data(self): self._check_result_only_cmd(self.conn.peek_tr1_value(), 
b'NOT-FOUND') self.conn.run_one_step() + def test_valid_hello_and_peer_id(self): + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'HELLO') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'HELLO') + self.conn.run_one_step() # HELLO + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEER-ID') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEER-ID') + self.conn.run_one_step() # PEER-ID + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'READY') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'READY') + self.conn.run_one_step() # READY + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-PEERS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-PEERS') + self.conn.run_one_step() # GET-PEERS + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'GET-TIPS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'GET-TIPS') + self.conn.run_one_step() # GET-TIPS + self.assertIsConnected() + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'PEERS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'PEERS') + self.conn.run_one_step() # PEERS + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'TIPS') + self._check_result_only_cmd(self.conn.peek_tr2_value(), b'TIPS') + self.conn.run_one_step() # TIPS + self.assertIsConnected() -class SyncV1HathorProtocolTestCase(unittest.SyncV1Params, BaseHathorProtocolTestCase): - __test__ = True + def test_send_ping(self): + self.conn.run_one_step() # HELLO + self.conn.run_one_step() # PEER-ID + self.conn.run_one_step() # READY + self.conn.run_one_step() # GET-PEERS + self.conn.run_one_step() # GET-TIPS + self.conn.run_one_step() # PEERS + self.conn.run_one_step() # TIPS + self.assertIsConnected() + self.clock.advance(5) + self.assertEqual(b'PING\r\n', self.conn.peek_tr1_value()) + self.assertEqual(b'PING\r\n', self.conn.peek_tr2_value()) + self.conn.run_one_step() # PING + self.conn.run_one_step() # GET-TIPS + 
self.conn.run_one_step() # GET-BEST-BLOCKCHAIN + self.assertEqual(b'PONG\r\n', self.conn.peek_tr1_value()) + self.assertEqual(b'PONG\r\n', self.conn.peek_tr2_value()) + while b'PONG\r\n' in self.conn.peek_tr1_value(): + self.conn.run_one_step() + self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) + + @inlineCallbacks + def test_invalid_peer_id(self): + self.conn.run_one_step() # HELLO + self.conn.run_one_step() # PEER-ID + self.conn.run_one_step() # READY + self.conn.run_one_step() # GET-PEERS + self.conn.run_one_step() # GET-TIPS + self.conn.run_one_step() # PEERS + self.conn.run_one_step() # TIPS + invalid_payload = {'id': '123', 'entrypoints': ['tcp://localhost:1234']} + yield self._send_cmd(self.conn.proto1, 'PEER-ID', json_dumps(invalid_payload)) + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'ERROR') + self.assertTrue(self.conn.tr1.disconnecting) class SyncV2HathorProtocolTestCase(unittest.SyncV2Params, BaseHathorProtocolTestCase): __test__ = True + def test_two_connections(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + self.assertIsConnected() + + # disable timeout because we will make several steps on a new conn and this might get left behind + self.conn.disable_idle_timeout() + + manager3 = self.create_peer(self.network, enable_sync_v2=True) + conn = FakeConnection(self.manager1, manager3) + self.assertAndStepConn(conn, b'^HELLO') + self.assertAndStepConn(conn, b'^PEER-ID') + self.assertAndStepConn(conn, b'^READY') + self.assertAndStepConn(conn, b'^GET-PEERS') + + self.clock.advance(5) + self.assertIsConnected() + self.assertAndStepConn(self.conn, b'^GET-TIPS') + 
self.assertAndStepConn(self.conn, b'^PING') + + for _ in range(19): + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCKCHAIN') + + # peer1 should now send a PEERS with the new peer that just connected + self.assertAndStepConn(self.conn, b'^PEERS', b'^GET-BEST-BLOCKCHAIN') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCKCHAIN', b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS', b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS', b'^TIPS-END') + self.assertAndStepConn(self.conn, b'^TIPS-END', b'^PONG') + self.assertAndStepConn(self.conn, b'^PONG', b'^BEST-BLOCKCHAIN') + self.assertIsConnected() + + @inlineCallbacks + def test_get_data(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + self.assertIsConnected() + missing_tx = '00000000228dfcd5dec1c9c6263f6430a5b4316bb9e3decb9441a6414bfd8697' + payload = {'until_first_block': missing_tx, 'start_from': [settings.GENESIS_BLOCK_HASH.hex()]} + yield self._send_cmd(self.conn.proto1, 'GET-TRANSACTIONS-BFS', json_dumps(payload)) + self._check_result_only_cmd(self.conn.peek_tr1_value(), b'NOT-FOUND') + self.conn.run_one_step() + + def test_valid_hello_and_peer_id(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + + # this will tick the ping-pong mechanism and looping calls + self.clock.advance(5) + 
self.assertIsConnected() + self.assertAndStepConn(self.conn, b'^GET-TIPS') + self.assertAndStepConn(self.conn, b'^PING') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCKCHAIN') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS-END') + self.assertAndStepConn(self.conn, b'^PONG') + self.assertIsConnected() + + self.clock.advance(5) + self.assertAndStepConn(self.conn, b'^BEST-BLOCKCHAIN') + self.assertAndStepConn(self.conn, b'^PING') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PONG') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertIsConnected() + + def test_send_ping(self): + self.assertAndStepConn(self.conn, b'^HELLO') + self.assertAndStepConn(self.conn, b'^PEER-ID') + self.assertAndStepConn(self.conn, b'^READY') + self.assertAndStepConn(self.conn, b'^GET-PEERS') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^PEERS') + self.assertAndStepConn(self.conn, b'^BEST-BLOCK') + self.assertAndStepConn(self.conn, b'^RELAY') + + # this will tick the ping-pong mechanism and looping calls + self.clock.advance(5) + self.assertAndStepConn(self.conn, b'^GET-TIPS') + self.assertAndStepConn(self.conn, b'^PING') + self.assertAndStepConn(self.conn, b'^GET-BEST-BLOCKCHAIN') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS') + self.assertAndStepConn(self.conn, b'^TIPS-END') + self.assertEqual(b'PONG\r\n', self.conn.peek_tr1_value()) + self.assertEqual(b'PONG\r\n', self.conn.peek_tr2_value()) + while b'PONG\r\n' in self.conn.peek_tr1_value(): + self.conn.run_one_step() + self.assertEqual(self.clock.seconds(), self.conn.proto1.last_message) + # sync-bridge should behave like sync-v2 class SyncBridgeHathorProtocolTestCase(unittest.SyncBridgeParams, SyncV2HathorProtocolTestCase): diff --git 
a/tests/p2p/test_split_brain.py b/tests/p2p/test_split_brain.py index 547f845ee..7bc2f44c6 100644 --- a/tests/p2p/test_split_brain.py +++ b/tests/p2p/test_split_brain.py @@ -1,5 +1,3 @@ -import random - import pytest from mnemonic import Mnemonic @@ -11,21 +9,15 @@ from tests.utils import add_blocks_unlock_reward, add_new_block, add_new_double_spending, add_new_transactions -class BaseHathorSplitBrainTestCase(unittest.TestCase): +class BaseHathorSyncMethodsTestCase(unittest.TestCase): __test__ = False def setUp(self): super().setUp() - - # import sys - # from twisted.python import log - # log.startLogging(sys.stdout) - - # self.set_random_seed(0) - from hathor.transaction.genesis import _get_genesis_transactions_unsafe + first_timestamp = min(tx.timestamp for tx in _get_genesis_transactions_unsafe(None)) - self.clock.advance(first_timestamp + random.randint(3600, 120*24*3600)) + self.clock.advance(first_timestamp + self.rng.randint(3600, 120*24*3600)) self.network = 'testnet' @@ -40,12 +32,12 @@ def create_peer(self, network, unlock_wallet=True): # Don't use it anywhere else. It is unsafe to generate mnemonic words like this. # It should be used only for testing purposes. 
m = Mnemonic('english') - words = m.to_mnemonic(bytes(random.randint(0, 255) for _ in range(32))) + words = m.to_mnemonic(bytes(self.rng.randint(0, 255) for _ in range(32))) wallet.unlock(words=words, tx_storage=manager.tx_storage) return manager @pytest.mark.slow - def test_split_brain(self): + def test_split_brain_plain(self): debug_pdf = False manager1 = self.create_peer(self.network, unlock_wallet=True) @@ -60,9 +52,9 @@ def test_split_brain(self): add_new_block(manager2, advance_clock=1) add_blocks_unlock_reward(manager2) self.clock.advance(10) - for _ in range(random.randint(3, 10)): - add_new_transactions(manager1, random.randint(2, 4), advance_clock=1) - add_new_transactions(manager2, random.randint(3, 7), advance_clock=1) + for _ in range(self.rng.randint(3, 10)): + add_new_transactions(manager1, self.rng.randint(2, 4), advance_clock=1) + add_new_transactions(manager2, self.rng.randint(3, 7), advance_clock=1) add_new_double_spending(manager1) add_new_double_spending(manager2) self.clock.advance(10) @@ -80,16 +72,60 @@ def test_split_brain(self): conn = FakeConnection(manager1, manager2) - conn.run_one_step() # HELLO - conn.run_one_step() # PEER-ID - conn.run_one_step() # READY - conn.run_one_step() # GET-PEERS - conn.run_one_step() # GET-TIPS - conn.run_one_step() # PEERS - conn.run_one_step() # TIPS + # upper limit to how many steps it definitely should be enough + for i in range(3000): + if not conn.can_step(): + break + conn.run_one_step() + self.clock.advance(0.2) + else: + # error if we fall off the loop without breaking + self.fail('took more steps than expected') + self.log.debug('steps', count=i) + for i in range(500): + conn.run_one_step() + self.clock.advance(0.2) + + if debug_pdf: + dot1 = GraphvizVisualizer(manager1.tx_storage, include_verifications=True).dot() + dot1.render('dot1-post') + dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot() + dot2.render('dot2-post') + + node_sync = conn.proto1.state.sync_agent + 
self.assertSyncedProgress(node_sync) + self.assertTipsEqual(manager1, manager2) + self.assertConsensusEqual(manager1, manager2) + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + @pytest.mark.slow + def test_split_brain_only_blocks_different_height(self): + manager1 = self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + + for _ in range(10): + add_new_block(manager1, advance_clock=1) + add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + add_blocks_unlock_reward(manager2) + self.clock.advance(10) + + # Add one more block to manager1, so it's the winner chain + add_new_block(manager1, advance_clock=1) + + block_tip1 = manager1.tx_storage.indexes.height.get_tip() + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + conn = FakeConnection(manager1, manager2) empty_counter = 0 - for i in range(2000): + for i in range(1000): if conn.is_empty(): empty_counter += 1 if empty_counter > 10: @@ -98,30 +134,273 @@ def test_split_brain(self): empty_counter = 0 conn.run_one_step() - self.clock.advance(0.2) + self.clock.advance(1) - if debug_pdf: - dot1 = GraphvizVisualizer(manager1.tx_storage, include_verifications=True).dot() - dot1.render('dot1-post') - dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot() - dot2.render('dot2-post') + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + self.assertConsensusEqual(manager1, manager2) - node_sync = conn.proto1.state.sync_manager - self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) - self.assertTipsEqual(manager1, manager2) + self.assertEqual(block_tip1, manager1.tx_storage.indexes.height.get_tip()) + self.assertEqual(block_tip1, manager2.tx_storage.indexes.height.get_tip()) + + # XXX We must decide what to do when different chains have 
the same score + # For now we are voiding everyone until the first common block + def test_split_brain_only_blocks_same_height(self): + manager1 = self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + + for _ in range(10): + add_new_block(manager1, advance_clock=1) + unlock_reward_blocks1 = add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + unlock_reward_blocks2 = add_blocks_unlock_reward(manager2) + self.clock.advance(10) + + block_tips1 = unlock_reward_blocks1[-1].hash + block_tips2 = unlock_reward_blocks2[-1].hash + + self.assertEqual(len(manager1.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager1.tx_storage.get_best_block_tips(), {block_tips1}) + self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {block_tips2}) + + # Save winners for manager1 and manager2 + winners1 = set() + for tx1 in manager1.tx_storage.get_all_transactions(): + tx1_meta = tx1.get_metadata() + if not tx1_meta.voided_by: + winners1.add(tx1.hash) + + winners2 = set() + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if not tx2_meta.voided_by: + winners2.add(tx2.hash) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + conn = FakeConnection(manager1, manager2) + + empty_counter = 0 + for i in range(1000): + if conn.is_empty(): + empty_counter += 1 + if empty_counter > 10: + break + else: + empty_counter = 0 + + conn.run_one_step() + self.clock.advance(1) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + # self.assertEqual(len(manager1.tx_storage.get_best_block_tips()), 2) + # self.assertCountEqual(manager1.tx_storage.get_best_block_tips(), {block_tips1, block_tips2}) + # 
self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 2) + # self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {block_tips1, block_tips2}) + + winners1_after = set() + for tx1 in manager1.tx_storage.get_all_transactions(): + tx1_meta = tx1.get_metadata() + if not tx1_meta.voided_by: + winners1_after.add(tx1.hash) + + winners2_after = set() + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if not tx2_meta.voided_by: + winners2_after.add(tx2.hash) + + # Both chains have the same height and score + # so they will void all blocks and keep only the genesis (the common block and txs) + self.assertEqual(len(winners1_after), 3) + self.assertEqual(len(winners2_after), 3) + + new_block = add_new_block(manager1, advance_clock=1) + self.clock.advance(20) + + empty_counter = 0 + for i in range(500): + if conn.is_empty(): + empty_counter += 1 + if empty_counter > 10: + break + else: + empty_counter = 0 + + conn.run_one_step() + self.clock.advance(1) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + winners1_after = set() + for tx1 in manager1.tx_storage.get_all_transactions(): + tx1_meta = tx1.get_metadata() + if not tx1_meta.voided_by: + winners1_after.add(tx1.hash) + + winners2_after = set() + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if not tx2_meta.voided_by: + winners2_after.add(tx2.hash) + + winners1.add(new_block.hash) + winners2.add(new_block.hash) + + if new_block.get_block_parent().hash == block_tips1: + winners = winners1 + else: + winners = winners2 + + self.assertCountEqual(winners, winners1_after) + self.assertCountEqual(winners, winners2_after) + + self.assertEqual(len(manager1.tx_storage.get_best_block_tips()), 1) + self.assertCountEqual(manager1.tx_storage.get_best_block_tips(), {new_block.hash}) + self.assertEqual(len(manager2.tx_storage.get_best_block_tips()), 1) + 
self.assertCountEqual(manager2.tx_storage.get_best_block_tips(), {new_block.hash}) + + def test_split_brain_only_blocks_bigger_score(self): + manager1 = self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + + # Start with 1 because of the genesis block + manager2_blocks = 1 + for _ in range(10): + add_new_block(manager1, advance_clock=1) + add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + manager2_blocks += 1 + blocks2 = add_blocks_unlock_reward(manager2) + manager2_blocks += len(blocks2) + self.clock.advance(10) + + # Add two more blocks to manager1, so it's the winner chain + add_new_block(manager1, advance_clock=1) + add_new_block(manager1, advance_clock=1) + + # Propagates a block with bigger weight, so the score of the manager2 chain + # will be bigger than the other one + b = add_new_block(manager2, advance_clock=1, propagate=False) + b.weight = 5 + b.resolve() + manager2.propagate_tx(b) + manager2_blocks += 1 + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + conn = FakeConnection(manager1, manager2) + + empty_counter = 0 + for i in range(1000): + if conn.is_empty(): + empty_counter += 1 + if empty_counter > 10: + break + else: + empty_counter = 0 + + conn.run_one_step() + self.clock.advance(1) + + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) self.assertConsensusEqual(manager1, manager2) + + winners2_blocks = 0 + for tx2 in manager2.tx_storage.get_all_transactions(): + tx2_meta = tx2.get_metadata() + if tx2.is_block and not tx2_meta.voided_by: + winners2_blocks += 1 + + # Assert that the consensus had the manager2 chain + self.assertEqual(winners2_blocks, manager2_blocks) + + def test_split_brain_no_double_spending(self): + manager1 = self.create_peer(self.network, unlock_wallet=True) + manager1.avg_time_between_blocks = 3 + 
manager1.connections.disable_rate_limiter() + + manager2 = self.create_peer(self.network, unlock_wallet=True) + manager2.avg_time_between_blocks = 3 + manager2.connections.disable_rate_limiter() + + winner_blocks = 1 + winner_txs = 2 + + for _ in range(10): + add_new_block(manager1, advance_clock=1) + add_blocks_unlock_reward(manager1) + add_new_block(manager2, advance_clock=1) + winner_blocks += 1 + blocks = add_blocks_unlock_reward(manager2) + winner_blocks += len(blocks) + self.clock.advance(10) + for _ in range(self.rng.randint(3, 10)): + add_new_transactions(manager1, self.rng.randint(2, 4), advance_clock=1) + txs = add_new_transactions(manager2, self.rng.randint(3, 7), advance_clock=1) + winner_txs += len(txs) + self.clock.advance(10) + + self.clock.advance(20) + + # Manager2 will be the winner because it has the biggest chain + add_new_block(manager2, advance_clock=1) + winner_blocks += 1 + self.clock.advance(20) + self.assertConsensusValid(manager1) self.assertConsensusValid(manager2) + conn = FakeConnection(manager1, manager2) + # Disable idle timeout. 
+ conn.disable_idle_timeout() + + self.log.info('starting sync now...') + + # upper limit to how many steps it definitely should be enough + for i in range(3000): + if not conn.can_step(): + break + conn.run_one_step() + self.clock.advance(1) + conn.run_until_empty() + + self.log.debug('steps taken', steps=i + 1) + + self.assertConsensusEqual(manager1, manager2) + self.assertConsensusValid(manager1) + self.assertConsensusValid(manager2) + + winners2 = set() + for tx in manager2.tx_storage.get_all_transactions(): + tx_meta = tx.get_metadata() + if not tx_meta.voided_by: + winners2.add(tx.hash) + + self.assertEqual(len(winners2), winner_blocks + winner_txs) + -class SyncV1HathorSplitBrainTestCase(unittest.SyncV1Params, BaseHathorSplitBrainTestCase): +class SyncV1HathorSyncMethodsTestCase(unittest.SyncV1Params, BaseHathorSyncMethodsTestCase): __test__ = True -class SyncV2HathorSplitBrainTestCase(unittest.SyncV2Params, BaseHathorSplitBrainTestCase): +class SyncV2HathorSyncMethodsTestCase(unittest.SyncV2Params, BaseHathorSyncMethodsTestCase): __test__ = True # sync-bridge should behave like sync-v2 -class SyncBridgeHathorSplitBrainTestCase(unittest.SyncBridgeParams, SyncV2HathorSplitBrainTestCase): +class SyncBridgeHathorSyncMethodsTestCase(unittest.SyncBridgeParams, SyncV2HathorSyncMethodsTestCase): pass diff --git a/tests/p2p/test_split_brain2.py b/tests/p2p/test_split_brain2.py index 9fad42242..fc4601898 100644 --- a/tests/p2p/test_split_brain2.py +++ b/tests/p2p/test_split_brain2.py @@ -67,10 +67,8 @@ def test_split_brain(self): dot2 = GraphvizVisualizer(manager2.tx_storage, include_verifications=True).dot() dot2.render('dot2-post') - node_sync = conn12.proto1.state.sync_manager - self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) - node_sync = conn12.proto2.state.sync_manager - self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) + self.assertSyncedProgress(conn12.proto1.state.sync_agent) + 
self.assertSyncedProgress(conn12.proto2.state.sync_agent) self.assertTipsEqual(manager1, manager2) self.assertConsensusEqual(manager1, manager2) self.assertConsensusValid(manager1) diff --git a/tests/p2p/test_sync.py b/tests/p2p/test_sync.py index 0efbd7b82..72d1d6592 100644 --- a/tests/p2p/test_sync.py +++ b/tests/p2p/test_sync.py @@ -1,7 +1,6 @@ -import random - from twisted.python.failure import Failure +from hathor.checkpoint import Checkpoint as cp from hathor.conf import HathorSettings from hathor.crypto.util import decode_address from hathor.p2p.protocol import PeerIdState @@ -9,6 +8,7 @@ from hathor.simulator import FakeConnection from hathor.transaction.storage.exceptions import TransactionIsNotABlock from tests import unittest +from tests.utils import add_blocks_unlock_reward settings = HathorSettings() @@ -53,7 +53,7 @@ def _add_new_transactions(self, num_txs): txs = [] for _ in range(num_txs): address = self.get_address(0) - value = random.choice([5, 10, 50, 100, 120]) + value = self.rng.choice([5, 10, 50, 100, 120]) tx = self._add_new_tx(address, value) txs.append(tx) return txs @@ -76,7 +76,6 @@ def test_get_blocks_before(self): genesis_block = self.genesis_blocks[0] result = self.manager1.tx_storage.get_blocks_before(genesis_block.hash) self.assertEqual(0, len(result)) - genesis_tx = [tx for tx in self.genesis if not tx.is_block][0] with self.assertRaises(TransactionIsNotABlock): self.manager1.tx_storage.get_blocks_before(genesis_tx.hash) @@ -102,7 +101,7 @@ def test_block_sync_only_genesis(self): conn.run_one_step() # PEER-ID conn.run_one_step() # READY - node_sync = conn.proto1.state.sync_manager + node_sync = conn.proto1.state.sync_agent self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) self.assertTipsEqual(self.manager1, manager2) @@ -120,7 +119,7 @@ def test_block_sync_new_blocks(self): conn.run_one_step(debug=True) self.clock.advance(0.1) - node_sync = conn.proto1.state.sync_manager + node_sync = 
conn.proto1.state.sync_agent self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) self.assertTipsEqual(self.manager1, manager2) self.assertConsensusEqual(self.manager1, manager2) @@ -140,7 +139,7 @@ def test_block_sync_many_new_blocks(self): conn.run_one_step(debug=True) self.clock.advance(0.1) - node_sync = conn.proto1.state.sync_manager + node_sync = conn.proto1.state.sync_agent self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) self.assertTipsEqual(self.manager1, manager2) self.assertConsensusEqual(self.manager1, manager2) @@ -168,7 +167,7 @@ def test_block_sync_new_blocks_and_txs(self): # dot2 = manager2.tx_storage.graphviz(format='pdf') # dot2.render('dot2') - node_sync = conn.proto1.state.sync_manager + node_sync = conn.proto1.state.sync_agent self.assertEqual(self.manager1.tx_storage.latest_timestamp, manager2.tx_storage.latest_timestamp) self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) self.assertTipsEqual(self.manager1, manager2) @@ -217,8 +216,9 @@ def test_tx_propagation_nat_peers(self): self._add_new_transactions(1) - for _ in range(1000): - if self.conn1.is_empty() and self.conn2.is_empty(): + for i in range(1000): + # XXX: give it at least 100 steps before checking for emptyness + if i > 100 and self.conn1.is_empty() and self.conn2.is_empty(): break self.conn1.run_one_step() self.conn2.run_one_step() @@ -477,6 +477,273 @@ def test_downloader_disconnect(self): class SyncV2HathorSyncMethodsTestCase(unittest.SyncV2Params, BaseHathorSyncMethodsTestCase): __test__ = True + def test_sync_metadata(self): + # test if the synced peer will build all tx metadata correctly + + height = 0 + # add a mix of blocks and transactions + height += len(self._add_new_blocks(8)) + height += len(add_blocks_unlock_reward(self.manager1)) + self._add_new_transactions(2) + height += len(self._add_new_blocks(1)) + self._add_new_transactions(4) + height += len(self._add_new_blocks(2)) + self._add_new_transactions(2) + + 
manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + conn = FakeConnection(self.manager1, manager2) + + for _ in range(100): + if conn.is_empty(): + break + conn.run_one_step(debug=True) + self.clock.advance(1) + + # check they have the same consensus + node_sync1 = conn.proto1.state.sync_agent + node_sync2 = conn.proto2.state.sync_agent + self.assertEqual(node_sync1.peer_height, height) + self.assertEqual(node_sync1.synced_height, height) + self.assertEqual(node_sync2.peer_height, height) + # 3 genesis + blocks + 8 txs + self.assertEqual(self.manager1.tx_storage.get_vertices_count(), height + 11) + self.assertEqual(manager2.tx_storage.get_vertices_count(), height + 11) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + self.assertConsensusEqual(self.manager1, manager2) + + # Nodes are synced. Make sure manager2 has the correct metadata. + for tx in self.manager1.tx_storage.topological_iterator(): + meta1 = tx.get_metadata() + meta2 = manager2.tx_storage.get_metadata(tx.hash) + self.assertCountEqual(meta1.children or [], meta2.children or []) + self.assertCountEqual(meta1.voided_by or [], meta2.voided_by or []) + self.assertCountEqual(meta1.conflict_with or [], meta2.conflict_with or []) + self.assertCountEqual(meta1.twins or [], meta2.twins or []) + + def test_tx_propagation_nat_peers(self): + super().test_tx_propagation_nat_peers() + + node_sync1 = self.conn1.proto1.state.sync_agent + self.assertEqual(self.manager1.tx_storage.latest_timestamp, self.manager2.tx_storage.latest_timestamp) + self.assertEqual(node_sync1.peer_height, node_sync1.synced_height) + self.assertEqual(node_sync1.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, self.manager2) + + node_sync2 = self.conn2.proto1.state.sync_agent + self.assertEqual(self.manager2.tx_storage.latest_timestamp, self.manager3.tx_storage.latest_timestamp) + 
self.assertEqual(node_sync2.peer_height, node_sync2.synced_height) + self.assertEqual(node_sync2.peer_height, self.manager2.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager2, self.manager3) + + def test_block_sync_new_blocks_and_txs(self): + self._add_new_blocks(25) + self._add_new_transactions(3) + self._add_new_blocks(4) + self._add_new_transactions(5) + + manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + for _ in range(1000): + conn.run_one_step() + self.clock.advance(0.1) + + # dot1 = self.manager1.tx_storage.graphviz(format='pdf') + # dot1.render('dot1') + + # dot2 = manager2.tx_storage.graphviz(format='pdf') + # dot2.render('dot2') + + node_sync = conn.proto1.state.sync_agent + self.assertEqual(self.manager1.tx_storage.latest_timestamp, manager2.tx_storage.latest_timestamp) + self.assertEqual(node_sync.peer_height, node_sync.synced_height) + self.assertEqual(node_sync.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, manager2) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_block_sync_many_new_blocks(self): + self._add_new_blocks(150) + + manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + for _ in range(1000): + if conn.is_empty(): + break + conn.run_one_step(debug=True) + self.clock.advance(1) + + node_sync = conn.proto1.state.sync_agent + self.assertEqual(node_sync.peer_height, node_sync.synced_height) + self.assertEqual(node_sync.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, manager2) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_block_sync_new_blocks(self): + self._add_new_blocks(15) + + manager2 = 
self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + for _ in range(1000): + if conn.is_empty(): + break + conn.run_one_step(debug=True) + self.clock.advance(1) + + node_sync = conn.proto1.state.sync_agent + self.assertEqual(node_sync.peer_height, node_sync.synced_height) + self.assertEqual(node_sync.peer_height, self.manager1.tx_storage.get_height_best_block()) + self.assertConsensusEqual(self.manager1, manager2) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_full_sync(self): + # 10 blocks + blocks = self._add_new_blocks(10) + # N blocks to unlock the reward + unlock_reward_blocks = add_blocks_unlock_reward(self.manager1) + len_reward_unlock = len(unlock_reward_blocks) + # 3 transactions still before the last checkpoint + self._add_new_transactions(3) + # 5 more blocks and the last one is the last checkpoint + new_blocks = self._add_new_blocks(5) + + LAST_CHECKPOINT = len(blocks) + len_reward_unlock + len(new_blocks) + FIRST_CHECKPOINT = LAST_CHECKPOINT // 2 + cps = [ + cp(0, self.genesis_blocks[0].hash), + cp(FIRST_CHECKPOINT, (blocks + unlock_reward_blocks + new_blocks)[FIRST_CHECKPOINT - 1].hash), + cp(LAST_CHECKPOINT, (blocks + unlock_reward_blocks + new_blocks)[LAST_CHECKPOINT - 1].hash) + ] + + # 5 blocks after the last checkpoint + self._add_new_blocks(5) + # 3 transactions + self._add_new_transactions(3) + # 5 more blocks + self._add_new_blocks(5) + + # Add transactions to the mempool + self._add_new_transactions(2) + + self.manager1.checkpoints = cps + + manager2 = self.create_peer(self.network) + manager2.checkpoints = cps + self.assertEqual(manager2.state, manager2.NodeState.READY) + + total_count = 36 + len_reward_unlock + + self.assertEqual(self.manager1.tx_storage.get_vertices_count(), total_count) + self.assertEqual(manager2.tx_storage.get_vertices_count(), 3) + + conn = FakeConnection(self.manager1, manager2) 
+ for i in range(300): + conn.run_one_step(debug=True) + self.clock.advance(0.1) + conn.run_until_empty(1000) + + # node_sync = conn.proto1.state.sync_agent + # self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) + # self.assertTipsEqual(self.manager1, manager2) + common_height = 25 + len_reward_unlock + + self.assertEqual(self.manager1.tx_storage.get_height_best_block(), common_height) + self.assertEqual(manager2.tx_storage.get_height_best_block(), common_height) + + node_sync1 = conn.proto1.state.sync_agent + node_sync2 = conn.proto2.state.sync_agent + self.assertEqual(node_sync1.peer_height, common_height) + self.assertEqual(node_sync1.synced_height, common_height) + self.assertEqual(node_sync2.peer_height, common_height) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + self.assertConsensusEqual(self.manager1, manager2) + + # 3 genesis + # 25 blocks + # Unlock reward blocks + # 8 txs + self.assertEqual(self.manager1.tx_storage.get_vertices_count(), total_count) + self.assertEqual(manager2.tx_storage.get_vertices_count(), total_count) + self.assertEqual(len(manager2.tx_storage.indexes.mempool_tips.get()), 1) + self.assertEqual(len(self.manager1.tx_storage.indexes.mempool_tips.get()), 1) + + def test_block_sync_checkpoints(self): + TOTAL_BLOCKS = 30 + LAST_CHECKPOINT = 15 + FIRST_CHECKPOINT = LAST_CHECKPOINT // 2 + blocks = self._add_new_blocks(TOTAL_BLOCKS, propagate=False) + cps = [ + cp(0, self.genesis_blocks[0].hash), + cp(FIRST_CHECKPOINT, blocks[FIRST_CHECKPOINT - 1].hash), + cp(LAST_CHECKPOINT, blocks[LAST_CHECKPOINT - 1].hash) + ] + self.manager1.checkpoints = cps + + manager2 = self.create_peer(self.network) + manager2.checkpoints = cps + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + # initial connection setup + for _ in range(100): + conn.run_one_step(debug=False) + self.clock.advance(0.1) + + # find synced timestamp + 
self.clock.advance(5) + for _ in range(600): + conn.run_one_step(debug=False) + self.clock.advance(0.1) + + self.assertEqual(self.manager1.tx_storage.get_best_block().get_metadata().height, TOTAL_BLOCKS) + self.assertEqual(manager2.tx_storage.get_best_block().get_metadata().height, TOTAL_BLOCKS) + + node_sync1 = conn.proto1.state.sync_agent + node_sync2 = conn.proto2.state.sync_agent + + self.assertEqual(node_sync1.peer_height, TOTAL_BLOCKS) + self.assertEqual(node_sync1.synced_height, TOTAL_BLOCKS) + self.assertEqual(node_sync2.peer_height, len(blocks)) + self.assertConsensusValid(self.manager1) + self.assertConsensusValid(manager2) + + def test_block_sync_only_genesis(self): + manager2 = self.create_peer(self.network) + self.assertEqual(manager2.state, manager2.NodeState.READY) + + conn = FakeConnection(self.manager1, manager2) + + genesis_tx = [tx for tx in self.genesis if not tx.is_block][0] + with self.assertRaises(TransactionIsNotABlock): + self.manager1.tx_storage.get_blocks_before(genesis_tx.hash) + + for _ in range(100): + if conn.is_empty(): + break + conn.run_one_step(debug=True) + self.clock.advance(1) + + node_sync = conn.proto1.state.sync_agent + self.assertEqual(node_sync.synced_height, 0) + self.assertEqual(node_sync.peer_height, 0) + + self.assertEqual(self.manager1.tx_storage.get_vertices_count(), 3) + self.assertEqual(manager2.tx_storage.get_vertices_count(), 3) + # TODO: an equivalent test to test_downloader, could be something like test_checkpoint_sync diff --git a/tests/p2p/test_sync_bridge.py b/tests/p2p/test_sync_bridge.py new file mode 100644 index 000000000..cdf000627 --- /dev/null +++ b/tests/p2p/test_sync_bridge.py @@ -0,0 +1,82 @@ +from hathor.simulator import FakeConnection +from tests.simulation.base import SimulatorTestCase + + +class MixedSyncRandomSimulatorTestCase(SimulatorTestCase): + __test__ = True + + def test_the_three_transacting_miners(self): + manager1 = self.create_peer(enable_sync_v1=True, enable_sync_v2=False) + 
manager2 = self.create_peer(enable_sync_v1=True, enable_sync_v2=True) + manager3 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) + + managers = [manager1, manager2, manager3] + all_managers = managers + miners = [] + tx_gens = [] + + for manager in managers: + miner = self.simulator.create_miner(manager, hashpower=100e6) + miner.start() + miners.append(miner) + tx_gen = self.simulator.create_tx_generator(manager, rate=2 / 60., hashpower=1e6, ignore_no_funds=True) + tx_gen.start() + tx_gens.append(tx_gen) + + self.simulator.run(2000) + + self.simulator.add_connection(FakeConnection(manager1, manager2, latency=0.300)) + self.simulator.add_connection(FakeConnection(manager1, manager3, latency=0.300)) + self.simulator.add_connection(FakeConnection(manager2, manager3, latency=0.300)) + + for tx_gen in tx_gens: + tx_gen.stop() + for miner in miners: + miner.stop() + + self.simulator.run_until_complete(2000, 600) + + for idx, node in enumerate(all_managers): + self.log.debug(f'checking node {idx}') + self.assertConsensusValid(manager) + + for manager_a, manager_b in zip(all_managers[:-1], all_managers[1:]): + # sync-v2 consensus test is more lenient (if sync-v1 assert passes sync-v2 assert will pass too) + self.assertConsensusEqualSyncV2(manager_a, manager_b, strict_sync_v2_indexes=False) + + def test_bridge_with_late_v2(self): + manager1 = self.create_peer(enable_sync_v1=True, enable_sync_v2=False) + manager2 = self.create_peer(enable_sync_v1=True, enable_sync_v2=True) + manager3 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) + + managers = [manager1, manager2] + all_managers = [manager1, manager2, manager3] + miners = [] + tx_gens = [] + + for manager in managers: + miner = self.simulator.create_miner(manager, hashpower=100e6) + miner.start() + miners.append(miner) + tx_gen = self.simulator.create_tx_generator(manager, rate=2 / 60., hashpower=1e6, ignore_no_funds=True) + tx_gen.start() + tx_gens.append(tx_gen) + + 
self.simulator.add_connection(FakeConnection(manager1, manager2, latency=0.300)) + self.simulator.run(2000) + + for tx_gen in tx_gens: + tx_gen.stop() + for miner in miners: + miner.stop() + + self.simulator.add_connection(FakeConnection(manager2, manager3, latency=0.300)) + self.simulator.run_until_complete(2000, 600) + + for idx, node in enumerate(all_managers): + self.log.debug(f'checking node {idx}') + self.assertConsensusValid(manager) + + for manager_a, manager_b in zip(all_managers[:-1], all_managers[1:]): + # sync-v2 consensus test is more lenient (if sync-v1 assert passes sync-v2 assert will pass too) + self.assertConsensusEqualSyncV2(manager_a, manager_b, strict_sync_v2_indexes=False) diff --git a/tests/p2p/test_sync_mempool.py b/tests/p2p/test_sync_mempool.py index 38c82603d..37f8a86fd 100644 --- a/tests/p2p/test_sync_mempool.py +++ b/tests/p2p/test_sync_mempool.py @@ -1,5 +1,3 @@ -import random - from hathor.crypto.util import decode_address from hathor.graphviz import GraphvizVisualizer from hathor.simulator import FakeConnection @@ -43,7 +41,7 @@ def _add_new_transactions(self, num_txs): txs = [] for _ in range(num_txs): address = self.get_address(0) - value = random.choice([5, 10, 50, 100, 120]) + value = self.rng.choice([5, 10, 50, 100, 120]) tx = self._add_new_tx(address, value) txs.append(tx) return txs diff --git a/tests/p2p/test_sync_rate_limiter.py b/tests/p2p/test_sync_rate_limiter.py index 724448187..054543f78 100644 --- a/tests/p2p/test_sync_rate_limiter.py +++ b/tests/p2p/test_sync_rate_limiter.py @@ -8,7 +8,9 @@ from tests.simulation.base import SimulatorTestCase -class BaseRandomSimulatorTestCase(SimulatorTestCase): +class SyncV1RandomSimulatorTestCase(unittest.SyncV1Params, SimulatorTestCase): + __test__ = True + def test_sync_rate_limiter(self): manager1 = self.create_peer() @@ -28,7 +30,7 @@ def test_sync_rate_limiter(self): connected_peers2 = list(manager2.connections.connected_peers.values()) self.assertEqual(1, 
len(connected_peers2)) protocol1 = connected_peers2[0] - sync2 = protocol1.state.sync_manager + sync2 = protocol1.state.sync_agent sync2._send_tips = MagicMock() for i in range(100): @@ -60,7 +62,7 @@ def test_sync_rate_limiter_disconnect(self): self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] - sync1 = protocol1.state.sync_manager + sync1 = protocol1.state.sync_agent sync1._send_tips = Mock(wraps=sync1._send_tips) sync1.send_tips() @@ -107,7 +109,7 @@ def test_sync_rate_limiter_delayed_calls_draining(self): self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] - sync1 = protocol1.state.sync_manager + sync1 = protocol1.state.sync_agent sync1.send_tips() self.assertEqual(len(sync1._send_tips_call_later), 0) @@ -145,7 +147,7 @@ def test_sync_rate_limiter_delayed_calls_stop(self): self.assertEqual(1, len(connected_peers2)) protocol1 = connected_peers2[0] - sync1 = protocol1.state.sync_manager + sync1 = protocol1.state.sync_agent sync1.send_tips() self.assertEqual(len(sync1._send_tips_call_later), 0) @@ -177,16 +179,3 @@ def test_sync_rate_limiter_delayed_calls_stop(self): # All residual tasks should have been canceled for call_later in sync1._send_tips_call_later: self.assertEqual(call_later.active(), False) - - -class SyncV1RandomSimulatorTestCase(unittest.SyncV1Params, BaseRandomSimulatorTestCase): - __test__ = True - - -class SyncV2RandomSimulatorTestCase(unittest.SyncV2Params, BaseRandomSimulatorTestCase): - __test__ = True - - -# sync-bridge should behave like sync-v2 -class SyncBridgeRandomSimulatorTestCase(unittest.SyncBridgeParams, SyncV2RandomSimulatorTestCase): - __test__ = True diff --git a/tests/p2p/test_sync_v2.py b/tests/p2p/test_sync_v2.py new file mode 100644 index 000000000..0a9ef50bc --- /dev/null +++ b/tests/p2p/test_sync_v2.py @@ -0,0 +1,243 @@ +import pytest +from twisted.python.failure import Failure + +from hathor.conf import HathorSettings +from hathor.p2p.peer_id import PeerId +from 
hathor.simulator import FakeConnection +from hathor.simulator.trigger import StopAfterNMinedBlocks, StopAfterNTransactions, StopWhenTrue, Trigger +from hathor.transaction.storage.traversal import DFSWalk +from tests.simulation.base import SimulatorTestCase +from tests.utils import HAS_ROCKSDB + +settings = HathorSettings() + + +class BaseRandomSimulatorTestCase(SimulatorTestCase): + __test__ = True + + def _get_partial_blocks(self, tx_storage): + with tx_storage.allow_partially_validated_context(): + partial_blocks = set() + for tx in tx_storage.get_all_transactions(): + if not tx.is_block: + continue + meta = tx.get_metadata() + if meta.validation.is_partial(): + partial_blocks.add(tx.hash) + return partial_blocks + + def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bool) -> None: + manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) + manager1.allow_mining_without_peers() + + miner1 = self.simulator.create_miner(manager1, hashpower=10e6) + miner1.start() + trigger: Trigger = StopAfterNMinedBlocks(miner1, quantity=50) + self.assertTrue(self.simulator.run(3 * 3600, trigger=trigger)) + + gen_tx1 = self.simulator.create_tx_generator(manager1, rate=2., hashpower=1e6, ignore_no_funds=True) + gen_tx1.start() + trigger = StopAfterNTransactions(gen_tx1, quantity=500) + self.assertTrue(self.simulator.run(3600, trigger=trigger)) + + # Stop mining and run again to increase the mempool. + miner1.stop() + self.simulator.run(600) + + # Finally, stop all generators. + gen_tx1.stop() + + # Create a new peer and run sync for a while (but stop before getting synced). 
+ path = self.mkdtemp() + peer_id = PeerId() + builder2 = self.simulator.get_default_builder() \ + .set_peer_id(peer_id) \ + .disable_sync_v1() \ + .enable_sync_v2() \ + .use_rocksdb(path) + + manager2 = self.simulator.create_peer(builder2) + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + + # Run sync for 2 minutes so we know it's not going to complete. + self.simulator.run(120) + + b1 = manager1.tx_storage.get_best_block() + b2 = manager2.tx_storage.get_best_block() + + self.assertNotEqual(b1.hash, b2.hash) + + partial_blocks = self._get_partial_blocks(manager2.tx_storage) + self.assertGreater(len(partial_blocks), 0) + + for _ in range(20): + print() + print('Stopping manager2...') + for _ in range(20): + print() + + # Stop the full node. + conn12.disconnect(Failure(Exception('testing'))) + self.simulator.remove_connection(conn12) + manager2.stop() + manager2.tx_storage._rocksdb_storage.close() + del manager2 + + for _ in range(20): + print() + print('Restarting manager2 as manager3...') + for _ in range(20): + print() + + # Restart full node using the same db. + builder3 = self.simulator.get_default_builder() \ + .set_peer_id(peer_id) \ + .disable_sync_v1() \ + .enable_sync_v2() \ + .use_rocksdb(path) + + if full_verification: + builder3.enable_full_verification() + else: + builder3.disable_full_verification() + + if use_tx_storage_cache: + builder3.use_tx_storage_cache() + + manager3 = self.simulator.create_peer(builder3) + self.assertEqual(partial_blocks, self._get_partial_blocks(manager3.tx_storage)) + self.assertTrue(manager3.tx_storage.indexes.deps.has_needed_tx()) + + conn13 = FakeConnection(manager1, manager3, latency=0.05) + self.simulator.add_connection(conn13) + + # Let the connection start to sync. 
+ self.simulator.run(60) + + # Run until it's synced (time out of 1h) + sync3 = conn13.proto2.state.sync_agent + self.simulator.run(600) + sync3._breakpoint = True + + trigger = StopWhenTrue(sync3.is_synced) + self.assertTrue(self.simulator.run(5400, trigger=trigger)) + + self.assertEqual(manager1.tx_storage.get_vertices_count(), manager3.tx_storage.get_vertices_count()) + self.assertConsensusEqualSyncV2(manager1, manager3) + + # Start generators again to test real time sync. + miner1.start() + gen_tx1.start() + self.simulator.run(600) + miner1.stop() + gen_tx1.stop() + + # Make sure we are all synced. + self.simulator.run(600) + + self.assertEqual(manager1.tx_storage.get_vertices_count(), manager3.tx_storage.get_vertices_count()) + self.assertConsensusEqualSyncV2(manager1, manager3) + + @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') + def test_restart_fullnode_full_verification(self): + self._run_restart_test(full_verification=True, use_tx_storage_cache=False) + + @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') + def test_restart_fullnode_quick(self): + self._run_restart_test(full_verification=False, use_tx_storage_cache=False) + + @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') + def test_restart_fullnode_quick_with_cache(self): + self._run_restart_test(full_verification=False, use_tx_storage_cache=True) + + @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') + def test_restart_fullnode_full_verification_with_cache(self): + self._run_restart_test(full_verification=True, use_tx_storage_cache=True) + + def test_exceeds_streaming_and_mempool_limits(self) -> None: + manager1 = self.create_peer(enable_sync_v1=False, enable_sync_v2=True) + manager1.allow_mining_without_peers() + + # Find 50 blocks. 
+ miner1 = self.simulator.create_miner(manager1, hashpower=10e6) + miner1.start() + trigger: Trigger = StopAfterNMinedBlocks(miner1, quantity=100) + self.assertTrue(self.simulator.run(3 * 3600, trigger=trigger)) + miner1.stop() + + # Generate 500 txs. + gen_tx1 = self.simulator.create_tx_generator(manager1, rate=3., hashpower=10e9, ignore_no_funds=True) + gen_tx1.start() + trigger = StopAfterNTransactions(gen_tx1, quantity=500) + self.simulator.run(3600, trigger=trigger) + self.assertGreater(manager1.tx_storage.get_vertices_count(), 500) + gen_tx1.stop() + + # Find 1 block. + miner1.start() + trigger = StopAfterNMinedBlocks(miner1, quantity=1) + self.assertTrue(self.simulator.run(3600, trigger=trigger)) + miner1.stop() + + # Confirm block has 400+ transactions. + blk = manager1.tx_storage.get_best_block() + tx_parents = [manager1.tx_storage.get_transaction(x) for x in blk.parents[1:]] + self.assertEqual(len(tx_parents), 2) + dfs = DFSWalk(manager1.tx_storage, is_dag_verifications=True, is_left_to_right=False) + cnt = 0 + for tx in dfs.run(tx_parents): + if tx.get_metadata().first_block == blk.hash: + cnt += 1 + else: + dfs.skip_neighbors(tx) + self.assertGreater(cnt, 400) + + # Generate 500 txs in mempool. + gen_tx1.start() + trigger = StopAfterNTransactions(gen_tx1, quantity=500) + self.simulator.run(3600, trigger=trigger) + self.assertGreater(manager1.tx_storage.get_vertices_count(), 1000) + gen_tx1.stop() + + for _ in range(20): + print() + print('Part 2: Start new fullnode and sync') + for _ in range(20): + print() + + # Create a new peer and run sync for a while (but stop before getting synced). + peer_id = PeerId() + builder2 = self.simulator.get_default_builder() \ + .set_peer_id(peer_id) \ + .disable_sync_v1() \ + .enable_sync_v2() \ + + manager2 = self.simulator.create_peer(builder2) + conn12 = FakeConnection(manager1, manager2, latency=0.05) + self.simulator.add_connection(conn12) + + # Let the connection start to sync. 
+ self.simulator.run(1) + + # Change manager1 default streaming and mempool limits. + sync1 = conn12.proto1.state.sync_agent + sync1.DEFAULT_STREAMING_LIMIT = 30 + sync1.mempool_manager.MAX_STACK_LENGTH = 30 + self.assertIsNone(sync1.blockchain_streaming) + self.assertIsNone(sync1.transactions_streaming) + + # Change manager2 default streaming and mempool limits. + sync2 = conn12.proto2.state.sync_agent + sync2.DEFAULT_STREAMING_LIMIT = 50 + sync2.mempool_manager.MAX_STACK_LENGTH = 50 + self.assertIsNone(sync2.blockchain_streaming) + self.assertIsNone(sync2.transactions_streaming) + + # Run until fully synced. + # trigger = StopWhenTrue(sync2.is_synced) + # self.assertTrue(self.simulator.run(5400, trigger=trigger)) + self.simulator.run(3600) + + self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count()) + self.assertConsensusEqualSyncV2(manager1, manager2) diff --git a/tests/resources/feature/test_feature.py b/tests/resources/feature/test_feature.py index 5dcb83f4f..bc6a9083e 100644 --- a/tests/resources/feature/test_feature.py +++ b/tests/resources/feature/test_feature.py @@ -19,6 +19,7 @@ from hathor.feature_activation.feature import Feature from hathor.feature_activation.feature_service import FeatureService from hathor.feature_activation.model.criteria import Criteria +from hathor.feature_activation.model.feature_description import FeatureDescription from hathor.feature_activation.model.feature_state import FeatureState from hathor.feature_activation.resources.feature import FeatureResource from hathor.feature_activation.settings import Settings as FeatureSettings @@ -29,37 +30,45 @@ @pytest.fixture def web(): - best_block = Mock(spec_set=Block) - best_block.get_feature_activation_bit_counts = Mock(return_value=[0, 1, 0, 0]) - best_block.hash_hex = 'some_hash' - best_block.get_height = Mock(return_value=123) + block_mock = Mock(wraps=Block(), spec_set=Block) + block_mock.get_feature_activation_bit_counts = 
Mock(return_value=[0, 1, 0, 0]) + block_mock.hash_hex = 'some_hash' + block_mock.get_height = Mock(return_value=123) tx_storage = Mock(spec_set=TransactionStorage) - tx_storage.get_best_block = Mock(return_value=best_block) + tx_storage.get_best_block = Mock(return_value=block_mock) + tx_storage.get_transaction = Mock(return_value=block_mock) def get_state(*, block: Block, feature: Feature) -> FeatureState: return FeatureState.ACTIVE if feature is Feature.NOP_FEATURE_1 else FeatureState.STARTED + nop_feature_1_criteria = Criteria( + bit=0, + start_height=0, + timeout_height=100, + version='0.1.0' + ) + nop_feature_2_criteria = Criteria( + bit=1, + start_height=200, + threshold=2, + timeout_height=300, + version='0.2.0' + ) + feature_service = Mock(spec_set=FeatureService) feature_service.get_state = Mock(side_effect=get_state) + feature_service.get_bits_description = Mock(return_value={ + Feature.NOP_FEATURE_1: FeatureDescription(state=FeatureState.DEFINED, criteria=nop_feature_1_criteria), + Feature.NOP_FEATURE_2: FeatureDescription(state=FeatureState.LOCKED_IN, criteria=nop_feature_2_criteria), + }) feature_settings = FeatureSettings( evaluation_interval=4, default_threshold=3, features={ - Feature.NOP_FEATURE_1: Criteria( - bit=0, - start_height=0, - timeout_height=100, - version='0.1.0' - ), - Feature.NOP_FEATURE_2: Criteria( - bit=1, - start_height=200, - threshold=2, - timeout_height=300, - version='0.2.0' - ) + Feature.NOP_FEATURE_1: nop_feature_1_criteria, + Feature.NOP_FEATURE_2: nop_feature_2_criteria } ) @@ -105,3 +114,15 @@ def test_get_features(web): ) assert result == expected + + +def test_get_block_features(web): + response = web.get('feature', args={b'block': b'1234'}) + result = response.result.json_value() + expected = dict( + signal_bits=[ + dict(bit=1, signal=0, feature="NOP_FEATURE_2", feature_state="LOCKED_IN") + ] + ) + + assert result == expected diff --git a/tests/resources/p2p/test_healthcheck.py b/tests/resources/p2p/test_healthcheck.py 
index 521612897..90bf1e260 100644 --- a/tests/resources/p2p/test_healthcheck.py +++ b/tests/resources/p2p/test_healthcheck.py @@ -72,7 +72,7 @@ def test_get_ready(self): add_new_blocks(self.manager, 5) # This will make sure the peers are synced - while not self.conn1.is_empty(): + for _ in range(600): self.conn1.run_one_step(debug=True) self.clock.advance(0.1) diff --git a/tests/resources/transaction/test_tx.py b/tests/resources/transaction/test_tx.py index d5cc377e8..c6a2e72d9 100644 --- a/tests/resources/transaction/test_tx.py +++ b/tests/resources/transaction/test_tx.py @@ -63,10 +63,10 @@ def test_get_one(self): @inlineCallbacks def test_get_one_known_tx(self): + # Tx tesnet 0033784bc8443ba851fd88d81c6f06774ae529f25c1fa8f026884ad0a0e98011 # We had a bug with this endpoint in this tx because the token_data from inputs # was being copied from the output - # First add needed data on storage tx_hex = ('0001020306001c382847d8440d05da95420bee2ebeb32bc437f82a9ae47b0745c8a29a7b0d007231eee3cb6160d95172' 'a409d634d0866eafc8775f5729fff6a61e7850aba500f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226' @@ -175,10 +175,10 @@ def test_get_one_known_tx(self): @inlineCallbacks def test_get_one_known_tx_with_authority(self): + # Tx tesnet 00005f234469407614bf0abedec8f722bb5e534949ad37650f6077c899741ed7 # We had a bug with this endpoint in this tx because the token_data from inputs # was not considering authority mask - # First add needed data on storage tx_hex = ('0001010202000023b318c91dcfd4b967b205dc938f9f5e2fd5114256caacfb8f6dd13db330000023b318c91dcfd4b967b20' '5dc938f9f5e2fd5114256caacfb8f6dd13db33000006946304402200f7de9e866fbc2d600d6a46eb620fa2d72c9bf032250' @@ -492,6 +492,34 @@ def test_negative_timestamp(self): data = response.json_value() self.assertFalse(data['success']) + @inlineCallbacks + def test_partially_validated_not_found(self): + # First add needed data on storage + tx_hex = 
('0001020306001c382847d8440d05da95420bee2ebeb32bc437f82a9ae47b0745c8a29a7b0d007231eee3cb6160d95172' + 'a409d634d0866eafc8775f5729fff6a61e7850aba500f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226' + 'fdff3e09a101006946304402205dcbb7956d95b0e123954160d369e64bca7b176e1eb136e2dae5b95e46741509022072' + '6f99a363e8a4d79963492f4359c7589667eb0f45af7effe0dd4e51fbb5543d210288c10b8b1186b8c5f6bc05855590a6' + '522af35f269ddfdb8df39426a01ca9d2dd003d3c40fb04737e1a2a848cfd2592490a71cd0248b9e7d6a626f45dec8697' + '5b00006a4730450221008741dff52d97ce5f084518e1f4cac6bd98abdc88b98e6b18d6a8666fadac05f0022068951306' + '19eaf5433526e4803187c0aa08a8b1c46d9dc4ffaa89406fb2d4940c2102dd29eaadbb21a4de015d1812d5c0ec63cb8e' + 'e921e28580b6a9f8ff08db168c0e0096fb9b1a9e5fc34a9750bcccc746564c2b73f6defa381e130d9a4ea38cb1d80000' + '6a473045022100cb6b8abfb958d4029b0e6a89c828b65357456d20b8e6a8e42ad6d9a780fcddc4022035a8a46248b9c5' + '20b0205aa99ec5c390b40ae97a0b3ccc6e68e835ce5bde972a210306f7fdc08703152348484768fc7b85af900860a3d6' + 'fa85343524150d0370770b0000000100001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288ac0000000200' + '001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288ac0000000301001f0460b5a2b06f76a914b9987a3866' + 'a7c26225c57a62b14e901377e2f9e288ac0000006001001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288' + 'ac0000000402001976a914b9987a3866a7c26225c57a62b14e901377e2f9e288ac000002b602001976a91479ae26cf2f' + '2dc703120a77192fc16eda9ed22e1b88ac40200000218def416095b08602003d3c40fb04737e1a2a848cfd2592490a71cd' + '0248b9e7d6a626f45dec86975b00f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226fdff3e09a1000002be') + tx = Transaction.create_from_struct(bytes.fromhex(tx_hex), self.manager.tx_storage) + tx.set_validation(ValidationState.BASIC) + with self.manager.tx_storage.allow_partially_validated_context(): + self.manager.tx_storage.save_transaction(tx) + + response = yield self.web.get("transaction", {b'id': bytes(tx.hash_hex, 'utf-8')}) + data = response.json_value() + 
self.assertFalse(data['success']) + class SyncV1TransactionTest(unittest.SyncV1Params, BaseTransactionTest): __test__ = True diff --git a/tests/simulation/test_simulator.py b/tests/simulation/test_simulator.py index c8002b7f2..d39cf81e0 100644 --- a/tests/simulation/test_simulator.py +++ b/tests/simulation/test_simulator.py @@ -1,6 +1,7 @@ import pytest from hathor.simulator import FakeConnection +from hathor.simulator.trigger import All as AllTriggers, StopWhenSynced from tests import unittest from tests.simulation.base import SimulatorTestCase @@ -56,7 +57,7 @@ def test_two_nodes(self): gen_tx1.stop() gen_tx2.stop() - self.simulator.run(5 * 60) + self.assertTrue(self.simulator.run(600, trigger=StopWhenSynced(conn12))) self.assertTrue(conn12.is_connected) self.assertTipsEqual(manager1, manager2) @@ -64,12 +65,16 @@ def test_two_nodes(self): def test_many_miners_since_beginning(self): nodes = [] miners = [] + stop_triggers = [] for hashpower in [10e6, 5e6, 1e6, 1e6, 1e6]: manager = self.create_peer() for node in nodes: - conn = FakeConnection(manager, node, latency=0.085) + # XXX: using autoreconnect is more realistic, but ideally it shouldn't be needed, but the test is + # failing without it for some reason + conn = FakeConnection(manager, node, latency=0.085, autoreconnect=True) self.simulator.add_connection(conn) + stop_triggers.append(StopWhenSynced(conn)) nodes.append(manager) @@ -82,7 +87,7 @@ def test_many_miners_since_beginning(self): for miner in miners: miner.stop() - self.simulator.run(15) + self.assertTrue(self.simulator.run(3600, trigger=AllTriggers(stop_triggers))) for node in nodes[1:]: self.assertTipsEqual(nodes[0], node) @@ -92,6 +97,7 @@ def test_new_syncing_peer(self): nodes = [] miners = [] tx_generators = [] + stop_triggers = [] manager = self.create_peer() nodes.append(manager) @@ -122,8 +128,9 @@ def test_new_syncing_peer(self): self.log.debug('adding late node') late_manager = self.create_peer() for node in nodes: - conn = 
FakeConnection(late_manager, node, latency=0.300) + conn = FakeConnection(late_manager, node, latency=0.300, autoreconnect=True) self.simulator.add_connection(conn) + stop_triggers.append(StopWhenSynced(conn)) self.simulator.run(600) @@ -132,7 +139,7 @@ def test_new_syncing_peer(self): for miner in miners: miner.stop() - self.simulator.run_until_complete(600) + self.assertTrue(self.simulator.run(3600, trigger=AllTriggers(stop_triggers))) for idx, node in enumerate(nodes): self.log.debug(f'checking node {idx}') diff --git a/tests/tx/test_block.py b/tests/tx/test_block.py index 3ef6ce990..0b925a236 100644 --- a/tests/tx/test_block.py +++ b/tests/tx/test_block.py @@ -119,3 +119,12 @@ def test_get_feature_activation_bit_list(signal_bits: int, expected_bit_list: li result = block._get_feature_activation_bit_list() assert result == expected_bit_list + + +def test_get_feature_activation_bit_value() -> None: + block = Block(signal_bits=0b0000_0100) + + assert block.get_feature_activation_bit_value(0) == 0 + assert block.get_feature_activation_bit_value(1) == 0 + assert block.get_feature_activation_bit_value(2) == 1 + assert block.get_feature_activation_bit_value(3) == 0 diff --git a/tests/tx/test_indexes.py b/tests/tx/test_indexes.py index d3b1edb72..1a2482821 100644 --- a/tests/tx/test_indexes.py +++ b/tests/tx/test_indexes.py @@ -842,6 +842,26 @@ def test_addresses_index_last(self): self.assertTrue(addresses_indexes.is_address_empty(address)) self.assertEqual(addresses_indexes.get_sorted_from_address(address), []) + def test_height_index(self): + from hathor.indexes.height_index import HeightInfo + + # make height 100 + H = 100 + blocks = add_new_blocks(self.manager, H - settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=15) + height_index = self.manager.tx_storage.indexes.height + self.assertEqual(height_index.get_height_tip(), HeightInfo(100, blocks[-1].hash)) + self.assertEqual(height_index.get_n_height_tips(1), [HeightInfo(100, blocks[-1].hash)]) + 
self.assertEqual(height_index.get_n_height_tips(2), + [HeightInfo(100, blocks[-1].hash), HeightInfo(99, blocks[-2].hash)]) + self.assertEqual(height_index.get_n_height_tips(3), + [HeightInfo(100, blocks[-1].hash), + HeightInfo(99, blocks[-2].hash), + HeightInfo(98, blocks[-3].hash)]) + self.assertEqual(len(height_index.get_n_height_tips(100)), 100) + self.assertEqual(len(height_index.get_n_height_tips(101)), 101) + self.assertEqual(len(height_index.get_n_height_tips(102)), 101) + self.assertEqual(height_index.get_n_height_tips(103), height_index.get_n_height_tips(104)) + class BaseMemoryIndexesTest(BaseIndexesTest): def setUp(self): @@ -860,7 +880,7 @@ def setUp(self): # this makes sure we can spend the genesis outputs self.manager = self.create_peer('testnet', tx_storage=self.tx_storage, unlock_wallet=True, wallet_index=True, - utxo_index=True) + use_memory_index=True, utxo_index=True) self.blocks = add_blocks_unlock_reward(self.manager) self.last_block = self.blocks[-1] diff --git a/tests/tx/test_reward_lock.py b/tests/tx/test_reward_lock.py index 374179901..e705d608f 100644 --- a/tests/tx/test_reward_lock.py +++ b/tests/tx/test_reward_lock.py @@ -3,6 +3,7 @@ from hathor.conf import HathorSettings from hathor.crypto.util import get_address_from_public_key from hathor.transaction import Transaction, TxInput, TxOutput +from hathor.transaction.exceptions import RewardLocked from hathor.transaction.scripts import P2PKH from hathor.transaction.storage import TransactionMemoryStorage from hathor.wallet import Wallet @@ -32,6 +33,16 @@ def setUp(self): blocks = add_blocks_unlock_reward(self.manager) self.last_block = blocks[-1] + def _add_reward_block(self): + reward_block = self.manager.generate_mining_block( + address=get_address_from_public_key(self.genesis_public_key) + ) + reward_block.resolve() + self.assertTrue(self.manager.propagate_tx(reward_block)) + # XXX: calculate unlock height AFTER adding the block so the height is correctly calculated + unlock_height = 
reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 + return reward_block, unlock_height + def _spend_reward_tx(self, manager, reward_block): value = reward_block.outputs[0].value address = get_address_from_public_key(self.genesis_public_key) @@ -54,13 +65,8 @@ def _spend_reward_tx(self, manager, reward_block): return tx def test_classic_reward_lock(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # reward cannot be spent while not enough blocks are added for _ in range(settings.REWARD_SPEND_MIN_BLOCKS): @@ -76,13 +82,8 @@ def test_classic_reward_lock(self): self.assertTrue(self.manager.propagate_tx(tx, fails_silently=False)) def test_block_with_not_enough_height(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add one less block than needed add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS - 1, advance_clock=1) @@ -100,10 +101,7 @@ def test_block_with_not_enough_height(self): def test_block_with_enough_height(self): # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = 
reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add just enough blocks add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1) @@ -118,13 +116,9 @@ def test_block_with_enough_height(self): def test_mempool_tx_with_not_enough_height(self): from hathor.exception import InvalidNewTransaction - from hathor.transaction.exceptions import RewardLocked # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add one less block than needed add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS - 1, advance_clock=1) @@ -139,10 +133,7 @@ def test_mempool_tx_with_not_enough_height(self): def test_mempool_tx_with_enough_height(self): # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add just enough blocks add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1) @@ -153,13 +144,8 @@ def test_mempool_tx_with_enough_height(self): self.assertTrue(self.manager.on_new_tx(tx, fails_silently=False)) def test_mempool_tx_invalid_after_reorg(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = 
self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # add just enough blocks blocks = add_new_blocks(self.manager, settings.REWARD_SPEND_MIN_BLOCKS, advance_clock=1) @@ -191,13 +177,8 @@ def test_mempool_tx_invalid_after_reorg(self): @pytest.mark.xfail(reason='this is no longer the case, timestamp will not matter', strict=True) def test_classic_reward_lock_timestamp_expected_to_fail(self): - from hathor.transaction.exceptions import RewardLocked - # add block with a reward we can spend - reward_block = self.manager.generate_mining_block(address=get_address_from_public_key(self.genesis_public_key)) - reward_block.resolve() - unlock_height = reward_block.get_metadata().height + settings.REWARD_SPEND_MIN_BLOCKS + 1 - self.assertTrue(self.manager.propagate_tx(reward_block)) + reward_block, unlock_height = self._add_reward_block() # we add enough blocks that this output could be spent based on block height blocks = add_blocks_unlock_reward(self.manager) diff --git a/tests/unittest.py b/tests/unittest.py index b6988f7ea..c6fe0b213 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -1,4 +1,5 @@ import os +import secrets import shutil import tempfile import time @@ -99,6 +100,7 @@ class TestCase(unittest.TestCase): _enable_sync_v1: bool _enable_sync_v2: bool use_memory_storage: bool = USE_MEMORY_STORAGE + seed_config: Optional[int] = None def setUp(self): _set_test_mode(TestMode.TEST_ALL_WEIGHT) @@ -108,7 +110,9 @@ def setUp(self): self.clock.advance(time.time()) self.log = logger.new() self.reset_peer_id_pool() - self.rng = Random() + self.seed = secrets.randbits(64) if self.seed_config is None else self.seed_config + self.log.debug('set seed', seed=self.seed) + self.rng = Random(self.seed) 
self._pending_cleanups = [] def tearDown(self): @@ -178,15 +182,8 @@ def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unloc capabilities=None, full_verification=True, enable_sync_v1=None, enable_sync_v2=None, checkpoints=None, utxo_index=False, event_manager=None, use_memory_index=None, start_manager=True, pubsub=None, event_storage=None, enable_event_queue=None, use_memory_storage=None): - if enable_sync_v1 is None: - assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' - 'the test class or pass `enable_sync_v1` by argument') - enable_sync_v1 = self._enable_sync_v1 - if enable_sync_v2 is None: - assert hasattr(self, '_enable_sync_v2'), ('`_enable_sync_v2` has no default by design, either set one on ' - 'the test class or pass `enable_sync_v2` by argument') - enable_sync_v2 = self._enable_sync_v2 - assert enable_sync_v1 or enable_sync_v2, 'enable at least one sync version' + + enable_sync_v1, enable_sync_v2 = self._syncVersionFlags(enable_sync_v1, enable_sync_v2) builder = self.get_builder(network) \ .set_full_verification(full_verification) @@ -286,7 +283,33 @@ def assertIsTopological(self, tx_sequence: Iterator[BaseTransaction], message: O self.assertIn(dep, valid_deps, message) valid_deps.add(tx.hash) + def _syncVersionFlags(self, enable_sync_v1=None, enable_sync_v2=None): + """Internal: use this to check and get the flags and optionally provide override values.""" + if enable_sync_v1 is None: + assert hasattr(self, '_enable_sync_v1'), ('`_enable_sync_v1` has no default by design, either set one on ' + 'the test class or pass `enable_sync_v1` by argument') + enable_sync_v1 = self._enable_sync_v1 + if enable_sync_v2 is None: + assert hasattr(self, '_enable_sync_v2'), ('`_enable_sync_v2` has no default by design, either set one on ' + 'the test class or pass `enable_sync_v2` by argument') + enable_sync_v2 = self._enable_sync_v2 + assert enable_sync_v1 or enable_sync_v2, 'enable at 
least one sync version' + return enable_sync_v1, enable_sync_v2 + def assertTipsEqual(self, manager1, manager2): + _, enable_sync_v2 = self._syncVersionFlags() + if enable_sync_v2: + self.assertTipsEqualSyncV2(manager1, manager2) + else: + self.assertTipsEqualSyncV1(manager1, manager2) + + def assertTipsNotEqual(self, manager1, manager2): + s1 = set(manager1.tx_storage.get_all_tips()) + s2 = set(manager2.tx_storage.get_all_tips()) + self.assertNotEqual(s1, s2) + + def assertTipsEqualSyncV1(self, manager1, manager2): + # XXX: this is the original implementation of assertTipsEqual s1 = set(manager1.tx_storage.get_all_tips()) s2 = set(manager2.tx_storage.get_all_tips()) self.assertEqual(s1, s2) @@ -295,12 +318,38 @@ def assertTipsEqual(self, manager1, manager2): s2 = set(manager2.tx_storage.get_tx_tips()) self.assertEqual(s1, s2) - def assertTipsNotEqual(self, manager1, manager2): - s1 = set(manager1.tx_storage.get_all_tips()) - s2 = set(manager2.tx_storage.get_all_tips()) - self.assertNotEqual(s1, s2) + def assertTipsEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True): + # tx tips + if strict_sync_v2_indexes: + tips1 = manager1.tx_storage.indexes.mempool_tips.get() + tips2 = manager2.tx_storage.indexes.mempool_tips.get() + else: + tips1 = {tx.hash for tx in manager1.tx_storage.iter_mempool_tips_from_best_index()} + tips2 = {tx.hash for tx in manager2.tx_storage.iter_mempool_tips_from_best_index()} + self.log.debug('tx tips1', len=len(tips1), list=shorten_hash(tips1)) + self.log.debug('tx tips2', len=len(tips2), list=shorten_hash(tips2)) + self.assertEqual(tips1, tips2) + + # best block + s1 = set(manager1.tx_storage.get_best_block_tips()) + s2 = set(manager2.tx_storage.get_best_block_tips()) + self.log.debug('block tips1', len=len(s1), list=shorten_hash(s1)) + self.log.debug('block tips2', len=len(s2), list=shorten_hash(s2)) + self.assertEqual(s1, s2) + + # best block (from height index) + b1 = manager1.tx_storage.indexes.height.get_tip() + b2 = 
manager2.tx_storage.indexes.height.get_tip() + self.assertEqual(b1, b2) def assertConsensusEqual(self, manager1, manager2): + _, enable_sync_v2 = self._syncVersionFlags() + if enable_sync_v2: + self.assertConsensusEqualSyncV2(manager1, manager2) + else: + self.assertConsensusEqualSyncV1(manager1, manager2) + + def assertConsensusEqualSyncV1(self, manager1, manager2): self.assertEqual(manager1.tx_storage.get_vertices_count(), manager2.tx_storage.get_vertices_count()) for tx1 in manager1.tx_storage.get_all_transactions(): tx2 = manager2.tx_storage.get_transaction(tx1.hash) @@ -319,6 +368,44 @@ def assertConsensusEqual(self, manager1, manager2): # Hard verification # self.assertEqual(tx1_meta.voided_by, tx2_meta.voided_by) + def assertConsensusEqualSyncV2(self, manager1, manager2, *, strict_sync_v2_indexes=True): + # The current sync algorithm does not propagate voided blocks/txs + # so the count might be different even though the consensus is equal + # One peer might have voided txs that the other does not have + + # to start off, both nodes must have the same tips + self.assertTipsEqualSyncV2(manager1, manager2, strict_sync_v2_indexes=strict_sync_v2_indexes) + + # the following is specific to sync-v2 + + # helper function: + def get_all_executed_or_voided(tx_storage): + """Get all txs separated into three sets: executed, voided, partial""" + tx_executed = set() + tx_voided = set() + tx_partial = set() + for tx in tx_storage.get_all_transactions(): + assert tx.hash is not None + tx_meta = tx.get_metadata() + if not tx_meta.validation.is_fully_connected(): + tx_partial.add(tx.hash) + elif not tx_meta.voided_by: + tx_executed.add(tx.hash) + else: + tx_voided.add(tx.hash) + return tx_executed, tx_voided, tx_partial + + # extract all the transactions from each node, split into three sets + tx_executed1, tx_voided1, tx_partial1 = get_all_executed_or_voided(manager1.tx_storage) + tx_executed2, tx_voided2, tx_partial2 = get_all_executed_or_voided(manager2.tx_storage) + + # 
both must have the exact same executed set + self.assertEqual(tx_executed1, tx_executed2) + + # XXX: the rest actually doesn't matter + self.log.debug('node1 rest', len_voided=len(tx_voided1), len_partial=len(tx_partial1)) + self.log.debug('node2 rest', len_voided=len(tx_voided2), len_partial=len(tx_partial2)) + def assertConsensusValid(self, manager): for tx in manager.tx_storage.get_all_transactions(): if tx.is_block: @@ -370,6 +457,20 @@ def assertTransactionConsensusValid(self, tx): self.assertTrue(meta.voided_by) self.assertTrue(parent_meta.voided_by.issubset(meta.voided_by)) + def assertSyncedProgress(self, node_sync): + """Check "synced" status of p2p-manager, uses self._enable_sync_vX to choose which check to run.""" + enable_sync_v1, enable_sync_v2 = self._syncVersionFlags() + if enable_sync_v2: + self.assertV2SyncedProgress(node_sync) + elif enable_sync_v1: + self.assertV1SyncedProgress(node_sync) + + def assertV1SyncedProgress(self, node_sync): + self.assertEqual(node_sync.synced_timestamp, node_sync.peer_timestamp) + + def assertV2SyncedProgress(self, node_sync): + self.assertEqual(node_sync.synced_height, node_sync.peer_height) + def clean_tmpdirs(self): for tmpdir in self.tmpdirs: shutil.rmtree(tmpdir)