From c757a5f83a73dc63551ccba079d3318af104065f Mon Sep 17 00:00:00 2001 From: Marcelo Salhab Brogliato Date: Thu, 11 May 2023 22:57:38 -0500 Subject: [PATCH 01/24] refactor(builder): Split resources creation from CliBuilder to ResourcesBuilder --- hathor/builder/__init__.py | 2 + hathor/builder/cli_builder.py | 240 +----------------------- hathor/builder/resources_builder.py | 271 ++++++++++++++++++++++++++++ hathor/cli/run_node.py | 7 +- tests/others/test_cli_builder.py | 26 +-- 5 files changed, 293 insertions(+), 253 deletions(-) create mode 100644 hathor/builder/resources_builder.py diff --git a/hathor/builder/__init__.py b/hathor/builder/__init__.py index 93271b01a..40145b2bc 100644 --- a/hathor/builder/__init__.py +++ b/hathor/builder/__init__.py @@ -14,9 +14,11 @@ from hathor.builder.builder import BuildArtifacts, Builder from hathor.builder.cli_builder import CliBuilder +from hathor.builder.resources_builder import ResourcesBuilder __all__ = [ 'BuildArtifacts', 'Builder', 'CliBuilder', + 'ResourcesBuilder', ] diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index 740079311..7f22dfa42 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -18,23 +18,18 @@ import platform import sys from argparse import Namespace -from typing import Any, Dict, Optional +from typing import Optional -from autobahn.twisted.resource import WebSocketResource from structlog import get_logger from twisted.internet.posixbase import PosixReactorBase -from twisted.web import server -from twisted.web.resource import Resource from hathor.consensus import ConsensusAlgorithm from hathor.event import EventManager -from hathor.event.resources.event import EventResource from hathor.exception import BuilderError from hathor.indexes import IndexesManager from hathor.manager import HathorManager from hathor.p2p.peer_id import PeerId from hathor.p2p.utils import discover_hostname -from hathor.prometheus import PrometheusMetricsExporter from 
hathor.pubsub import PubSubManager from hathor.wallet import BaseWallet, HDWallet, Wallet @@ -49,9 +44,6 @@ class CliBuilder: def __init__(self) -> None: self.log = logger.new() - self._build_prometheus = False - self._build_status = False - def check_or_raise(self, condition: bool, message: str) -> None: """Will exit printing `message` if `condition` is False.""" if not condition: @@ -311,233 +303,3 @@ def create_wallet(self, args: Namespace) -> BaseWallet: return wallet else: raise BuilderError('Invalid type of wallet') - - def create_prometheus(self, args: Namespace) -> PrometheusMetricsExporter: - kwargs: Dict[str, Any] = { - 'metrics': self.manager.metrics, - 'metrics_prefix': args.prometheus_prefix - } - - if args.data: - kwargs['path'] = os.path.join(args.data, 'prometheus') - else: - raise BuilderError('To run prometheus exporter you must have a data path') - - prometheus = PrometheusMetricsExporter(**kwargs) - prometheus.start() - - self._build_prometheus = True - return prometheus - - def create_resources(self, args: Namespace) -> server.Site: - from hathor.conf import HathorSettings - from hathor.debug_resources import ( - DebugCrashResource, - DebugLogResource, - DebugMessAroundResource, - DebugPrintResource, - DebugRaiseResource, - DebugRejectResource, - ) - from hathor.mining.ws import MiningWebsocketFactory - from hathor.p2p.resources import ( - AddPeersResource, - HealthcheckReadinessResource, - MiningInfoResource, - MiningResource, - NetfilterRuleResource, - StatusResource, - ) - from hathor.profiler import get_cpu_profiler - from hathor.profiler.resources import CPUProfilerResource, ProfilerResource - from hathor.transaction.resources import ( - BlockAtHeightResource, - CreateTxResource, - DashboardTransactionResource, - DecodeTxResource, - GetBlockTemplateResource, - GraphvizFullResource, - GraphvizNeighboursResource, - MempoolResource, - PushTxResource, - SubmitBlockResource, - TransactionAccWeightResource, - TransactionResource, - 
TxParentsResource, - UtxoSearchResource, - ValidateAddressResource, - ) - from hathor.version_resource import VersionResource - from hathor.wallet.resources import ( - AddressResource, - BalanceResource, - HistoryResource, - LockWalletResource, - SendTokensResource, - SignTxResource, - StateWalletResource, - UnlockWalletResource, - ) - from hathor.wallet.resources.nano_contracts import ( - NanoContractDecodeResource, - NanoContractExecuteResource, - NanoContractMatchValueResource, - ) - from hathor.wallet.resources.thin_wallet import ( - AddressBalanceResource, - AddressHistoryResource, - AddressSearchResource, - SendTokensResource as SendTokensThinResource, - TokenHistoryResource, - TokenResource, - ) - from hathor.websocket import HathorAdminWebsocketFactory, WebsocketStatsResource - - settings = HathorSettings() - cpu = get_cpu_profiler() - - # TODO get this from a file. How should we do with the factory? - root = Resource() - wallet_resource = Resource() - root.putChild(b'wallet', wallet_resource) - thin_wallet_resource = Resource() - root.putChild(b'thin_wallet', thin_wallet_resource) - contracts_resource = Resource() - wallet_resource.putChild(b'nano-contract', contracts_resource) - p2p_resource = Resource() - root.putChild(b'p2p', p2p_resource) - graphviz = Resource() - # XXX: reach the resource through /graphviz/ too, previously it was a leaf so this wasn't a problem - graphviz.putChild(b'', graphviz) - for fmt in ['dot', 'pdf', 'png', 'jpg']: - bfmt = fmt.encode('ascii') - graphviz.putChild(b'full.' + bfmt, GraphvizFullResource(self.manager, format=fmt)) - graphviz.putChild(b'neighbours.' 
+ bfmt, GraphvizNeighboursResource(self.manager, format=fmt)) - - resources = [ - (b'status', StatusResource(self.manager), root), - (b'version', VersionResource(self.manager), root), - (b'create_tx', CreateTxResource(self.manager), root), - (b'decode_tx', DecodeTxResource(self.manager), root), - (b'validate_address', ValidateAddressResource(self.manager), root), - (b'push_tx', - PushTxResource(self.manager, args.max_output_script_size, args.allow_non_standard_script), - root), - (b'graphviz', graphviz, root), - (b'transaction', TransactionResource(self.manager), root), - (b'block_at_height', BlockAtHeightResource(self.manager), root), - (b'transaction_acc_weight', TransactionAccWeightResource(self.manager), root), - (b'dashboard_tx', DashboardTransactionResource(self.manager), root), - (b'profiler', ProfilerResource(self.manager), root), - (b'top', CPUProfilerResource(self.manager, cpu), root), - (b'mempool', MempoolResource(self.manager), root), - # mining - (b'mining', MiningResource(self.manager), root), - (b'getmininginfo', MiningInfoResource(self.manager), root), - (b'get_block_template', GetBlockTemplateResource(self.manager), root), - (b'submit_block', SubmitBlockResource(self.manager), root), - (b'tx_parents', TxParentsResource(self.manager), root), - # /thin_wallet - (b'address_history', AddressHistoryResource(self.manager), thin_wallet_resource), - (b'address_balance', AddressBalanceResource(self.manager), thin_wallet_resource), - (b'address_search', AddressSearchResource(self.manager), thin_wallet_resource), - (b'send_tokens', SendTokensThinResource(self.manager), thin_wallet_resource), - (b'token', TokenResource(self.manager), thin_wallet_resource), - (b'token_history', TokenHistoryResource(self.manager), thin_wallet_resource), - # /wallet/nano-contract - (b'match-value', NanoContractMatchValueResource(self.manager), contracts_resource), - (b'decode', NanoContractDecodeResource(self.manager), contracts_resource), - (b'execute', 
NanoContractExecuteResource(self.manager), contracts_resource), - # /p2p - (b'peers', AddPeersResource(self.manager), p2p_resource), - (b'netfilter', NetfilterRuleResource(self.manager), p2p_resource), - (b'readiness', HealthcheckReadinessResource(self.manager), p2p_resource), - ] - # XXX: only enable UTXO search API if the index is enabled - if args.utxo_index: - resources.extend([ - (b'utxo_search', UtxoSearchResource(self.manager), root), - ]) - - if args.enable_debug_api: - debug_resource = Resource() - root.putChild(b'_debug', debug_resource) - resources.extend([ - (b'log', DebugLogResource(), debug_resource), - (b'raise', DebugRaiseResource(), debug_resource), - (b'reject', DebugRejectResource(), debug_resource), - (b'print', DebugPrintResource(), debug_resource), - ]) - if args.enable_crash_api: - crash_resource = Resource() - root.putChild(b'_crash', crash_resource) - resources.extend([ - (b'exit', DebugCrashResource(), crash_resource), - (b'mess_around', DebugMessAroundResource(self.manager), crash_resource), - ]) - - for url_path, resource, parent in resources: - parent.putChild(url_path, resource) - - if self.manager.stratum_factory is not None: - from hathor.stratum.resources import MiningStatsResource - root.putChild(b'miners', MiningStatsResource(self.manager)) - - with_wallet_api = bool(self.wallet and args.wallet_enable_api) - if with_wallet_api: - wallet_resources = ( - # /wallet - (b'balance', BalanceResource(self.manager), wallet_resource), - (b'history', HistoryResource(self.manager), wallet_resource), - (b'address', AddressResource(self.manager), wallet_resource), - (b'send_tokens', SendTokensResource(self.manager), wallet_resource), - (b'sign_tx', SignTxResource(self.manager), wallet_resource), - (b'unlock', UnlockWalletResource(self.manager), wallet_resource), - (b'lock', LockWalletResource(self.manager), wallet_resource), - (b'state', StateWalletResource(self.manager), wallet_resource), - ) - for url_path, resource, parent in 
wallet_resources: - parent.putChild(url_path, resource) - - # Websocket resource - assert self.manager.tx_storage.indexes is not None - ws_factory = HathorAdminWebsocketFactory(metrics=self.manager.metrics, - address_index=self.manager.tx_storage.indexes.addresses) - ws_factory.start() - root.putChild(b'ws', WebSocketResource(ws_factory)) - - # Mining websocket resource - mining_ws_factory = MiningWebsocketFactory(self.manager) - root.putChild(b'mining_ws', WebSocketResource(mining_ws_factory)) - - ws_factory.subscribe(self.manager.pubsub) - - # Event websocket resource - if args.x_enable_event_queue and self.event_ws_factory is not None: - root.putChild(b'event_ws', WebSocketResource(self.event_ws_factory)) - root.putChild(b'event', EventResource(self.manager._event_manager)) - - # Websocket stats resource - root.putChild(b'websocket_stats', WebsocketStatsResource(ws_factory)) - - real_root = Resource() - real_root.putChild(settings.API_VERSION_PREFIX.encode('ascii'), root) - - from hathor.profiler.site import SiteProfiler - status_server = SiteProfiler(real_root) - self.log.info('with status', listen=args.status, with_wallet_api=with_wallet_api) - - # Set websocket factory in metrics - self.manager.metrics.websocket_factory = ws_factory - - self._build_status = True - return status_server - - def register_resources(self, args: Namespace, *, dry_run: bool = False) -> None: - if args.prometheus: - self.create_prometheus(args) - - if args.status: - status_server = self.create_resources(args) - if not dry_run: - self.reactor.listenTCP(args.status, status_server) diff --git a/hathor/builder/resources_builder.py b/hathor/builder/resources_builder.py new file mode 100644 index 000000000..dd9022558 --- /dev/null +++ b/hathor/builder/resources_builder.py @@ -0,0 +1,271 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from argparse import Namespace +from typing import TYPE_CHECKING, Any, Dict, Optional + +from autobahn.twisted.resource import WebSocketResource +from structlog import get_logger +from twisted.web import server +from twisted.web.resource import Resource + +from hathor.event.resources.event import EventResource +from hathor.exception import BuilderError +from hathor.prometheus import PrometheusMetricsExporter + +if TYPE_CHECKING: + from hathor.event.websocket.factory import EventWebsocketFactory + from hathor.manager import HathorManager + +logger = get_logger() + + +class ResourcesBuilder: + def __init__(self, manager: 'HathorManager', event_ws_factory: Optional['EventWebsocketFactory']) -> None: + self.log = logger.new() + self.manager = manager + self.event_ws_factory = event_ws_factory + self.wallet = manager.wallet + + self._built_status = False + self._built_prometheus = False + + def build(self, args: Namespace) -> Optional[server.Site]: + if args.prometheus: + self.create_prometheus(args) + if args.status: + return self.create_resources(args) + return None + + def create_prometheus(self, args: Namespace) -> PrometheusMetricsExporter: + kwargs: Dict[str, Any] = { + 'metrics': self.manager.metrics, + 'metrics_prefix': args.prometheus_prefix + } + + if args.data: + kwargs['path'] = os.path.join(args.data, 'prometheus') + else: + raise BuilderError('To run prometheus exporter you must have a data path') + + prometheus = PrometheusMetricsExporter(**kwargs) + prometheus.start() + + self._built_prometheus = True + return prometheus + + def 
create_resources(self, args: Namespace) -> server.Site: + from hathor.conf import HathorSettings + from hathor.debug_resources import ( + DebugCrashResource, + DebugLogResource, + DebugMessAroundResource, + DebugPrintResource, + DebugRaiseResource, + DebugRejectResource, + ) + from hathor.mining.ws import MiningWebsocketFactory + from hathor.p2p.resources import ( + AddPeersResource, + HealthcheckReadinessResource, + MiningInfoResource, + MiningResource, + NetfilterRuleResource, + StatusResource, + ) + from hathor.profiler import get_cpu_profiler + from hathor.profiler.resources import CPUProfilerResource, ProfilerResource + from hathor.transaction.resources import ( + BlockAtHeightResource, + CreateTxResource, + DashboardTransactionResource, + DecodeTxResource, + GetBlockTemplateResource, + GraphvizFullResource, + GraphvizNeighboursResource, + MempoolResource, + PushTxResource, + SubmitBlockResource, + TransactionAccWeightResource, + TransactionResource, + TxParentsResource, + UtxoSearchResource, + ValidateAddressResource, + ) + from hathor.version_resource import VersionResource + from hathor.wallet.resources import ( + AddressResource, + BalanceResource, + HistoryResource, + LockWalletResource, + SendTokensResource, + SignTxResource, + StateWalletResource, + UnlockWalletResource, + ) + from hathor.wallet.resources.nano_contracts import ( + NanoContractDecodeResource, + NanoContractExecuteResource, + NanoContractMatchValueResource, + ) + from hathor.wallet.resources.thin_wallet import ( + AddressBalanceResource, + AddressHistoryResource, + AddressSearchResource, + SendTokensResource as SendTokensThinResource, + TokenHistoryResource, + TokenResource, + ) + from hathor.websocket import HathorAdminWebsocketFactory, WebsocketStatsResource + + settings = HathorSettings() + cpu = get_cpu_profiler() + + # TODO get this from a file. How should we do with the factory? 
+ root = Resource() + wallet_resource = Resource() + root.putChild(b'wallet', wallet_resource) + thin_wallet_resource = Resource() + root.putChild(b'thin_wallet', thin_wallet_resource) + contracts_resource = Resource() + wallet_resource.putChild(b'nano-contract', contracts_resource) + p2p_resource = Resource() + root.putChild(b'p2p', p2p_resource) + graphviz = Resource() + # XXX: reach the resource through /graphviz/ too, previously it was a leaf so this wasn't a problem + graphviz.putChild(b'', graphviz) + for fmt in ['dot', 'pdf', 'png', 'jpg']: + bfmt = fmt.encode('ascii') + graphviz.putChild(b'full.' + bfmt, GraphvizFullResource(self.manager, format=fmt)) + graphviz.putChild(b'neighbours.' + bfmt, GraphvizNeighboursResource(self.manager, format=fmt)) + + resources = [ + (b'status', StatusResource(self.manager), root), + (b'version', VersionResource(self.manager), root), + (b'create_tx', CreateTxResource(self.manager), root), + (b'decode_tx', DecodeTxResource(self.manager), root), + (b'validate_address', ValidateAddressResource(self.manager), root), + (b'push_tx', + PushTxResource(self.manager, args.max_output_script_size, args.allow_non_standard_script), + root), + (b'graphviz', graphviz, root), + (b'transaction', TransactionResource(self.manager), root), + (b'block_at_height', BlockAtHeightResource(self.manager), root), + (b'transaction_acc_weight', TransactionAccWeightResource(self.manager), root), + (b'dashboard_tx', DashboardTransactionResource(self.manager), root), + (b'profiler', ProfilerResource(self.manager), root), + (b'top', CPUProfilerResource(self.manager, cpu), root), + (b'mempool', MempoolResource(self.manager), root), + # mining + (b'mining', MiningResource(self.manager), root), + (b'getmininginfo', MiningInfoResource(self.manager), root), + (b'get_block_template', GetBlockTemplateResource(self.manager), root), + (b'submit_block', SubmitBlockResource(self.manager), root), + (b'tx_parents', TxParentsResource(self.manager), root), + # /thin_wallet 
+ (b'address_history', AddressHistoryResource(self.manager), thin_wallet_resource), + (b'address_balance', AddressBalanceResource(self.manager), thin_wallet_resource), + (b'address_search', AddressSearchResource(self.manager), thin_wallet_resource), + (b'send_tokens', SendTokensThinResource(self.manager), thin_wallet_resource), + (b'token', TokenResource(self.manager), thin_wallet_resource), + (b'token_history', TokenHistoryResource(self.manager), thin_wallet_resource), + # /wallet/nano-contract + (b'match-value', NanoContractMatchValueResource(self.manager), contracts_resource), + (b'decode', NanoContractDecodeResource(self.manager), contracts_resource), + (b'execute', NanoContractExecuteResource(self.manager), contracts_resource), + # /p2p + (b'peers', AddPeersResource(self.manager), p2p_resource), + (b'netfilter', NetfilterRuleResource(self.manager), p2p_resource), + (b'readiness', HealthcheckReadinessResource(self.manager), p2p_resource), + ] + # XXX: only enable UTXO search API if the index is enabled + if args.utxo_index: + resources.extend([ + (b'utxo_search', UtxoSearchResource(self.manager), root), + ]) + + if args.enable_debug_api: + debug_resource = Resource() + root.putChild(b'_debug', debug_resource) + resources.extend([ + (b'log', DebugLogResource(), debug_resource), + (b'raise', DebugRaiseResource(), debug_resource), + (b'reject', DebugRejectResource(), debug_resource), + (b'print', DebugPrintResource(), debug_resource), + ]) + if args.enable_crash_api: + crash_resource = Resource() + root.putChild(b'_crash', crash_resource) + resources.extend([ + (b'exit', DebugCrashResource(), crash_resource), + (b'mess_around', DebugMessAroundResource(self.manager), crash_resource), + ]) + + for url_path, resource, parent in resources: + parent.putChild(url_path, resource) + + if self.manager.stratum_factory is not None: + from hathor.stratum.resources import MiningStatsResource + root.putChild(b'miners', MiningStatsResource(self.manager)) + + with_wallet_api = 
bool(self.wallet and args.wallet_enable_api) + if with_wallet_api: + wallet_resources = ( + # /wallet + (b'balance', BalanceResource(self.manager), wallet_resource), + (b'history', HistoryResource(self.manager), wallet_resource), + (b'address', AddressResource(self.manager), wallet_resource), + (b'send_tokens', SendTokensResource(self.manager), wallet_resource), + (b'sign_tx', SignTxResource(self.manager), wallet_resource), + (b'unlock', UnlockWalletResource(self.manager), wallet_resource), + (b'lock', LockWalletResource(self.manager), wallet_resource), + (b'state', StateWalletResource(self.manager), wallet_resource), + ) + for url_path, resource, parent in wallet_resources: + parent.putChild(url_path, resource) + + # Websocket resource + assert self.manager.tx_storage.indexes is not None + ws_factory = HathorAdminWebsocketFactory(metrics=self.manager.metrics, + address_index=self.manager.tx_storage.indexes.addresses) + ws_factory.start() + root.putChild(b'ws', WebSocketResource(ws_factory)) + + # Mining websocket resource + mining_ws_factory = MiningWebsocketFactory(self.manager) + root.putChild(b'mining_ws', WebSocketResource(mining_ws_factory)) + + ws_factory.subscribe(self.manager.pubsub) + + # Event websocket resource + if args.x_enable_event_queue and self.event_ws_factory is not None: + root.putChild(b'event_ws', WebSocketResource(self.event_ws_factory)) + root.putChild(b'event', EventResource(self.manager._event_manager)) + + # Websocket stats resource + root.putChild(b'websocket_stats', WebsocketStatsResource(ws_factory)) + + real_root = Resource() + real_root.putChild(settings.API_VERSION_PREFIX.encode('ascii'), root) + + from hathor.profiler.site import SiteProfiler + status_server = SiteProfiler(real_root) + self.log.info('with status', listen=args.status, with_wallet_api=with_wallet_api) + + # Set websocket factory in metrics + self.manager.metrics.websocket_factory = ws_factory + + self._built_status = True + return status_server diff --git 
a/hathor/cli/run_node.py b/hathor/cli/run_node.py index d37fc3db6..8bd88e012 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -128,7 +128,7 @@ def prepare(self, args: Namespace, *, register_resources: bool = True) -> None: from hathor.util import reactor self.reactor = reactor - from hathor.builder import CliBuilder + from hathor.builder import CliBuilder, ResourcesBuilder from hathor.exception import BuilderError builder = CliBuilder() try: @@ -140,7 +140,10 @@ def prepare(self, args: Namespace, *, register_resources: bool = True) -> None: self.wallet = self.manager.wallet self.start_manager(args) if register_resources: - builder.register_resources(args) + resources_builder = ResourcesBuilder(self.manager, builder.event_ws_factory) + status_server = resources_builder.build(args) + if args.status: + self.reactor.listenTCP(args.status, status_server) from hathor.conf import HathorSettings settings = HathorSettings() diff --git a/tests/others/test_cli_builder.py b/tests/others/test_cli_builder.py index 2513a0487..eb248003e 100644 --- a/tests/others/test_cli_builder.py +++ b/tests/others/test_cli_builder.py @@ -2,7 +2,7 @@ import pytest -from hathor.builder import CliBuilder +from hathor.builder import CliBuilder, ResourcesBuilder from hathor.event import EventManager from hathor.event.storage import EventMemoryStorage, EventRocksDBStorage from hathor.event.websocket import EventWebsocketFactory @@ -26,18 +26,20 @@ def setUp(self): self.parser = RunNode.create_parser() self.builder = CliBuilder() - def _build_with_error(self, args: List[str], err_msg: str) -> None: - args = self.parser.parse_args(args) + def _build_with_error(self, cmd_args: List[str], err_msg: str) -> None: + args = self.parser.parse_args(cmd_args) with self.assertRaises(BuilderError) as cm: - self.builder.create_manager(self.reactor, args) - self.builder.register_resources(args, dry_run=True) + manager = self.builder.create_manager(self.reactor, args) + self.resources_builder = 
ResourcesBuilder(manager, self.builder.event_ws_factory) + self.resources_builder.build(args) self.assertEqual(err_msg, str(cm.exception)) - def _build(self, args: List[str]) -> HathorManager: - args = self.parser.parse_args(args) + def _build(self, cmd_args: List[str]) -> HathorManager: + args = self.parser.parse_args(cmd_args) manager = self.builder.create_manager(self.reactor, args) self.assertIsNotNone(manager) - self.builder.register_resources(args, dry_run=True) + self.resources_builder = ResourcesBuilder(manager, self.builder.event_ws_factory) + self.resources_builder.build(args) return manager def test_empty(self): @@ -54,8 +56,8 @@ def test_all_default(self): self.assertNotIn(SyncVersion.V1, manager.connections._sync_factories) self.assertIn(SyncVersion.V1_1, manager.connections._sync_factories) self.assertNotIn(SyncVersion.V2, manager.connections._sync_factories) - self.assertFalse(self.builder._build_prometheus) - self.assertFalse(self.builder._build_status) + self.assertFalse(self.resources_builder._built_prometheus) + self.assertFalse(self.resources_builder._built_status) self.assertIsNone(manager._event_manager) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') @@ -133,7 +135,7 @@ def test_status(self): '--enable-debug-api', '--enable-crash-api' ]) - self.assertTrue(self.builder._build_status) + self.assertTrue(self.resources_builder._built_status) self.clean_pending(required_to_quiesce=False) def test_prometheus_no_data(self): @@ -144,7 +146,7 @@ def test_prometheus_no_data(self): def test_prometheus(self): data_dir = self.mkdtemp() self._build(['--prometheus', '--data', data_dir]) - self.assertTrue(self.builder._build_prometheus) + self.assertTrue(self.resources_builder._built_prometheus) self.clean_pending(required_to_quiesce=False) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') From 54a1969b0291a6c91aa46cd5bf803680ef5f393b Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Tue, 16 May 2023 13:23:22 
-0300 Subject: [PATCH 02/24] feat(events): reliable integration during the load phase (#555) --- hathor/builder/builder.py | 13 ++-- hathor/builder/cli_builder.py | 24 +++---- hathor/builder/resources_builder.py | 2 +- hathor/cli/run_node.py | 2 - hathor/event/event_manager.py | 86 +++++++++++++++++++------ hathor/event/model/node_state.py | 20 ++++++ hathor/event/storage/event_storage.py | 26 ++++++++ hathor/event/storage/memory_storage.py | 20 ++++++ hathor/event/storage/rocksdb_storage.py | 54 ++++++++++++++-- hathor/manager.py | 28 +++++--- hathor/pubsub.py | 3 - hathor/transaction/util.py | 13 ++++ tests/event/test_event_manager.py | 29 ++++++--- tests/event/test_event_reorg.py | 4 ++ tests/event/test_event_storage.py | 85 ++++++++++++++++++++++-- tests/others/test_cli_builder.py | 12 +--- 16 files changed, 336 insertions(+), 85 deletions(-) create mode 100644 hathor/event/model/node_state.py diff --git a/hathor/builder/builder.py b/hathor/builder/builder.py index c3815ed27..a5954880e 100644 --- a/hathor/builder/builder.py +++ b/hathor/builder/builder.py @@ -85,6 +85,7 @@ def __init__(self) -> None: self._event_manager: Optional[EventManager] = None self._event_ws_factory: Optional[EventWebsocketFactory] = None + self._enable_event_queue: Optional[bool] = None self._rocksdb_path: Optional[str] = None self._rocksdb_storage: Optional[RocksDBStorage] = None @@ -128,7 +129,6 @@ def build(self) -> BuildArtifacts: consensus_algorithm = ConsensusAlgorithm(soft_voided_tx_ids, pubsub) wallet = self._get_or_create_wallet() - event_storage = self._get_or_create_event_storage() event_manager = self._get_or_create_event_manager() tx_storage = self._get_or_create_tx_storage() indexes = tx_storage.indexes @@ -161,20 +161,22 @@ def build(self) -> BuildArtifacts: if self._full_verification is not None: kwargs['full_verification'] = self._full_verification + if self._enable_event_queue is not None: + kwargs['enable_event_queue'] = self._enable_event_queue + manager = 
HathorManager( reactor, pubsub=pubsub, consensus_algorithm=consensus_algorithm, peer_id=peer_id, tx_storage=tx_storage, - event_storage=event_storage, + event_manager=event_manager, network=self._network, wallet=wallet, rng=self._rng, checkpoints=self._checkpoints, capabilities=self._capabilities, environment_info=get_environment_info(self._cmdline, peer_id.id), - event_manager=event_manager, **kwargs ) @@ -303,8 +305,8 @@ def _get_or_create_event_storage(self) -> EventStorage: return self._event_storage - def _get_or_create_event_manager(self) -> Optional[EventManager]: - if self._event_manager is None and self._event_ws_factory is not None: + def _get_or_create_event_manager(self) -> EventManager: + if self._event_manager is None: self._event_manager = EventManager( reactor=self._get_reactor(), pubsub=self._get_or_create_pubsub(), @@ -389,6 +391,7 @@ def enable_wallet_index(self) -> 'Builder': def enable_event_manager(self, *, event_ws_factory: EventWebsocketFactory) -> 'Builder': self.check_if_can_modify() + self._enable_event_queue = True self._event_ws_factory = event_ws_factory return self diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index 7f22dfa42..b6ed4006a 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -136,19 +136,15 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa pubsub = PubSubManager(reactor) - event_manager: Optional[EventManager] = None if args.x_enable_event_queue: self.event_ws_factory = EventWebsocketFactory(reactor, event_storage) - event_manager = EventManager( - event_storage=event_storage, - event_ws_factory=self.event_ws_factory, - pubsub=pubsub, - reactor=reactor, - emit_load_events=args.x_emit_load_events - ) - else: - self.check_or_raise(not args.x_emit_load_events, '--x-emit-load-events cannot be used without ' - '--x-enable-event-queue') + + event_manager = EventManager( + event_storage=event_storage, + 
event_ws_factory=self.event_ws_factory, + pubsub=pubsub, + reactor=reactor + ) if args.wallet_index and tx_storage.indexes is not None: self.log.debug('enable wallet indexes') @@ -174,7 +170,6 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa network=network, hostname=hostname, tx_storage=tx_storage, - event_storage=event_storage, event_manager=event_manager, wallet=self.wallet, stratum_port=args.stratum, @@ -185,7 +180,8 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa enable_sync_v2=enable_sync_v2, consensus_algorithm=consensus_algorithm, environment_info=get_environment_info(args=str(args), peer_id=peer_id.id), - full_verification=full_verification + full_verification=full_verification, + enable_event_queue=bool(args.x_enable_event_queue) ) if args.data: @@ -232,7 +228,7 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa self.manager.enable_event_queue = True self.log.info('--x-enable-event-queue flag provided. 
' - 'The events detected by the full node will be stored and retrieved to clients') + 'The events detected by the full node will be stored and can be retrieved by clients') for description in args.listen: self.manager.add_listen_address(description) diff --git a/hathor/builder/resources_builder.py b/hathor/builder/resources_builder.py index dd9022558..8925c0d5b 100644 --- a/hathor/builder/resources_builder.py +++ b/hathor/builder/resources_builder.py @@ -250,7 +250,7 @@ def create_resources(self, args: Namespace) -> server.Site: ws_factory.subscribe(self.manager.pubsub) # Event websocket resource - if args.x_enable_event_queue and self.event_ws_factory is not None: + if args.x_enable_event_queue: root.putChild(b'event_ws', WebSocketResource(self.event_ws_factory)) root.putChild(b'event', EventResource(self.manager._event_manager)) diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py index 8bd88e012..809592eb4 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -100,8 +100,6 @@ def create_parser(cls) -> ArgumentParser: parser.add_argument('--x-localhost-only', action='store_true', help='Only connect to peers on localhost') parser.add_argument('--x-rocksdb-indexes', action='store_true', help=SUPPRESS) parser.add_argument('--x-enable-event-queue', action='store_true', help='Enable event queue mechanism') - parser.add_argument('--x-emit-load-events', action='store_true', help='Enable emission of events during the ' - 'LOAD phase') parser.add_argument('--peer-id-blacklist', action='extend', default=[], nargs='+', type=str, help='Peer IDs to forbid connection') return parser diff --git a/hathor/event/event_manager.py b/hathor/event/event_manager.py index 4ab06cbe4..ff3197b29 100644 --- a/hathor/event/event_manager.py +++ b/hathor/event/event_manager.py @@ -12,15 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Callable, Optional +from typing import Callable, Iterator, Optional from structlog import get_logger from hathor.event.model.base_event import BaseEvent from hathor.event.model.event_type import EventType +from hathor.event.model.node_state import NodeState from hathor.event.storage import EventStorage from hathor.event.websocket import EventWebsocketFactory from hathor.pubsub import EventArguments, HathorEvents, PubSubManager +from hathor.transaction import BaseTransaction from hathor.util import Reactor logger = get_logger() @@ -51,7 +53,9 @@ class EventManager: _peer_id: str _is_running: bool = False - _load_finished: bool = False + _previous_node_state: Optional[NodeState] = None + _last_event: Optional[BaseEvent] = None + _last_existing_group_id: Optional[int] = None @property def event_storage(self) -> EventStorage: @@ -60,34 +64,40 @@ def event_storage(self) -> EventStorage: def __init__( self, event_storage: EventStorage, - event_ws_factory: EventWebsocketFactory, pubsub: PubSubManager, reactor: Reactor, - emit_load_events: bool = False + event_ws_factory: Optional[EventWebsocketFactory] = None, ): self.log = logger.new() - self._clock = reactor + self._reactor = reactor self._event_storage = event_storage self._event_ws_factory = event_ws_factory self._pubsub = pubsub - self.emit_load_events = emit_load_events - self._last_event = self._event_storage.get_last_event() - self._last_existing_group_id = self._event_storage.get_last_group_id() + def start(self, peer_id: str) -> None: + assert self._is_running is False, 'Cannot start, EventManager is already running' + assert self._event_ws_factory is not None, 'Cannot start, EventWebsocketFactory is not set' + assert self.get_event_queue_state() is True, 'Cannot start, event queue feature is disabled' + + self._previous_node_state = self._event_storage.get_node_state() + + if self._should_reload_events(): + self._event_storage.clear_events() + else: + self._last_event = 
self._event_storage.get_last_event() + self._last_existing_group_id = self._event_storage.get_last_group_id() self._assert_closed_event_group() self._subscribe_events() - def start(self, peer_id: str) -> None: - assert self._is_running is False, 'Cannot start, EventManager is already running' - self._peer_id = peer_id self._event_ws_factory.start() self._is_running = True def stop(self): assert self._is_running is True, 'Cannot stop, EventManager is not running' + assert self._event_ws_factory is not None self._event_ws_factory.stop() self._is_running = False @@ -110,25 +120,28 @@ def _subscribe_events(self): """ Subscribe to defined events for the pubsub received """ for event in _SUBSCRIBE_EVENTS: - self._pubsub.subscribe(event, self._handle_event) + self._pubsub.subscribe(event, self._handle_hathor_event) + + def _handle_hathor_event(self, hathor_event: HathorEvents, event_args: EventArguments) -> None: + event_type = EventType.from_hathor_event(hathor_event) + + self._handle_event(event_type, event_args) - def _handle_event(self, hathor_event: HathorEvents, event_args: EventArguments) -> None: + def _handle_event(self, event_type: EventType, event_args: EventArguments) -> None: assert self._is_running, 'Cannot handle event, EventManager is not started.' 
- event_type = EventType.from_hathor_event(hathor_event) event_specific_handlers = { + EventType.LOAD_STARTED: self._handle_load_started, EventType.LOAD_FINISHED: self._handle_load_finished } if event_specific_handler := event_specific_handlers.get(event_type): event_specific_handler() - if not self._load_finished and not self.emit_load_events: - return - self._handle_event_creation(event_type, event_args) def _handle_event_creation(self, event_type: EventType, event_args: EventArguments) -> None: + assert self._event_ws_factory is not None create_event_fn: Callable[[EventType, EventArguments], BaseEvent] if event_type in _GROUP_START_EVENTS: @@ -181,9 +194,6 @@ def _create_non_group_edge_event(self, event_type: EventType, event_args: EventA group_id=group_id, ) - def _handle_load_finished(self): - self._load_finished = True - def _create_event( self, event_type: EventType, @@ -193,8 +203,42 @@ def _create_event( return BaseEvent.from_event_arguments( event_id=0 if self._last_event is None else self._last_event.id + 1, peer_id=self._peer_id, - timestamp=self._clock.seconds(), + timestamp=self._reactor.seconds(), event_type=event_type, event_args=event_args, group_id=group_id, ) + + def _handle_load_started(self): + self._event_storage.save_node_state(NodeState.LOAD) + + def _handle_load_finished(self): + self._event_storage.save_node_state(NodeState.SYNC) + + def _should_reload_events(self) -> bool: + return self._previous_node_state in [None, NodeState.LOAD] + + def get_event_queue_state(self) -> bool: + """Get whether the event queue feature is enabled from the storage""" + return self._event_storage.get_event_queue_state() + + def save_event_queue_state(self, state: bool) -> None: + self._event_storage.save_event_queue_state(state) + + def handle_load_phase_vertices(self, topological_iterator: Iterator[BaseTransaction]) -> None: + """ + Either generates load phase events or not, depending on previous node state. 
+ Does so asynchronously so events generated here are not processed before normal event handling. + """ + assert self._is_running, 'Cannot handle load phase events, EventManager is not started.' + + if not self._should_reload_events(): + return + + for vertex in topological_iterator: + self._reactor.callLater( + delay=0, + callable=self._handle_event, + event_type=EventType.NEW_VERTEX_ACCEPTED, + event_args=EventArguments(tx=vertex) + ) diff --git a/hathor/event/model/node_state.py b/hathor/event/model/node_state.py new file mode 100644 index 000000000..161e0061d --- /dev/null +++ b/hathor/event/model/node_state.py @@ -0,0 +1,20 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum + + +class NodeState(Enum): + LOAD = 0 + SYNC = 1 diff --git a/hathor/event/storage/event_storage.py b/hathor/event/storage/event_storage.py index c91b14d79..90a68d761 100644 --- a/hathor/event/storage/event_storage.py +++ b/hathor/event/storage/event_storage.py @@ -16,6 +16,7 @@ from typing import Iterator, Optional from hathor.event.model.base_event import BaseEvent +from hathor.event.model.node_state import NodeState class EventStorage(ABC): @@ -43,3 +44,28 @@ def get_last_group_id(self) -> Optional[int]: def iter_from_event(self, key: int) -> Iterator[BaseEvent]: """ Iterate through events starting from the event with the given key""" raise NotImplementedError + + @abstractmethod + def clear_events(self) -> None: + """Clear all stored events and related metadata.""" + raise NotImplementedError + + @abstractmethod + def save_node_state(self, state: NodeState) -> None: + """Save a node state in the storage""" + raise NotImplementedError + + @abstractmethod + def get_node_state(self) -> Optional[NodeState]: + """Get the node state from the storage""" + raise NotImplementedError + + @abstractmethod + def save_event_queue_state(self, enabled: bool) -> None: + """Save whether the event queue feature is enabled in the storage""" + raise NotImplementedError + + @abstractmethod + def get_event_queue_state(self) -> bool: + """Get whether the event queue feature is enabled from the storage""" + raise NotImplementedError diff --git a/hathor/event/storage/memory_storage.py b/hathor/event/storage/memory_storage.py index 3d28a2035..569d51d0f 100644 --- a/hathor/event/storage/memory_storage.py +++ b/hathor/event/storage/memory_storage.py @@ -15,6 +15,7 @@ from typing import Iterator, List, Optional from hathor.event.model.base_event import BaseEvent +from hathor.event.model.node_state import NodeState from hathor.event.storage.event_storage import EventStorage @@ -23,6 +24,8 @@ def __init__(self) -> None: self._events: List[BaseEvent] = [] 
self._last_event: Optional[BaseEvent] = None self._last_group_id: Optional[int] = None + self._node_state: Optional[NodeState] = None + self._event_queue_enabled: bool = False def save_event(self, event: BaseEvent) -> None: if event.id != len(self._events): @@ -54,3 +57,20 @@ def iter_from_event(self, key: int) -> Iterator[BaseEvent]: while key < len(self._events): yield self._events[key] key += 1 + + def clear_events(self) -> None: + self._events = [] + self._last_event = None + self._last_group_id = None + + def save_node_state(self, state: NodeState) -> None: + self._node_state = state + + def get_node_state(self) -> Optional[NodeState]: + return self._node_state + + def save_event_queue_state(self, enabled: bool) -> None: + self._event_queue_enabled = enabled + + def get_event_queue_state(self) -> bool: + return self._event_queue_enabled diff --git a/hathor/event/storage/rocksdb_storage.py b/hathor/event/storage/rocksdb_storage.py index 5f5f02424..eae5f5305 100644 --- a/hathor/event/storage/rocksdb_storage.py +++ b/hathor/event/storage/rocksdb_storage.py @@ -15,21 +15,27 @@ from typing import Iterator, Optional from hathor.event.model.base_event import BaseEvent +from hathor.event.model.node_state import NodeState from hathor.event.storage.event_storage import EventStorage from hathor.storage.rocksdb_storage import RocksDBStorage -from hathor.transaction.util import int_to_bytes +from hathor.transaction.util import bytes_to_int, int_to_bytes from hathor.util import json_dumpb _CF_NAME_EVENT = b'event' _CF_NAME_META = b'event-metadata' _KEY_LAST_GROUP_ID = b'last-group-id' +_KEY_NODE_STATE = b'node-state' +_KEY_EVENT_QUEUE_ENABLED = b'event-queue-enabled' class EventRocksDBStorage(EventStorage): def __init__(self, rocksdb_storage: RocksDBStorage): - self._db = rocksdb_storage.get_db() - self._cf_event = rocksdb_storage.get_or_create_column_family(_CF_NAME_EVENT) - self._cf_meta = rocksdb_storage.get_or_create_column_family(_CF_NAME_META) + self._rocksdb_storage 
= rocksdb_storage + + self._db = self._rocksdb_storage.get_db() + self._cf_event = self._rocksdb_storage.get_or_create_column_family(_CF_NAME_EVENT) + self._cf_meta = self._rocksdb_storage.get_or_create_column_family(_CF_NAME_META) + self._last_event: Optional[BaseEvent] = self._db_get_last_event() self._last_group_id: Optional[int] = self._db_get_last_group_id() @@ -57,11 +63,11 @@ def _db_get_last_group_id(self) -> Optional[int]: last_group_id = self._db.get((self._cf_meta, _KEY_LAST_GROUP_ID)) if last_group_id is None: return None - return int.from_bytes(last_group_id, byteorder='big', signed=False) + return bytes_to_int(last_group_id) def save_event(self, event: BaseEvent) -> None: if (self._last_event is None and event.id != 0) or \ - (self._last_event is not None and event.id > self._last_event.id + 1): + (self._last_event is not None and event.id != self._last_event.id + 1): raise ValueError('invalid event.id, ids must be sequential and leave no gaps') event_data = json_dumpb(event.dict()) key = int_to_bytes(event.id, 8) @@ -84,3 +90,39 @@ def get_last_event(self) -> Optional[BaseEvent]: def get_last_group_id(self) -> Optional[int]: return self._last_group_id + + def clear_events(self) -> None: + self._last_event = None + self._last_group_id = None + + self._db.delete((self._cf_meta, _KEY_LAST_GROUP_ID)) + self._db.drop_column_family(self._cf_event) + + self._cf_event = self._rocksdb_storage.get_or_create_column_family(_CF_NAME_EVENT) + + def save_node_state(self, state: NodeState) -> None: + self._db.put((self._cf_meta, _KEY_NODE_STATE), int_to_bytes(state.value, 8)) + + def get_node_state(self) -> Optional[NodeState]: + node_state_bytes = self._db.get((self._cf_meta, _KEY_NODE_STATE)) + + if node_state_bytes is None: + return None + + node_state_int = bytes_to_int(node_state_bytes) + + return NodeState(node_state_int) + + def save_event_queue_state(self, enabled: bool) -> None: + self._db.put( + (self._cf_meta, _KEY_EVENT_QUEUE_ENABLED), + 
enabled.to_bytes(length=1, byteorder='big') + ) + + def get_event_queue_state(self) -> bool: + enabled_bytes = self._db.get((self._cf_meta, _KEY_EVENT_QUEUE_ENABLED)) + + if enabled_bytes is None: + return False + + return bool.from_bytes(enabled_bytes, byteorder='big') diff --git a/hathor/manager.py b/hathor/manager.py index 642c0d775..b608f4386 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -30,7 +30,6 @@ from hathor.conf import HathorSettings from hathor.consensus import ConsensusAlgorithm from hathor.event.event_manager import EventManager -from hathor.event.storage import EventStorage from hathor.exception import ( DoubleSpendingError, HathorError, @@ -88,11 +87,10 @@ def __init__(self, consensus_algorithm: ConsensusAlgorithm, peer_id: PeerId, tx_storage: TransactionStorage, - event_storage: EventStorage, + event_manager: EventManager, network: str, hostname: Optional[str] = None, wallet: Optional[BaseWallet] = None, - event_manager: Optional[EventManager] = None, stratum_port: Optional[int] = None, ssl: bool = True, enable_sync_v1: bool = False, @@ -102,7 +100,8 @@ def __init__(self, checkpoints: Optional[List[Checkpoint]] = None, rng: Optional[Random] = None, environment_info: Optional[EnvironmentInfo] = None, - full_verification: bool = False): + full_verification: bool = False, + enable_event_queue: bool = False): """ :param reactor: Twisted reactor which handles the mainloop and the events. :param peer_id: Id of this node. 
@@ -125,6 +124,11 @@ def __init__(self, if not (enable_sync_v1 or enable_sync_v1_1 or enable_sync_v2): raise TypeError(f'{type(self).__name__}() at least one sync version is required') + if event_manager.get_event_queue_state() is True and not enable_event_queue: + raise ValueError( + 'cannot start manager without event queue feature, as it was enabled in the previous startup' + ) + self._enable_sync_v1 = enable_sync_v1 self._enable_sync_v2 = enable_sync_v2 @@ -172,9 +176,8 @@ def __init__(self, self.tx_storage.pubsub = self.pubsub self._event_manager = event_manager - - if self._event_manager: - assert self._event_manager.event_storage == event_storage + self._event_manager.save_event_queue_state(enable_event_queue) + self._enable_event_queue = enable_event_queue if enable_sync_v2: assert self.tx_storage.indexes is not None @@ -285,7 +288,7 @@ def start(self) -> None: ) sys.exit(-1) - if self._event_manager: + if self._enable_event_queue: self._event_manager.start(not_none(self.my_peer.id)) self.state = self.NodeState.INITIALIZING @@ -355,7 +358,7 @@ def stop(self) -> Deferred: if wait_stratum: waits.append(wait_stratum) - if self._event_manager: + if self._enable_event_queue: self._event_manager.stop() self.tx_storage.flush() @@ -396,7 +399,7 @@ def _initialize_components(self) -> None: This method runs through all transactions, verifying them and updating our wallet. """ - assert not self._event_manager, 'this method cannot be used if the events feature is enabled.' + assert not self._enable_event_queue, 'this method cannot be used if the events feature is enabled.' 
self.log.info('initialize') if self.wallet: @@ -649,6 +652,11 @@ def _initialize_components_new(self) -> None: # XXX: last step before actually starting is updating the last started at timestamps self.tx_storage.update_last_started_at(started_at) + + if self._enable_event_queue: + topological_iterator = self.tx_storage.topological_iterator() + self._event_manager.handle_load_phase_vertices(topological_iterator) + self.state = self.NodeState.READY self.pubsub.publish(HathorEvents.LOAD_FINISHED) diff --git a/hathor/pubsub.py b/hathor/pubsub.py index 255088f80..0a0e0153e 100644 --- a/hathor/pubsub.py +++ b/hathor/pubsub.py @@ -90,9 +90,6 @@ class HathorEvents(Enum): REORG_FINISHED Triggered when consensus algorithm ends all changes involved in a reorg - - VERTEX_METADATA_CHANGED - Triggered when consensus algorithm changes a metadata of an existing vertex (transaction or block) """ MANAGER_ON_START = 'manager:on_start' MANAGER_ON_STOP = 'manager:on_stop' diff --git a/hathor/transaction/util.py b/hathor/transaction/util.py index 0f88ed86c..fb9b1b9c7 100644 --- a/hathor/transaction/util.py +++ b/hathor/transaction/util.py @@ -28,6 +28,19 @@ def int_to_bytes(number: int, size: int, signed: bool = False) -> bytes: return number.to_bytes(size, byteorder='big', signed=signed) +def bytes_to_int(data: bytes, *, signed: bool = False) -> int: + """ + Converts data in bytes to an int. Assumes big-endian format. + + Args: + data: bytes to be converted + signed: whether two's complement is used to represent the integer. 
+ + Returns: the converted data as int + """ + return int.from_bytes(data, byteorder='big', signed=signed) + + def unpack(fmt: str, buf: bytes) -> Any: size = struct.calcsize(fmt) return struct.unpack(fmt, buf[:size]), buf[size:] diff --git a/tests/event/test_event_manager.py b/tests/event/test_event_manager.py index 1b4f7250f..0f602a9fe 100644 --- a/tests/event/test_event_manager.py +++ b/tests/event/test_event_manager.py @@ -42,19 +42,32 @@ def test_event_group(self): self._fake_reorg_started() self._fake_reorg_finished() self.run_to_completion() + event0 = self.event_storage.get_event(0) event1 = self.event_storage.get_event(1) event2 = self.event_storage.get_event(2) event3 = self.event_storage.get_event(3) event4 = self.event_storage.get_event(4) - self.assertEqual(EventType(event0.type), EventType.LOAD_FINISHED) - self.assertEqual(EventType(event1.type), EventType.REORG_STARTED) - self.assertIsNotNone(event1.group_id) - self.assertEqual(EventType(event2.type), EventType.REORG_FINISHED) - self.assertIsNotNone(event2.group_id) - self.assertEqual(event1.group_id, event2.group_id) - self.assertNotEqual(event2.group_id, event3.group_id) - self.assertEqual(event3.group_id, event4.group_id) + event5 = self.event_storage.get_event(5) + event6 = self.event_storage.get_event(6) + event7 = self.event_storage.get_event(7) + event8 = self.event_storage.get_event(8) + + self.assertEqual(EventType(event0.type), EventType.LOAD_STARTED) + self.assertEqual(EventType(event1.type), EventType.NEW_VERTEX_ACCEPTED) + self.assertEqual(EventType(event2.type), EventType.NEW_VERTEX_ACCEPTED) + self.assertEqual(EventType(event3.type), EventType.NEW_VERTEX_ACCEPTED) + self.assertEqual(EventType(event4.type), EventType.LOAD_FINISHED) + self.assertEqual(EventType(event5.type), EventType.REORG_STARTED) + + self.assertIsNotNone(event5.group_id) + self.assertEqual(EventType(event6.type), EventType.REORG_FINISHED) + self.assertIsNotNone(event6.group_id) + self.assertEqual(event5.group_id, 
event6.group_id) + + self.assertNotEqual(event6.group_id, event7.group_id) + self.assertIsNotNone(event7.group_id) + self.assertEqual(event7.group_id, event8.group_id) def test_cannot_start_group_twice(self): self._fake_reorg_started() diff --git a/tests/event/test_event_reorg.py b/tests/event/test_event_reorg.py index fa69a4fac..878e21b39 100644 --- a/tests/event/test_event_reorg.py +++ b/tests/event/test_event_reorg.py @@ -58,6 +58,10 @@ class unsorted(list): pass expected_events_grouped = [ [ + (EventType.LOAD_STARTED, {}), + (EventType.NEW_VERTEX_ACCEPTED, {'hash': settings.GENESIS_BLOCK_HASH.hex()}), + (EventType.NEW_VERTEX_ACCEPTED, {'hash': settings.GENESIS_TX1_HASH.hex()}), + (EventType.NEW_VERTEX_ACCEPTED, {'hash': settings.GENESIS_TX2_HASH.hex()}), (EventType.LOAD_FINISHED, {}) ], # XXX: the order of the following events can vary depending on which genesis is spent/confirmed first diff --git a/tests/event/test_event_storage.py b/tests/event/test_event_storage.py index f2441f3a5..4889bcb15 100644 --- a/tests/event/test_event_storage.py +++ b/tests/event/test_event_storage.py @@ -2,6 +2,8 @@ import pytest +from hathor.event.model.node_state import NodeState +from hathor.event.storage import EventStorage from hathor.event.storage.memory_storage import EventMemoryStorage from hathor.event.storage.rocksdb_storage import EventRocksDBStorage from hathor.storage.rocksdb_storage import RocksDBStorage @@ -12,6 +14,8 @@ class EventStorageBaseTest(unittest.TestCase): __test__ = False + event_storage: EventStorage + def setUp(self): super().setUp() self.event_mocker = EventMocker(self.rng) @@ -92,15 +96,88 @@ def test_iter_from_event_negative_key(self): def test_save_events_and_retrieve_last_group_id(self): expected_group_id = 4 - for i in range(10): - group_id = i if i <= expected_group_id else None - event = self.event_mocker.generate_mocked_event(i, group_id) - self.event_storage.save_event(event) + + self._populate_events_and_last_group_id(n_events=10, 
last_group_id=expected_group_id) actual_group_id = self.event_storage.get_last_group_id() assert expected_group_id == actual_group_id + def _populate_events_and_last_group_id(self, n_events: int, last_group_id: int) -> None: + for i in range(n_events): + group_id = i if i <= last_group_id else None + event = self.event_mocker.generate_mocked_event(i, group_id) + self.event_storage.save_event(event) + + def test_get_empty_node_state(self): + node_state = self.event_storage.get_node_state() + + assert node_state is None + + def test_save_node_state_and_retrieve(self): + self.event_storage.save_node_state(NodeState.SYNC) + node_state = self.event_storage.get_node_state() + + assert node_state == NodeState.SYNC + + def test_get_empty_event_queue_state(self): + enabled = self.event_storage.get_event_queue_state() + + assert enabled is False + + def test_save_event_queue_enabled_and_retrieve(self): + self.event_storage.save_event_queue_state(True) + enabled = self.event_storage.get_event_queue_state() + + assert enabled is True + + def test_save_event_queue_disabled_and_retrieve(self): + self.event_storage.save_event_queue_state(False) + enabled = self.event_storage.get_event_queue_state() + + assert enabled is False + + def test_clear_events_empty_database(self): + self._test_clear_events() + + def _test_clear_events(self) -> None: + self.event_storage.clear_events() + + events = list(self.event_storage.iter_from_event(0)) + last_event = self.event_storage.get_last_event() + last_group_id = self.event_storage.get_last_group_id() + + assert events == [] + assert last_event is None + assert last_group_id is None + + def test_clear_events_full_database(self): + n_events = 10 + expected_last_group_id = 4 + expected_node_state = NodeState.SYNC + + self._populate_events_and_last_group_id(n_events=n_events, last_group_id=4) + self.event_storage.save_node_state(expected_node_state) + self.event_storage.save_event_queue_state(True) + + events = 
list(self.event_storage.iter_from_event(0)) + last_group_id = self.event_storage.get_last_group_id() + node_state = self.event_storage.get_node_state() + event_queue_state = self.event_storage.get_event_queue_state() + + assert len(events) == n_events + assert last_group_id == expected_last_group_id + assert node_state == expected_node_state + assert event_queue_state is True + + self._test_clear_events() + + node_state = self.event_storage.get_node_state() + event_queue_state = self.event_storage.get_event_queue_state() + + assert node_state == expected_node_state + assert event_queue_state is True + @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') class EventStorageRocksDBTest(EventStorageBaseTest): diff --git a/tests/others/test_cli_builder.py b/tests/others/test_cli_builder.py index eb248003e..4aa4b0e00 100644 --- a/tests/others/test_cli_builder.py +++ b/tests/others/test_cli_builder.py @@ -58,7 +58,7 @@ def test_all_default(self): self.assertNotIn(SyncVersion.V2, manager.connections._sync_factories) self.assertFalse(self.resources_builder._built_prometheus) self.assertFalse(self.resources_builder._built_status) - self.assertIsNone(manager._event_manager) + self.assertFalse(manager._enable_event_queue) @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') def test_cache_storage(self): @@ -163,7 +163,6 @@ def test_event_queue_with_rocksdb_storage(self): self.assertIsInstance(manager._event_manager, EventManager) self.assertIsInstance(manager._event_manager._event_storage, EventRocksDBStorage) self.assertIsInstance(manager._event_manager._event_ws_factory, EventWebsocketFactory) - self.assertFalse(manager._event_manager.emit_load_events) def test_event_queue_with_memory_storage(self): manager = self._build(['--x-enable-event-queue', '--memory-storage']) @@ -171,16 +170,7 @@ def test_event_queue_with_memory_storage(self): self.assertIsInstance(manager._event_manager, EventManager) 
self.assertIsInstance(manager._event_manager._event_storage, EventMemoryStorage) self.assertIsInstance(manager._event_manager._event_ws_factory, EventWebsocketFactory) - self.assertFalse(manager._event_manager.emit_load_events) def test_event_queue_with_full_verification(self): args = ['--x-enable-event-queue', '--memory-storage', '--x-full-verification'] self._build_with_error(args, '--x-full-verification cannot be used with --x-enable-event-queue') - - def test_event_queue_with_emit_load_events(self): - manager = self._build(['--x-enable-event-queue', '--memory-storage', '--x-emit-load-events']) - - self.assertIsInstance(manager._event_manager, EventManager) - self.assertIsInstance(manager._event_manager._event_storage, EventMemoryStorage) - self.assertIsInstance(manager._event_manager._event_ws_factory, EventWebsocketFactory) - self.assertTrue(manager._event_manager.emit_load_events) From 0b709767287fccb95455e998434a8144cdd5b6ba Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Wed, 17 May 2023 11:19:33 -0300 Subject: [PATCH 03/24] chore: add yamllint to all files (#598) --- .codecov.yml | 2 +- .github/workflows/docker.yml | 227 +++++++++--------- .github/workflows/main.yml | 155 ++++++------ .yamllint.yml | 10 + Makefile | 8 +- .../unreleased/fix-output-negative-value.yml | 2 +- .../fix-ws-notification-broadcast.yml | 2 +- poetry.lock | 85 ++++++- pyproject.toml | 1 + 9 files changed, 295 insertions(+), 197 deletions(-) create mode 100644 .yamllint.yml diff --git a/.codecov.yml b/.codecov.yml index 54f8a414e..6c772a419 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -7,7 +7,7 @@ coverage: # https://docs.codecov.io/docs/commit-status status: # TODO: re-enable patch in the future - patch: off + patch: false project: default: # minimum coverage ratio that the commit must meet to be considered a success diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index b3a2df32d..358f0f101 100644 --- a/.github/workflows/docker.yml +++ 
b/.github/workflows/docker.yml @@ -1,14 +1,15 @@ +# yamllint disable rule:line-length name: docker -on: +on: # yamllint disable-line rule:truthy push: branches: - - master - - dev - - 'experimental/**' + - master + - dev + - 'experimental/**' tags: - - v* + - v* schedule: - - cron: '0 4 * * *' # nightlies at 4am UTC + - cron: '0 4 * * *' # nightlies at 4am UTC env: TEST_TAG: hathor-core:test jobs: @@ -20,114 +21,114 @@ jobs: fail-fast: false matrix: python-impl: - - python - - pypy + - python + - pypy python-version: - - '3.8' - - '3.9' - - '3.10' - - '3.11' + - '3.8' + - '3.9' + - '3.10' + - '3.11' exclude: - # XXX: neither pypy-3.10 nor pypy-3.11 exist yet, maybe pypy-3.10 will be out on PyPy v7.3.10 - - python-impl: pypy - python-version: '3.10' - - python-impl: pypy - python-version: '3.11' + # XXX: neither pypy-3.10 nor pypy-3.11 exist yet, maybe pypy-3.10 will be out on PyPy v7.3.10 + - python-impl: pypy + python-version: '3.10' + - python-impl: pypy + python-version: '3.11' steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Prepare base version - id: prep - run: | - export GITHUB_REF='${{ github.ref }}' - export GITHUB_EVENT_NAME='${{ github.event_name }}' - export GITHUB_SHA='${{ github.sha }}' - export GITHUB_EVENT_DEFAULT_BRANCH='${{ github.event.repository.default_branch }}' - export GITHUB_EVENT_NUMBER='${{ github.event.number }}' - export MATRIX_PYTHON_IMPL='${{ matrix.python-impl }}' - export MATRIX_PYTHON_VERSION='${{ matrix.python-version }}' - export SECRETS_DOCKERHUB_IMAGE='${{ secrets.DOCKERHUB_IMAGE }}' - export SECRETS_GHCR_IMAGE='${{ secrets.GHCR_IMAGE }}' + - name: Checkout + uses: actions/checkout@v3 + - name: Prepare base version + id: prep + run: | + export GITHUB_REF='${{ github.ref }}' + export GITHUB_EVENT_NAME='${{ github.event_name }}' + export GITHUB_SHA='${{ github.sha }}' + export GITHUB_EVENT_DEFAULT_BRANCH='${{ github.event.repository.default_branch }}' + export GITHUB_EVENT_NUMBER='${{ github.event.number }}' + 
export MATRIX_PYTHON_IMPL='${{ matrix.python-impl }}' + export MATRIX_PYTHON_VERSION='${{ matrix.python-version }}' + export SECRETS_DOCKERHUB_IMAGE='${{ secrets.DOCKERHUB_IMAGE }}' + export SECRETS_GHCR_IMAGE='${{ secrets.GHCR_IMAGE }}' - python extras/github/docker.py - - name: Check version - if: steps.prep.outputs.check-version - run: | - make check-version VERSION='${{ steps.prep.outputs.check-version }}' - - name: Set up QEMU # arm64 is not available natively - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - with: - version: latest - install: true - driver-opts: network=host - - name: Login to DockerHub - uses: docker/login-action@v2 - if: steps.prep.outputs.login-dockerhub == 'true' - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 - if: steps.prep.outputs.login-ghcr == 'true' - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Cache Docker layers - uses: actions/cache@v3 - if: steps.prep_base_version.outputs.is-nightly == 'false' - with: - path: /tmp/.buildx-cache - # this key is setup such that every branch has its cache and new branches can reuse master's cache, but not the other way around - key: ${{ runner.os }}-buildx-${{ matrix.python-impl }}${{ matrix.python-version }}-${{ github.head_ref || github.ref }}-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-buildx-${{ matrix.python-impl }}${{ matrix.python-version }}-refs/heads/master- - - name: Build and export to Docker - uses: docker/build-push-action@v3 - with: - context: . 
- file: ${{ steps.prep.outputs.dockerfile }} - build-args: PYTHON=${{ matrix.python-version }} - pull: true - load: true - tags: ${{ env.TEST_TAG }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - name: Test image - run: docker run --rm ${{ env.TEST_TAG }} quick_test --data / --testnet - - name: Build and push - uses: docker/build-push-action@v3 - continue-on-error: ${{ matrix.python-impl == 'pypy' }} # PyPy is not first-class and has been causing some build failures - if: ${{ !env.ACT }} # Skip this step when testing locally with https://github.com/nektos/act - with: - context: . - file: ${{ steps.prep.outputs.dockerfile }} - build-args: PYTHON=${{ matrix.python-version }} - platforms: linux/amd64,linux/arm64 - pull: true - push: ${{ github.event_name != 'pull_request' && steps.prep.outputs.push }} - tags: ${{ steps.prep.outputs.tags }} - # see: https://github.com/opencontainers/image-spec/blob/master/annotations.md - labels: | - org.opencontainers.image.title=${{ github.event.repository.name }} - org.opencontainers.image.description=${{ github.event.repository.description }} - org.opencontainers.image.url=${{ github.event.repository.html_url }} - org.opencontainers.image.source=${{ github.event.repository.clone_url }} - org.opencontainers.image.version=${{ steps.prep.outputs.version }} - org.opencontainers.image.created=${{ steps.prep.outputs.created }} - org.opencontainers.image.revision=${{ github.sha }} - org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }} - cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache - - name: Slack Notification - if: ${{ steps.prep.outputs.slack-notification-version && steps.prep_base_version.outputs.disable-slack-notification == 'false' && job.status == 'success' }} - uses: rtCamp/action-slack-notify@28e8b353eabda5998a2e1203aed33c5999944779 - env: - SLACK_COLOR: ${{ job.status }} # It can turn the job status into 
a color. Success will be green. - SLACK_MESSAGE: 'We will be deploying this new image soon. Get in touch with the hathor-core team if you want to talk about this deployment.' - SLACK_TITLE: 'Hathor Core - new ${{ steps.prep.outputs.slack-notification-version }} Docker image pushed :rocket:' - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} - SLACK_FOOTER: '' - MSG_MINIMAL: actions url + python extras/github/docker.py + - name: Check version + if: steps.prep.outputs.check-version + run: | + make check-version VERSION='${{ steps.prep.outputs.check-version }}' + - name: Set up QEMU # arm64 is not available natively + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + version: latest + install: true + driver-opts: network=host + - name: Login to DockerHub + uses: docker/login-action@v2 + if: steps.prep.outputs.login-dockerhub == 'true' + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + if: steps.prep.outputs.login-ghcr == 'true' + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Cache Docker layers + uses: actions/cache@v3 + if: steps.prep_base_version.outputs.is-nightly == 'false' + with: + path: /tmp/.buildx-cache + # this key is setup such that every branch has its cache and new branches can reuse master's cache, but not the other way around + key: ${{ runner.os }}-buildx-${{ matrix.python-impl }}${{ matrix.python-version }}-${{ github.head_ref || github.ref }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx-${{ matrix.python-impl }}${{ matrix.python-version }}-refs/heads/master- + - name: Build and export to Docker + uses: docker/build-push-action@v3 + with: + context: . 
+ file: ${{ steps.prep.outputs.dockerfile }} + build-args: PYTHON=${{ matrix.python-version }} + pull: true + load: true + tags: ${{ env.TEST_TAG }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + - name: Test image + run: docker run --rm ${{ env.TEST_TAG }} quick_test --data / --testnet + - name: Build and push + uses: docker/build-push-action@v3 + continue-on-error: ${{ matrix.python-impl == 'pypy' }} # PyPy is not first-class and has been causing some build failures + if: ${{ !env.ACT }} # Skip this step when testing locally with https://github.com/nektos/act + with: + context: . + file: ${{ steps.prep.outputs.dockerfile }} + build-args: PYTHON=${{ matrix.python-version }} + platforms: linux/amd64,linux/arm64 + pull: true + push: ${{ github.event_name != 'pull_request' && steps.prep.outputs.push }} + tags: ${{ steps.prep.outputs.tags }} + # see: https://github.com/opencontainers/image-spec/blob/master/annotations.md + labels: | + org.opencontainers.image.title=${{ github.event.repository.name }} + org.opencontainers.image.description=${{ github.event.repository.description }} + org.opencontainers.image.url=${{ github.event.repository.html_url }} + org.opencontainers.image.source=${{ github.event.repository.clone_url }} + org.opencontainers.image.version=${{ steps.prep.outputs.version }} + org.opencontainers.image.created=${{ steps.prep.outputs.created }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }} + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + - name: Slack Notification + if: ${{ steps.prep.outputs.slack-notification-version && steps.prep_base_version.outputs.disable-slack-notification == 'false' && job.status == 'success' }} + uses: rtCamp/action-slack-notify@28e8b353eabda5998a2e1203aed33c5999944779 + env: + SLACK_COLOR: ${{ job.status }} # It can turn the job status into 
a color. Success will be green. + SLACK_MESSAGE: 'We will be deploying this new image soon. Get in touch with the hathor-core team if you want to talk about this deployment.' + SLACK_TITLE: 'Hathor Core - new ${{ steps.prep.outputs.slack-notification-version }} Docker image pushed :rocket:' + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_FOOTER: '' + MSG_MINIMAL: actions url diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 99c9b2bb6..304734f4b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,11 +1,12 @@ +# yamllint disable rule:line-length name: tests -on: +on: # yamllint disable-line rule:truthy push: branches: - - master - - dev + - master + - dev tags: - - v* + - v* pull_request: jobs: matrix: @@ -13,45 +14,45 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - id: set-matrix - shell: python - run: | - import os - import json - full_matrix = { - 'python': ['3.8', '3.9', '3.10', '3.11'], - # available OS's: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idruns-on - 'os': ['ubuntu-22.04', 'macos-12', 'windows-2022'], - 'include': [ - # XXX: tests fail on these, not sure why, when running them individually each on passes, but not on `make tests` - # {'os': 'ubuntu-22.04', 'python': 'pypy-3.8'}, - # {'os': 'ubuntu-22.04', 'python': 'pypy-3.9'}, - ], - } - # this is the fastest one: - reduced_matrix = { - 'python': ['3.9'], - 'os': ['ubuntu-22.04'], - } - github_repository = os.environ['GITHUB_REPOSITORY'] - if github_repository.lower() == 'hathornetwork/hathor-core': - matrix = full_matrix - else: - matrix = reduced_matrix - print('::set-output name=matrix::' + json.dumps(matrix)) + - id: set-matrix + shell: python + run: | + import os + import json + full_matrix = { + 'python': ['3.8', '3.9', '3.10', '3.11'], + # available OS's: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idruns-on + 
'os': ['ubuntu-22.04', 'macos-12', 'windows-2022'], + 'include': [ + # XXX: tests fail on these, not sure why, when running them individually each on passes, but not on `make tests` + # {'os': 'ubuntu-22.04', 'python': 'pypy-3.8'}, + # {'os': 'ubuntu-22.04', 'python': 'pypy-3.9'}, + ], + } + # this is the fastest one: + reduced_matrix = { + 'python': ['3.9'], + 'os': ['ubuntu-22.04'], + } + github_repository = os.environ['GITHUB_REPOSITORY'] + if github_repository.lower() == 'hathornetwork/hathor-core': + matrix = full_matrix + else: + matrix = reduced_matrix + print('::set-output name=matrix::' + json.dumps(matrix)) check-matrix: runs-on: ubuntu-latest needs: matrix steps: - - name: Install json2yaml - run: | - sudo npm install -g json2yaml - - name: Check matrix definition - run: | - matrix='${{ needs.matrix.outputs.matrix }}' - echo $matrix - echo $matrix | jq . - echo $matrix | json2yaml + - name: Install json2yaml + run: | + sudo npm install -g json2yaml + - name: Check matrix definition + run: | + matrix='${{ needs.matrix.outputs.matrix }}' + echo $matrix + echo $matrix | jq . 
+ echo $matrix | json2yaml test: name: python-${{ matrix.python }} (${{ matrix.os }}) runs-on: ${{ matrix.os }} @@ -61,41 +62,41 @@ jobs: fail-fast: false matrix: ${{fromJson(needs.matrix.outputs.matrix)}} steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Install Poetry - run: pipx install poetry - - name: Set up Python ${{ matrix.python }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python }} - cache: 'poetry' - - name: Install Ubuntu dependencies - if: startsWith(matrix.os, 'ubuntu') - run: | - sudo apt-get -qy update - sudo apt-get -qy install graphviz librocksdb-dev libsnappy-dev liblz4-dev - - name: Install macOS dependencies - if: startsWith(matrix.os, 'macos') - run: | - brew cleanup -q - # brew update -q - brew install -q graphviz rocksdb pkg-config - - name: Install Poetry dependencies - run: poetry install -n --no-root - - name: Cache mypy - uses: actions/cache@v3 - with: - path: .mypy_cache - # this key is setup such that every branch has its cache and new branches can reuse dev's cache, but not the other way around - key: ${{ runner.os }}-py${{ matrix.python }}-mypy-${{ github.head_ref || github.ref }} - restore-keys: | - ${{ runner.os }}-py${{ matrix.python }}-mypy-refs/heads/dev- - ${{ runner.os }}-py${{ matrix.python }}-mypy- - - name: Run linters - run: poetry run make check - - name: Run tests - run: poetry run make tests - - name: Upload coverage - uses: codecov/codecov-action@v3 - if: matrix.python == 3.9 && startsWith(matrix.os, 'ubuntu') + - name: Checkout + uses: actions/checkout@v3 + - name: Install Poetry + run: pipx install poetry + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python }} + cache: 'poetry' + - name: Install Ubuntu dependencies + if: startsWith(matrix.os, 'ubuntu') + run: | + sudo apt-get -qy update + sudo apt-get -qy install graphviz librocksdb-dev libsnappy-dev liblz4-dev + - name: Install macOS dependencies + if: 
startsWith(matrix.os, 'macos') + run: | + brew cleanup -q + # brew update -q + brew install -q graphviz rocksdb pkg-config + - name: Install Poetry dependencies + run: poetry install -n --no-root + - name: Cache mypy + uses: actions/cache@v3 + with: + path: .mypy_cache + # this key is setup such that every branch has its cache and new branches can reuse dev's cache, but not the other way around + key: ${{ runner.os }}-py${{ matrix.python }}-mypy-${{ github.head_ref || github.ref }} + restore-keys: | + ${{ runner.os }}-py${{ matrix.python }}-mypy-refs/heads/dev- + ${{ runner.os }}-py${{ matrix.python }}-mypy- + - name: Run linters + run: poetry run make check + - name: Run tests + run: poetry run make tests + - name: Upload coverage + uses: codecov/codecov-action@v3 + if: matrix.python == 3.9 && startsWith(matrix.os, 'ubuntu') diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 000000000..077d1e328 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,10 @@ +extends: default + +rules: + document-start: disable + line-length: + max: 100 + new-lines: + type: platform + comments: + min-spaces-from-content: 1 diff --git a/Makefile b/Makefile index d22dbcccc..56a229b4d 100644 --- a/Makefile +++ b/Makefile @@ -74,15 +74,19 @@ flake8: isort-check: isort --ac --check-only $(py_sources) +.PHONY: yamllint +yamllint: + yamllint . 
+ .PHONY: check-version check-version: bash ./extras/check_version.sh $(VERSION) .PHONY: check -check: check-version flake8 isort-check mypy +check: check-version yamllint flake8 isort-check mypy .PHONY: dcheck -dcheck: check-version flake8 isort-check dmypy +dcheck: check-version yamllint flake8 isort-check dmypy # formatting: diff --git a/changelogs/unreleased/fix-output-negative-value.yml b/changelogs/unreleased/fix-output-negative-value.yml index b33f66135..3c8784e15 100644 --- a/changelogs/unreleased/fix-output-negative-value.yml +++ b/changelogs/unreleased/fix-output-negative-value.yml @@ -2,4 +2,4 @@ title: Prevent outputs from accepting negative values merge_request: 303 author: -type: fix \ No newline at end of file +type: fix diff --git a/changelogs/unreleased/fix-ws-notification-broadcast.yml b/changelogs/unreleased/fix-ws-notification-broadcast.yml index 36a27451a..1361385b2 100644 --- a/changelogs/unreleased/fix-ws-notification-broadcast.yml +++ b/changelogs/unreleased/fix-ws-notification-broadcast.yml @@ -2,4 +2,4 @@ title: Fixing ws message being sent to wrong user merge_request: 302 author: -type: fix \ No newline at end of file +type: fix diff --git a/poetry.lock b/poetry.lock index 72e4cd6ee..46758372e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand. +# This file is automatically @generated by Poetry and should not be changed by hand. [[package]] name = "aiohttp" @@ -1102,6 +1102,18 @@ files = [ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] testing = ["docopt", "pytest (<6.0.0)"] +[[package]] +name = "pathspec" +version = "0.11.1" +description = "Utility library for gitignore style pattern matching of file paths." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, +] + [[package]] name = "pexpect" version = "4.8.0" @@ -1451,6 +1463,56 @@ files = [ {file = "pywin32-305-cp39-cp39-win_amd64.whl", hash = "sha256:50768c6b7c3f0b38b7fb14dd4104da93ebced5f1a50dc0e834594bff6fbe1271"}, ] +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash 
= "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = 
"PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] + [[package]] name = "requests" version = "2.28.1" @@ -1940,6 +2002,25 @@ files = [ {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, ] +[[package]] +name = "yamllint" +version = "1.31.0" +description = "A linter for YAML files." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yamllint-1.31.0-py3-none-any.whl", hash = "sha256:15f4bdb645e6a4a0a22fe5415bc38b4a934c51419b30104896d2f3f95e329185"}, + {file = "yamllint-1.31.0.tar.gz", hash = "sha256:2d83f1d12f733e162a87e06b176149d7bb9c5bae4a9e5fce1c771d7f703f7a65"}, +] + +[package.dependencies] +pathspec = ">=0.5.3" +pyyaml = "*" + +[package.extras] +dev = ["doc8", "flake8", "flake8-import-order", "rstcheck[sphinx]", "sphinx"] + [[package]] name = "yarl" version = "1.8.2" @@ -2128,4 +2209,4 @@ sentry = ["sentry-sdk", "structlog-sentry"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4" -content-hash = "d1b70ed0381b8680d8c4ebf91fca2519fdc51ecb71b32833efac6747cc7ce183" +content-hash = "17520e2c0c85413966ac2feeedde03289e347ef625725a8677550f4e210e1c7e" diff --git a/pyproject.toml b/pyproject.toml index ac9e82044..e130cd9d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,7 @@ pytest = "~7.2.0" pytest-cov = "~4.0.0" flaky = "~3.7.0" pytest-xdist = "~3.2.0" +yamllint = "~1.31.0" # stubs: types-requests = "=2.28.11.4" types-pyopenssl = "=22.1.0.2" From 03a14fd026b1d7001158fe178cfec65e596a31f8 Mon Sep 17 00:00:00 2001 From: Jan Segre Date: Tue, 2 Aug 2022 15:23:28 +0200 Subject: [PATCH 04/24] chore(python): drop support for Python 3.8 --- .github/workflows/docker.yml | 1 - .github/workflows/main.yml | 3 +-- poetry.lock | 6 +++--- pyproject.toml | 3 +-- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 358f0f101..c2e22d2cc 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -24,7 +24,6 @@ jobs: - python - pypy python-version: - - '3.8' - '3.9' - '3.10' - '3.11' diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 304734f4b..55aaac55a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -20,12 +20,11 @@ jobs: import os import json full_matrix = { 
- 'python': ['3.8', '3.9', '3.10', '3.11'], + 'python': ['3.9', '3.10', '3.11'], # available OS's: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idruns-on 'os': ['ubuntu-22.04', 'macos-12', 'windows-2022'], 'include': [ # XXX: tests fail on these, not sure why, when running them individually each on passes, but not on `make tests` - # {'os': 'ubuntu-22.04', 'python': 'pypy-3.8'}, # {'os': 'ubuntu-22.04', 'python': 'pypy-3.9'}, ], } diff --git a/poetry.lock b/poetry.lock index 46758372e..7b1713470 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. [[package]] name = "aiohttp" @@ -2208,5 +2208,5 @@ sentry = ["sentry-sdk", "structlog-sentry"] [metadata] lock-version = "2.0" -python-versions = ">=3.8.1,<4" -content-hash = "17520e2c0c85413966ac2feeedde03289e347ef625725a8677550f4e210e1c7e" +python-versions = ">=3.9,<4" +content-hash = "1032dbd9e22b32f9f3780d270fb03c9c23d26b8e6a54c0befdb005027c4caa1a" diff --git a/pyproject.toml b/pyproject.toml index e130cd9d3..f0d03df73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,7 +23,6 @@ homepage = "https://hathor.network/" repository = "https://github.com/HathorNetwork/hathor-core/" # https://pypi.org/classifiers/ classifiers = [ - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -51,7 +50,7 @@ types-requests = "=2.28.11.4" types-pyopenssl = "=22.1.0.2" [tool.poetry.dependencies] -python = ">=3.8.1,<4" +python = ">=3.9,<4" twisted = "~22.10.0" autobahn = "~22.7.1" base58 = "~2.1.1" From 77a9a01fd0aad96c25ba83e9ca3eacab85e0f7a5 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Thu, 18 May 2023 11:59:59 -0300 Subject: [PATCH 05/24] refactor: move hathor settings to yaml 
configuration files [part 1/2] (#593) --- Makefile | 4 +- hathor/builder/cli_builder.py | 10 +- hathor/cli/run_node.py | 12 +- hathor/conf/__init__.py | 11 ++ hathor/conf/get_settings.py | 57 +++++- hathor/conf/mainnet.yml | 179 ++++++++++++++++++ hathor/conf/settings.py | 76 +++++++- hathor/conf/testnet.yml | 37 ++++ hathor/conf/unittests.yml | 20 ++ hathor/utils/pydantic.py | 3 +- hathor/utils/yaml.py | 23 +++ poetry.lock | 18 +- pyproject.toml | 2 + tests/conftest.py | 4 +- .../invalid_hathor_settings_fixture.yml | 40 ++++ .../missing_hathor_settings_fixture.yml | 39 ++++ .../valid_hathor_settings_fixture.yml | 40 ++++ tests/others/test_hathor_settings.py | 107 +++++++++++ 18 files changed, 658 insertions(+), 24 deletions(-) create mode 100644 hathor/conf/mainnet.yml create mode 100644 hathor/conf/testnet.yml create mode 100644 hathor/conf/unittests.yml create mode 100644 hathor/utils/yaml.py create mode 100644 tests/others/resources/invalid_hathor_settings_fixture.yml create mode 100644 tests/others/resources/missing_hathor_settings_fixture.yml create mode 100644 tests/others/resources/valid_hathor_settings_fixture.yml create mode 100644 tests/others/test_hathor_settings.py diff --git a/Makefile b/Makefile index 56a229b4d..3c6f57fd8 100644 --- a/Makefile +++ b/Makefile @@ -42,8 +42,8 @@ tests-quick: .PHONY: tests-genesis tests-genesis: - HATHOR_TEST_CONFIG_FILE=hathor.conf.mainnet pytest tests/tx/test_genesis.py - HATHOR_TEST_CONFIG_FILE=hathor.conf.testnet pytest tests/tx/test_genesis.py + HATHOR_TEST_CONFIG_YAML='./hathor/conf/mainnet.yml' pytest tests/tx/test_genesis.py + HATHOR_TEST_CONFIG_YAML='./hathor/conf/testnet.yml' pytest tests/tx/test_genesis.py .PHONY: tests-ci tests-ci: diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index b6ed4006a..39ff6b68b 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -52,7 +52,7 @@ def check_or_raise(self, condition: bool, message: str) -> None: def 
create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorManager: import hathor from hathor.conf import HathorSettings - from hathor.conf.get_settings import get_settings_module + from hathor.conf.get_settings import get_settings_filepath, get_settings_module from hathor.daa import TestMode, _set_test_mode from hathor.event.storage import EventMemoryStorage, EventRocksDBStorage, EventStorage from hathor.event.websocket.factory import EventWebsocketFactory @@ -69,7 +69,11 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa from hathor.util import get_environment_info settings = HathorSettings() - settings_module = get_settings_module() # only used for logging its location + + # only used for logging its location + settings_module = get_settings_module() + settings_source = settings_module.__file__ if settings_module is not None else get_settings_filepath() + self.log = logger.new() self.reactor = reactor @@ -85,7 +89,7 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa my_peer_id=str(peer_id.id), python=python, platform=platform.platform(), - settings=settings_module.__file__, + settings=settings_source, ) tx_storage: TransactionStorage diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py index 809592eb4..c8ea2af04 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -19,6 +19,8 @@ from structlog import get_logger +from hathor.conf import TESTNET_SETTINGS_FILEPATH + logger = get_logger() # LOGGING_CAPTURE_STDOUT = True @@ -102,6 +104,7 @@ def create_parser(cls) -> ArgumentParser: parser.add_argument('--x-enable-event-queue', action='store_true', help='Enable event queue mechanism') parser.add_argument('--peer-id-blacklist', action='extend', default=[], nargs='+', type=str, help='Peer IDs to forbid connection') + parser.add_argument('--config-yaml', type=str, help='Configuration yaml filepath') return parser def prepare(self, args: Namespace, *, 
register_resources: bool = True) -> None: @@ -309,9 +312,12 @@ def __init__(self, *, argv=None): argv = sys.argv[1:] self.parser = self.create_parser() args = self.parse_args(argv) - if args.testnet: - if not os.environ.get('HATHOR_CONFIG_FILE'): - os.environ['HATHOR_CONFIG_FILE'] = 'hathor.conf.testnet' + + if args.config_yaml: + os.environ['HATHOR_CONFIG_YAML'] = args.config_yaml + elif args.testnet: + os.environ['HATHOR_CONFIG_YAML'] = TESTNET_SETTINGS_FILEPATH + self.prepare(args) self.register_signal_handlers(args) if args.sysctl: diff --git a/hathor/conf/__init__.py b/hathor/conf/__init__.py index 5d4ccd33a..99dde297c 100644 --- a/hathor/conf/__init__.py +++ b/hathor/conf/__init__.py @@ -12,8 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from pathlib import Path + from hathor.conf.get_settings import HathorSettings +parent_dir = Path(__file__).parent + +MAINNET_SETTINGS_FILEPATH = str(parent_dir / 'mainnet.yml') +TESTNET_SETTINGS_FILEPATH = str(parent_dir / 'testnet.yml') +UNITTESTS_SETTINGS_FILEPATH = str(parent_dir / 'unittests.yml') + __all__ = [ + 'MAINNET_SETTINGS_FILEPATH', + 'TESTNET_SETTINGS_FILEPATH', + 'UNITTESTS_SETTINGS_FILEPATH', 'HathorSettings', ] diff --git a/hathor/conf/get_settings.py b/hathor/conf/get_settings.py index 69b040c93..d555204f1 100644 --- a/hathor/conf/get_settings.py +++ b/hathor/conf/get_settings.py @@ -17,33 +17,72 @@ from types import ModuleType from typing import Optional +from structlog import get_logger + +from hathor import conf from hathor.conf.settings import HathorSettings as Settings +logger = get_logger() + +_settings_filepath: Optional[str] = None _config_file: Optional[str] = None def HathorSettings() -> Settings: - """ Return configuration file namedtuple - Get the file from environment variable 'HATHOR_CONFIG_FILE' - If not set we return the config file of the mainnet + """ + Returns the configuration named tuple. 
+ + + Tries to get the configuration from a python module in the 'HATHOR_CONFIG_FILE' env var, which will be deprecated. + If not found, tries to get it from a yaml filepath in the 'HATHOR_CONFIG_YAML' env var, which will be the new standard. + + If neither is set, or if the module import fails, the mainnet configuration is returned. """ settings_module = get_settings_module() - settings = getattr(settings_module, 'SETTINGS') - assert isinstance(settings, Settings) - return settings + if settings_module is not None: + log = logger.new() + log.warn( + "Setting a config module via the 'HATHOR_CONFIG_FILE' env var will be deprecated soon. " + "Use the '--config-yaml' CLI option or the 'HATHOR_CONFIG_YAML' env var to set a yaml filepath instead." + ) + settings = getattr(settings_module, 'SETTINGS') + assert isinstance(settings, Settings) + return settings -def get_settings_module() -> ModuleType: + settings_filepath = get_settings_filepath() + + return Settings.from_yaml(filepath=settings_filepath) + + +def get_settings_module() -> Optional[ModuleType]: global _config_file # Import config file for network - default_file = 'hathor.conf.mainnet' - config_file = os.environ.get('HATHOR_CONFIG_FILE', default_file) + config_file = os.environ.get('HATHOR_CONFIG_FILE') if _config_file is None: _config_file = config_file elif _config_file != config_file: raise Exception('loading config twice with a different file') + + if not config_file: + return None + try: module = importlib.import_module(config_file) except ModuleNotFoundError: + default_file = 'hathor.conf.mainnet' module = importlib.import_module(default_file) + return module + + +def get_settings_filepath() -> str: + global _settings_filepath + + new_settings_filepath = os.environ.get('HATHOR_CONFIG_YAML', conf.MAINNET_SETTINGS_FILEPATH) + + if _settings_filepath is not None and _settings_filepath != new_settings_filepath: + raise Exception('loading config twice with a different file') + + _settings_filepath = 
new_settings_filepath + + return new_settings_filepath diff --git a/hathor/conf/mainnet.yml b/hathor/conf/mainnet.yml new file mode 100644 index 000000000..fc90f3e90 --- /dev/null +++ b/hathor/conf/mainnet.yml @@ -0,0 +1,179 @@ +P2PKH_VERSION_BYTE: x28 +MULTISIG_VERSION_BYTE: x64 +NETWORK_NAME: mainnet +BOOTSTRAP_DNS: + - mainnet.hathor.network +ENABLE_PEER_WHITELIST: true +WHITELIST_URL: https://hathor-public-files.s3.amazonaws.com/whitelist_peer_ids + +# Genesis stuff +GENESIS_OUTPUT_SCRIPT: 76a9147fd4ae0e4fb2d2854e76d359029d8078bb99649e88ac +GENESIS_TIMESTAMP: 1578075305 +GENESIS_BLOCK_NONCE: 2591358 +GENESIS_BLOCK_HASH: 000006cb93385b8b87a545a1cbb6197e6caff600c12cc12fc54250d39c8088fc +GENESIS_TX1_NONCE: 7715 +GENESIS_TX1_HASH: 0002d4d2a15def7604688e1878ab681142a7b155cbe52a6b4e031250ae96db0a +GENESIS_TX2_NONCE: 3769 +GENESIS_TX2_HASH: 0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c02d873c7abbf9 + +CHECKPOINTS: + 100_000: 0000000000001247073138556b4f60fff3ff6eec6521373ccee5a6526a7c10af + 200_000: 00000000000001bf13197340ae0807df2c16f4959da6054af822550d7b20e19e + 300_000: 00000000000000e1e8bdba2006cc34db3a1f20294cbe87bd52cceda245238290 + 400_000: 000000000000002ae98f2a15db331d059eeed34d71f813f51d1ac1dbf3d94089 + 500_000: 00000000000000036f2f7234f7bf83b5746ce9b8179922d2781efd82aa3d72bf + 600_000: 0000000000000001ad38d502f537ce757d7e732230d22434238ca215dd92cca1 + 700_000: 000000000000000066f04be2f3a8607c1c71682e65e07150822fb03afcbf4035 + 800_000: 0000000000000000958372b3ce24a26ce97a3b063c835e7d55c632f289f2cdb0 + 900_000: 0000000000000000c9bac3c3c71a1324f66481be03ad0e5d5fbbed94fc6b8794 + 1_000_000: 00000000000000001060adafe703b8aa28c7d2cfcbddf77d52e62ea0a1df5416 + 1_100_000: 00000000000000000ecc349992158a3972e7a24af494a891a8d1ae3ab982ee4e + 1_200_000: 000000000000000091ddabd35a0c3984609e2892b72b14d38d23d58e1fa87c91 + 1_300_000: 00000000000000000244794568649ac43e0abd4b53b1a583b6cc8e243e65f582 + 1_400_000: 
000000000000000011a65b1c3cba2b94ad05525ac2ec60f315bb7b204c8160c7 + 1_500_000: 0000000000000000ddbbf005a3970f256ad46167fc2143796d8f87c0c905e657 + 1_600_000: 00000000000000011098dda3dbe2ac95287ec0f3c12edc5c054dd8edc70cd6c3 + 1_700_000: 000000000000000054278ce817fda6cd3287144545babf0a415f883d074601ab + 1_800_000: 00000000000000002110c5ccb781bee9fea0a4cbbd49e52023ffb5900732ee4d + 1_900_000: 000000000000000032a8f2411190e1e49ff577d352950011083b85d935453338 + 2_000_000: 000000000000000005c31cc418e95497dbb2017a6ae2683a1550bd61f180b5b1 + 2_100_000: 00000000000000000c96c02d514017263d4e624a61fb9f10babcbf8d4632b67b + 2_200_000: 00000000000000001016a7bbb6ccfc957ba6d29a562b43e8620f57ddc9147dde + 2_300_000: 0000000000000000164dafd8d922c783a99d83f66220eb7c54f11bee1aaac126 + 2_400_000: 0000000000000000067aa4bf7306dadf0f56e38380327a472f55e7be72fbe7da + 2_500_000: 00000000000000000c418b03ceb3a4fe7023674811f8ec94d7b9d5b1879ddc28 + 2_600_000: 0000000000000000020af703e2955e3f7934e8bc376da2ba6cfc6dc609feaf84 + 2_700_000: 00000000000000000cf3a35ab01a2281024ca4ca7871f5a6d67106eb36151038 + 2_800_000: 000000000000000004439733fd419a8a747e8afe2f89348a17c1fac24538a63c + 2_900_000: 0000000000000000090cbd5a7958c82a2b969103001d92334f287dadcf3e01bc + 3_000_000: 000000000000000013c9086f4ce441f5db5de55a5e235f4f7f1ef223aedfe2db + 3_100_000: 00000000000000000d226a5998ffc65af89b1226126b1af1f8d0712a5301c775 + 3_200_000: 0000000000000000028d9629d85d93d0f5e798a498ca7b1710ffc157fa045cd5 + 3_300_000: 0000000000000000065b74441acb3d2ff770d384b2bad44c9823f26a0327690c + 3_400_000: 000000000000000077242c961a0c6f708bc671a8372eb8b095311f091fddc6c3 + +SOFT_VOIDED_TX_IDS: + - 0000000012a922a6887497bed9c41e5ed7dc7213cae107db295602168266cd02 + - 000000001980b413ad5b5c5152338093aecfb1f5a7563d4e7fef8fb240a50bb9 + - 000000001bf05ea882891c99e5285f38e001f10226380a830168d8070c21b78c + - 0000000024e0320e4dd6990299f083db2448c9f200223cad8ac23c282fd4c80e + - 000000002767852b0eaca53a0f91db8202bea171c0d060fca4ae1030fd2a2e7a + - 
000000002875eff18af64f50481bc7576224572da7d63a43e994110b0002f6b7 + - 000000003e888a6a898a59c569ed839d047e5490554b132b2a3fd96bca8a8166 + - 0000000042ecfa3e98251eb56a8c19f6914b8cd275bcd49c1c84f9fc28123207 + - 0000000051c62be84b273ac17bba34426c654464eff7b7563208c4a3a2582fb6 + - 0000000058738ab486ec8da130e394361cb78fb331c5b5adf11798e046d89f08 + - 00000000818f72870b4df1961e8b81daacc00283490758c3ef6741245116e6ad + - 00000000998bacf9dfc135233f83127a01ad61f5707955f4e0d0d12c80e85f2e + - 00000000b733604d8afdf8102b7dcf9a29605312c0466f98f5183213c4e1d327 + - 00000000c57350f31fa1b09af55a6ea722867a596b5f5b408c541bfaec38fd8f + - 00000000c58405f10e5a19f46295f08cb3c9d3e0479ff7ff418b9ad5e2c074d4 + - 00000000c780100a67213a1cf726f57bfd02e4d7e673b24693e40dab1eabc372 + - 00000000d7dbc1e0b99d6940561ddd9b99fa2f51fde82ea343156f90daa0fa0a + - 00000000f44f6785caca715698bafe9a60895bb6675579b0c33f6b45330a538c + - 00000017df9acb795a358e8a6c1afde3ac0d3dfd6241441320b9449fdd27534d + - 00000042d700981477caec92d78861bac49ef72d8d54dbaf6fbc316025563f93 + - 000000a56b177bfc6d3b83c351cb99cba665898c029f7bb628d36bdc9a3053cc + - 000000ac12f9c538404cc8b44f28e6354f76740d0daf206755f74c5b8bcc660a + - 000000b2af584901b731c4d2d6207a531ce3f4447c6470de9c0b189fe2cd616e + - 000000d133f2710bafd4d8b63b81081f629cf34ebcaa70ed44243a014ed85a1a + - 000000dc6c899e76793bceab54ceb124138ab9fc447f01a3beea3f4dd0549186 + - 000000ebf1b6246ac0f859f4e88544a51b71ca6a8e3573149b34e06903aec338 + - 000000eda13cf4e1b33b8ff3c62b892e6c4e9c05e865173123283984424de827 + - 000000f1dc1476e3921a782d3cd815aec3b7b68723d0b9599fbd99fc9a7057ee + - 000000ffb042976e52200535299f5a4cc13805949d6c233adf8bf26401403506 + - 0000011e8f0ff3a741a48bcc49acce5680d593cf1c69d39aaf667c1dd2f850a7 + - 00000128fce693d2c3642d9c906def03049600c48252089e5d649e992f9a0542 + - 0000013cf9daed1138f88829117d36ce59e231fde8b724d715af190266b322c8 + - 000001521fd0530e6e67e122bd0454dc1a62f8687539bf3b4adf2e84306a4a6d + - 0000016617061d62979146812c422a52f381b3fd48e3fbcdc6962d7ef86f73f6 + 
- 000001664d4736d66c0fcd4241e0166103f7569eed2150f86bc0c7c65c063d80 + - 000001a87ca6db997b56837df35d70fcab3960e7ff0c0385d1aa455931ed55bd + - 000001c2ddc22637d06ba993b253b05dc94cf4c7d835b961e7741543a584e395 + - 000001c90db53e28c8f012c752591ccb7e755a74c86a655a2900e0bd3a7d0ecc + - 000001cf28c56059e3b4516eb0c8839b981940df0c9cb3066a5ad0ae5944c4a5 + - 000001e6967e87d4cca6fda912458b3eb51f74409d12be997b8a84f02b20218d + - 000001f6e130f3291548f8a1164a252f2b229cce2629647e60393ef31e4d483c + - 000001f78b3e0ca9d36a7354bd9fadea960f89536afc258e62f0fa544204405d + - 0000020857d6a0d3291bda650f4c8f85c073613641f121da48c2debf26d72884 + - 000002089edd265c5b50d6ceb3eb862c2fffaff269ca4b7a024579cd44ccfe42 + - 0000020a48625f27ce380fde59b5e3dfd4204e31f400b2d5959262dbf47b5dc6 + - 0000020c2ed05a4c23f89fc9de4b8bb6f93d86b798156fbd8e0abf06c8711ac0 + - 00000212fbda0a12653e5298b44a7c3795f51a0eeb379aa660a7a30903c67cc8 + - 000002187a0b41ee345cff15e9c914a84573e3dcdb71b55f3f44ab55e603da92 + - 00000222f01c219470806f8866f97d73a467bd4cfe3af2eee82bddc4f5e80a17 + - 0000023444fb134782db2462a6a0b810ce8b31041de539d0cb3c96b33107af99 + - 0000023464ecb9218d4d21a9397fbf22b49820af65c6b07d1e56fe7cf15baed7 + - 0000023bcf6f92885339c3a8fbb55e2c4f220241de18e6666d217ba04b238cd3 + - 00000241f25d2f75e657b507feba5d6d3819644f257db4bc1054c5f72c83b8a7 + - 00000258411bdb0d128ebfc9b127c039ee4317e0f95e76dda9a9170ea877d25b + - 00000259b49387008617b5a0f86ecea8e615268e741db774ee34d0fb23857926 + - 0000025a334e8ff3d96f1a209119a2960f61017c29f7f9742618add01df9c82c + - 000002907cfd7fbad44c098ba2d414b7ab366f9c52d56781c59fb4633a724c00 + - 00000293f88193d7793c8c9259c661969d2261402baadfa3cb48e83aab08ae3b + - 000002aa2d6dbfc9044c829ec83c9b3e5f07aad875af51b5589410510f2517d7 + - 000002af16219be350b1e278f61043649733d179037be495885f056f798cb526 + - 000002b677ffe2cd16e8d4b28608f4b2eaf918924431cd97ec3eae3659a1f19c + - 000002c0ab260e7cf4b5a31fbed03c1efbc0b656edb561c6727e1357106a33b8 + - 00000353b3fd1550bbb87329a0058261093970f7974db037f4c6185d43b2bed4 
+ - 0000037c0f10cca87577100b5f7bc2b8100e62f6facf83ccd29e3f6ba280afcd + - 0000039f312e0bbd3fe3c02ce94c56d895cc8c87208176a2e7673ebfc72c9e8b + - 000003a6a42a0fef94fa689f4ea03bbf563a1e82a4626a7d833d85aef0f975f5 + - 000003a8ba8ea4e8fa72762c69923643e2a66a1980ad3d0f25ba279bed48d1a7 + - 000003b7d6d3c005e9a4027a6722a6d923b7ddbe2b7add31888ae280200f3e0d + - 000003baa91af94d28e7032327324c458cb4016b10e87c6adf4884eaf9598629 + - 000003bb5d47addfa303836320c3fd292daede501a57b010afc4a52c6c216586 + - 000003c16d094663db2528cd37f635ed28095dabde60ca70d01c76e3c8388995 + - 000003c60b7fd804b161d53138b15bc266ea62ba1770ea6733242a9413a15fe5 + - 000003d19ba2638271c5edf5bf1e633e808ca52cb753104ec0254ba09e749d89 + - 000003fb91f0e962e96025903c3c3aaffe51c477f7c1c49e4fd161d7ee501ba1 + - 000004167295adf699bf3bb99a87bbfa20b491779432f8e1e409f11731b355e4 + - 000004351d74e5dfbb9d1010e8fc857b3456ecba38ef3d7b5844435125de364e + - 0000046901e90282e1ea707bc8b3ba3f57573038e6031d5bb17103f69716137a + - 0000046e0e8ab1295064861af24dc92ba93fe359dae0995fa8e7674183779f4f + - 00000479f2fcb4caeb2752909a3ac160681ad923b10f8cf884b798f56916bba8 + - 000004a4b36e1abc71ecd71ca89190ebed895892708577c181a2910a49f08cfa + - 0000051bacbd74bf190857ffd7c035e54f67488c78e2fb6f9c1c83980ad71bef + - 0000058e17aa5303e022572b40482dc2661059da4f5be95e7dde22750d1f2ff0 + - 000005cd2a6a49a6994daa2f3d0ba6e0fb881284954b29b57401050f3ca47bdf + - 000005fd618bf0c520447a5518627abe5f47e90248030d965df48a35207e33fb + - 0000060426306faf8b1e772bc9968d610fbc6684435d2de996621f958b97d64a + - 000006b56ac7e1ea911210f7eb4db397a61bb0d5a8580821fdaa1abb7602de9c + - 00000713e01f81308ed02309d4bbb6a36664ff3f10f587a6879ec753c7802e23 + - 000007683cd8d775d309e07308f7c9125e621cebb72aeb8ec1d710e0810157ea + - 00000914c20126cf00fffb71d0af7a03cf17ddda69ecc5bd68abc127090c95d3 + - 00000b310fdcf52c9195422d2fcf49ae3de214501261b29a985d8e64bed64fea + - 00000bd6e23c43367ef5d7bb1e2f0ef4ed9256fa9c5f4f2c737a479f264cf8a0 + - 
00000cf6bc737c42ecc7f2fcf92ea4dc92e59404b93b3fa3e1e0648cafb3f4ad + - 00000ddd6cc76f17a982e506a64f1edb75b89a576ef7b595c11b78d1ef2d68b7 + - 00000e16526da07de7d14bd87155887624069bc67244098fab4f05589f0c5723 + - 00000ea977c8193be1f7abb703e12867e6cf981567699954b8bf7cd1f33094f1 + - 000010c880b068126f3b9d33c4705bcf91a15f80c8c0ee519b232b51ea5697aa + - 000010fdf4a3e2f60a376180bbb473a280e88d184c13af0aabf6867c07f436af + - 000016eaffd25d3335523b15bd18ca707b5503a00fed1c39857b20e3227edaf0 + - 00001714c55b80d90489005c3edd8de1779a8b9e2aaf9c56592e5922a8de8ae1 + - 00001958f73bfdbe2cba49a2d6b5221d8046f5326bff67eb37efed70eb103cee + - 000019a02ba2f7f15cf4abb885f909400e9be7526c033b8ef475fa2dfebf57e3 + - 00001a20cd1ce1b3a021d3a3224dc5c18550b7981508bd446f03a5762724a9e1 + - 00001a40d2ac32d5f39d51680538a72570c9d263eb3ea364cefab7b9906a0e90 + - 00001bd841e1373f711d44a83584125db5cb4844d7086d7981ea91ea78d01183 + - 00001c9fc462dccec2830b0fc87a6d740d03ef92133570d492c558de1d6156bf + - 00001d1830d70992ff544914fb8fea3d71b8a686e9b466450f6800a2383394f5 + - 000024051364ef7173995581740f448125fb8a215f97065b73cb3fadbcdb885a + - 000024453eb58e2f0ace673df99ee9b53825a078255dcd0971ea8c4cefa782ff + - 0000291f0dff2025dd5ade362cc7b8a991a631821e87bdf7b732b5bfedd55507 + - 0000294369e45c257715c5bac9f8e714e97413cbcfd58d2fd26cef6fe39aba64 + - 00002a147679a0cb6d2a3d33fb5d44418bdef1af0bc7630291bac709f41d8567 + - 00002c3d04975c2a1166e115e93cf8d63df5900adf7105cc70ab7a13e8baadca + - 00002d82c41a430f89cf4e7d4c524143e57e93ee4c73c16daacdab7d79d28e48 + - 00002d9b1ec90c02749c9acceca9e919d523237cef864ec29ab334777b80c226 + - 00002fec62ae183501b05a9c272c7534bf96c78e9a8d237215121cd56ec5cab2 + - 0000302581088e4717d680662c4a9ae07d8e0727f040ec127953c371ea32ea77 + - 0000336ff31ea3e1717c0b02619bc2d09ead38089f298bef179f4b6715eae1f6 + - 000035d96f815ee188b740b4b351279e13d99a89e227393b3f25074d59fbcb8c + - 00003de89f0752a6a8cd127250c624ff431975c559934ee532c76dcd899c2e66 + - 00003e5372eb70089919c4a6ef7c54e4618c7ac59e16b76b8b5b5e448717ff9a + 
- 00003f99d7f877d384b0de992d7e2a8d8aaae685fd25f1819b4ee25c9b913d03 + - 000040db8e91bcdc1e65bc868e904345396a0bc4eb084694a72dbcc485555d80 + - 00004305882eb3eef6b45f025ff58eb7baa5ca35f7d6f42c8b085482b00474e6 + - 000045ecbab77c9a8d819ff6d26893b9da2774eee5539f17d8fc2394f82b758e diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index c9fd671df..5a287a6d7 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -14,9 +14,13 @@ import os from math import log -from typing import List, NamedTuple, Optional +from typing import Any, Dict, List, NamedTuple, Optional, Union + +import pydantic from hathor.checkpoint import Checkpoint +from hathor.utils import yaml +from hathor.utils.pydantic import BaseModel DECIMAL_PLACES = 2 @@ -379,3 +383,73 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: # Time to update the peers that are running sync. SYNC_UPDATE_INTERVAL: int = 10 * 60 # seconds + + @classmethod + def from_yaml(cls, *, filepath: str) -> 'HathorSettings': + """Takes a filepath to a yaml file and returns a validated HathorSettings instance.""" + settings_dict = yaml.dict_from(filepath=filepath) + + return HathorSettings.from_dict(settings_dict) + + @classmethod + def from_dict(cls, settings: Dict[str, Any]) -> 'HathorSettings': + """Takes a settings dict and returns a validated HathorSettings instance.""" + # This intermediate step shouldn't be necessary, but for some reason pydantic.create_model_from_namedtuple + # doesn't support default attribute values, so we do this to add them + all_settings = HathorSettings(**settings) + validated_settings = _ValidatedHathorSettings(**all_settings._asdict()) + + return HathorSettings(**validated_settings.dict()) + + +def _parse_checkpoints(checkpoints: Union[Dict[int, str], List[Checkpoint]]) -> List[Checkpoint]: + """Parse a dictionary of raw checkpoint data into a list of checkpoints.""" + if isinstance(checkpoints, Dict): + return [ + Checkpoint(height, bytes.fromhex(_hash)) + for height, _hash in 
checkpoints.items() + ] + + if not isinstance(checkpoints, List): + raise TypeError(f'expected \'Dict[int, str]\' or \'List[Checkpoint]\', got {checkpoints}') + + return checkpoints + + +def _parse_hex_str(hex_str: Union[str, bytes]) -> bytes: + """Parse a raw hex string into bytes.""" + if isinstance(hex_str, str): + return bytes.fromhex(hex_str.lstrip('x')) + + if not isinstance(hex_str, bytes): + raise TypeError(f'expected \'str\' or \'bytes\', got {hex_str}') + + return hex_str + + +_ValidatedHathorSettings = pydantic.create_model_from_namedtuple( + HathorSettings, + __base__=BaseModel, + __validators__=dict( + _parse_hex_str=pydantic.validator( + 'P2PKH_VERSION_BYTE', + 'MULTISIG_VERSION_BYTE', + 'GENESIS_OUTPUT_SCRIPT', + 'GENESIS_BLOCK_HASH', + 'GENESIS_TX1_HASH', + 'GENESIS_TX2_HASH', + pre=True, + allow_reuse=True + )(_parse_hex_str), + _parse_soft_voided_tx_id=pydantic.validator( + 'SOFT_VOIDED_TX_IDS', + pre=True, + allow_reuse=True, + each_item=True + )(_parse_hex_str), + _parse_checkpoints=pydantic.validator( + 'CHECKPOINTS', + pre=True + )(_parse_checkpoints) + ) +) diff --git a/hathor/conf/testnet.yml b/hathor/conf/testnet.yml new file mode 100644 index 000000000..1a5f73e47 --- /dev/null +++ b/hathor/conf/testnet.yml @@ -0,0 +1,37 @@ +P2PKH_VERSION_BYTE: x49 +MULTISIG_VERSION_BYTE: x87 +NETWORK_NAME: testnet-golf +BOOTSTRAP_DNS: + - golf.testnet.hathor.network + +# Genesis stuff +GENESIS_OUTPUT_SCRIPT: 76a914a584cf48b161e4a49223ed220df30037ab740e0088ac +GENESIS_TIMESTAMP: 1577836800 +GENESIS_BLOCK_NONCE: 826272 +GENESIS_BLOCK_HASH: 0000033139d08176d1051fb3a272c3610457f0c7f686afbe0afe3d37f966db85 +GENESIS_TX1_NONCE: 190 +GENESIS_TX1_HASH: 00e161a6b0bee1781ea9300680913fb76fd0fac4acab527cd9626cc1514abdc9 +GENESIS_TX2_NONCE: 115 +GENESIS_TX2_HASH: 00975897028ceb037307327c953f5e7ad4d3f42402d71bd3d11ecb63ac39f01a + +# tx weight parameters. 
With these settings tx weight is always 8 +MIN_TX_WEIGHT_K: 0 +MIN_TX_WEIGHT_COEFFICIENT: 0 +MIN_TX_WEIGHT: 8 +CHECKPOINTS: + 100_000: 0000007ece4c7830169f360ed11c51b776e1b72bf0060e6e5b325ca8be474ac5 + 200_000: 00000113ecd4b666116abf3d3f05ad509d903d6b456a1e8c35e46a9e426af11a + 300_000: 000000e42df13e4e7490cee98f303cb3b0ca33f362af180c5f7df740c98699d9 + 400_000: 000000e9a748b34fc4d662d88bb36ef2a033ba129960924208be14eccdac1a65 + 500_000: 000000b5c4572d7b85e585849540ece44b73948c5cdbc6f17a9a3a77fbd0f29a + 600_000: 000000f6743ba3d67e51d7adc21821b8263726ce3bc86010d5e1a905bf2531dc + 700_000: 0000008fda01c9e5fd6f99a5461e6dbf1039cba38cc8d0fc738a097d71caa968 + 800_000: 000000397af32fcc4eeb6985d96326c1ff4644792631872a00394688b1782af5 + 900_000: 00000097ae405036614f4335ad0e631df8fc5f7434e82c3421627e2fea4e1830 + 1_000_000: 000000145ba662cdee0d72034658f93a0a3a4568d5ba5083ff09013ca1e6556c + 1_100_000: 000000404e6ff6a23695a6ffe712ce1c4efc02e75bbc11c3129f4c2377b07743 + 1_200_000: 0000003be5fae5bb2c9ceaed589d172bcd9e74ca6c8d7d2ca06567f65cea7c9b + 1_300_000: 0000000000007d39de6e781c377bc202213b0b5b60db14c13d0b16e06d6fd5ac + 1_400_000: 000000000df9cb786c68a643a52a67c22ab54e8b8e41cbe9b761133f6c8abbfe + 1_500_000: 000000000c3591805f4748480b59ac1788f754fc004930985a487580e2b5de8f + 1_600_000: 00000000060adfdfd7d488d4d510b5779cf35a3c50df7bcff941fbb6957be4d2 diff --git a/hathor/conf/unittests.yml b/hathor/conf/unittests.yml new file mode 100644 index 000000000..94328344f --- /dev/null +++ b/hathor/conf/unittests.yml @@ -0,0 +1,20 @@ +P2PKH_VERSION_BYTE: x28 +MULTISIG_VERSION_BYTE: x64 +NETWORK_NAME: unittests +BLOCKS_PER_HALVING: 120 +MIN_BLOCK_WEIGHT: 2 +MIN_TX_WEIGHT: 2 +MIN_SHARE_WEIGHT: 2 +MAX_TX_WEIGHT_DIFF: 25.0 +BLOCK_DIFFICULTY_N_BLOCKS: 20 +GENESIS_OUTPUT_SCRIPT: 76a914fd05059b6006249543b82f36876a17c73fd2267b88ac +GENESIS_BLOCK_NONCE: 0 +GENESIS_BLOCK_HASH: 339f47da87435842b0b1b528ecd9eac2495ce983b3e9c923a37e1befbe12c792 +GENESIS_TX1_NONCE: 6 +GENESIS_TX1_HASH: 
16ba3dbe424c443e571b00840ca54b9ff4cff467e10b6a15536e718e2008f952 +GENESIS_TX2_NONCE: 2 +GENESIS_TX2_HASH: 33e14cb555a96967841dcbe0f95e9eab5810481d01de8f4f73afb8cce365e869 +REWARD_SPEND_MIN_BLOCKS: 10 +SLOW_ASSERTS: true +ENABLE_EVENT_QUEUE_FEATURE: true +MAX_TX_WEIGHT_DIFF_ACTIVATION: 0.0 diff --git a/hathor/utils/pydantic.py b/hathor/utils/pydantic.py index 6b5be9fa2..44c7d6766 100644 --- a/hathor/utils/pydantic.py +++ b/hathor/utils/pydantic.py @@ -15,8 +15,6 @@ from pydantic import BaseModel as PydanticBaseModel, Extra from pydantic.generics import GenericModel as PydanticGenericModel -from hathor.util import json_dumpb - class BaseModel(PydanticBaseModel): """Substitute for pydantic's BaseModel. @@ -28,6 +26,7 @@ class BaseModel(PydanticBaseModel): def json_dumpb(self) -> bytes: """Utility method for converting a Model into bytes representation of a JSON.""" + from hathor.util import json_dumpb return json_dumpb(self.dict()) class Config: diff --git a/hathor/utils/yaml.py b/hathor/utils/yaml.py new file mode 100644 index 000000000..ffaef57a1 --- /dev/null +++ b/hathor/utils/yaml.py @@ -0,0 +1,23 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Dict + +import yaml + + +def dict_from(*, filepath: str) -> Dict[str, Any]: + """Takes a filepath to a yaml file and returns a dictionary with its contents.""" + with open(filepath, 'r') as file: + return yaml.safe_load(file) diff --git a/poetry.lock b/poetry.lock index 7b1713470..dc098a245 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry and should not be changed by hand. [[package]] name = "aiohttp" @@ -1467,7 +1467,7 @@ files = [ name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" -category = "dev" +category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1934,6 +1934,18 @@ files = [ [package.dependencies] types-cryptography = "*" +[[package]] +name = "types-pyyaml" +version = "6.0.12.9" +description = "Typing stubs for PyYAML" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "types-PyYAML-6.0.12.9.tar.gz", hash = "sha256:c51b1bd6d99ddf0aa2884a7a328810ebf70a4262c292195d3f4f9a0005f9eeb6"}, + {file = "types_PyYAML-6.0.12.9-py3-none-any.whl", hash = "sha256:5aed5aa66bd2d2e158f75dda22b059570ede988559f030cf294871d3b647e3e8"}, +] + [[package]] name = "types-requests" version = "2.28.11.4" @@ -2209,4 +2221,4 @@ sentry = ["sentry-sdk", "structlog-sentry"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "1032dbd9e22b32f9f3780d270fb03c9c23d26b8e6a54c0befdb005027c4caa1a" +content-hash = "7b9a6eb9086f7219946672192db025a947716f1584035ed650476b8c416ff385" diff --git a/pyproject.toml b/pyproject.toml index f0d03df73..9d3bb09cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,7 @@ yamllint = "~1.31.0" # stubs: types-requests = "=2.28.11.4" types-pyopenssl = "=22.1.0.2" +types-pyyaml = "=6.0.12.9" [tool.poetry.dependencies] python = ">=3.9,<4" @@ -77,6 +78,7 @@ sentry-sdk = {version 
= "^1.5.11", optional = true} structlog-sentry = {version = "^1.4.0", optional = true} hathorlib = "0.3.0" pydantic = "~1.10.6" +pyyaml = "~6.0" [tool.poetry.extras] sentry = ["sentry-sdk", "structlog-sentry"] diff --git a/tests/conftest.py b/tests/conftest.py index 7f43fdac7..475c5b59f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -4,7 +4,9 @@ from twisted.internet import asyncioreactor -os.environ['HATHOR_CONFIG_FILE'] = os.environ.get('HATHOR_TEST_CONFIG_FILE', 'hathor.conf.unittests') +from hathor.conf import UNITTESTS_SETTINGS_FILEPATH + +os.environ['HATHOR_CONFIG_YAML'] = os.environ.get('HATHOR_TEST_CONFIG_YAML', UNITTESTS_SETTINGS_FILEPATH) if sys.platform == 'win32': # See: https://twistedmatrix.com/documents/current/api/twisted.internet.asyncioreactor.AsyncioSelectorReactor.html diff --git a/tests/others/resources/invalid_hathor_settings_fixture.yml b/tests/others/resources/invalid_hathor_settings_fixture.yml new file mode 100644 index 000000000..515a11690 --- /dev/null +++ b/tests/others/resources/invalid_hathor_settings_fixture.yml @@ -0,0 +1,40 @@ +P2PKH_VERSION_BYTE: x28 +MULTISIG_VERSION_BYTE: 64 +NETWORK_NAME: testing +BOOTSTRAP_DNS: + - mainnet.hathor.network +ENABLE_PEER_WHITELIST: true +WHITELIST_URL: https://hathor-public-files.s3.amazonaws.com/whitelist_peer_ids + +GENESIS_OUTPUT_SCRIPT: 76a9147fd4ae0e4fb2d2854e76d359029d8078bb99649e88ac +GENESIS_TIMESTAMP: 1578075305 +GENESIS_BLOCK_NONCE: 2591358 +GENESIS_BLOCK_HASH: 000006cb93385b8b87a545a1cbb6197e6caff600c12cc12fc54250d39c8088fc +GENESIS_TX1_NONCE: 7715 +GENESIS_TX1_HASH: 0002d4d2a15def7604688e1878ab681142a7b155cbe52a6b4e031250ae96db0a +GENESIS_TX2_NONCE: 3769 +GENESIS_TX2_HASH: 0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c02d873c7abbf9 + +MIN_TX_WEIGHT_K: 0 +MIN_TX_WEIGHT_COEFFICIENT: 0 +MIN_TX_WEIGHT: 8 + +BLOCKS_PER_HALVING: 120 +MIN_BLOCK_WEIGHT: 2 +MIN_SHARE_WEIGHT: 2 +MAX_TX_WEIGHT_DIFF: 25.0 +BLOCK_DIFFICULTY_N_BLOCKS: 20 + +REWARD_SPEND_MIN_BLOCKS: 10 +SLOW_ASSERTS: 
true +ENABLE_EVENT_QUEUE_FEATURE: true +MAX_TX_WEIGHT_DIFF_ACTIVATION: 0.0 + +CHECKPOINTS: + - 0000000000001247073138556b4f60fff3ff6eec6521373ccee5a6526a7c10af + - 00000000000001bf13197340ae0807df2c16f4959da6054af822550d7b20e19e + + +SOFT_VOIDED_TX_IDS: + - gggggggggg + - 000000001980b413ad5b5c5152338093aecfb1f5a7563d4e7fef8fb240a50bb9 diff --git a/tests/others/resources/missing_hathor_settings_fixture.yml b/tests/others/resources/missing_hathor_settings_fixture.yml new file mode 100644 index 000000000..78ee76f16 --- /dev/null +++ b/tests/others/resources/missing_hathor_settings_fixture.yml @@ -0,0 +1,39 @@ +P2PKH_VERSION_BYTE: x28 +MULTISIG_VERSION_BYTE: '64' +BOOTSTRAP_DNS: + - mainnet.hathor.network +ENABLE_PEER_WHITELIST: true +WHITELIST_URL: https://hathor-public-files.s3.amazonaws.com/whitelist_peer_ids + +GENESIS_OUTPUT_SCRIPT: 76a9147fd4ae0e4fb2d2854e76d359029d8078bb99649e88ac +GENESIS_TIMESTAMP: 1578075305 +GENESIS_BLOCK_NONCE: 2591358 +GENESIS_BLOCK_HASH: 000006cb93385b8b87a545a1cbb6197e6caff600c12cc12fc54250d39c8088fc +GENESIS_TX1_NONCE: 7715 +GENESIS_TX1_HASH: 0002d4d2a15def7604688e1878ab681142a7b155cbe52a6b4e031250ae96db0a +GENESIS_TX2_NONCE: 3769 +GENESIS_TX2_HASH: 0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c02d873c7abbf9 + +MIN_TX_WEIGHT_K: 0 +MIN_TX_WEIGHT_COEFFICIENT: 0 +MIN_TX_WEIGHT: 8 + +BLOCKS_PER_HALVING: 120 +MIN_BLOCK_WEIGHT: 2 +MIN_SHARE_WEIGHT: 2 +MAX_TX_WEIGHT_DIFF: 25.0 +BLOCK_DIFFICULTY_N_BLOCKS: 20 + +REWARD_SPEND_MIN_BLOCKS: 10 +SLOW_ASSERTS: true +ENABLE_EVENT_QUEUE_FEATURE: true +MAX_TX_WEIGHT_DIFF_ACTIVATION: 0.0 + +CHECKPOINTS: + 100_000: 0000000000001247073138556b4f60fff3ff6eec6521373ccee5a6526a7c10af + 200_000: 00000000000001bf13197340ae0807df2c16f4959da6054af822550d7b20e19e + + +SOFT_VOIDED_TX_IDS: + - 0000000012a922a6887497bed9c41e5ed7dc7213cae107db295602168266cd02 + - 000000001980b413ad5b5c5152338093aecfb1f5a7563d4e7fef8fb240a50bb9 diff --git a/tests/others/resources/valid_hathor_settings_fixture.yml 
b/tests/others/resources/valid_hathor_settings_fixture.yml new file mode 100644 index 000000000..81eb5cf3c --- /dev/null +++ b/tests/others/resources/valid_hathor_settings_fixture.yml @@ -0,0 +1,40 @@ +P2PKH_VERSION_BYTE: x28 +MULTISIG_VERSION_BYTE: '64' +NETWORK_NAME: testing +BOOTSTRAP_DNS: + - mainnet.hathor.network +ENABLE_PEER_WHITELIST: true +WHITELIST_URL: https://hathor-public-files.s3.amazonaws.com/whitelist_peer_ids + +GENESIS_OUTPUT_SCRIPT: 76a9147fd4ae0e4fb2d2854e76d359029d8078bb99649e88ac +GENESIS_TIMESTAMP: 1578075305 +GENESIS_BLOCK_NONCE: 2591358 +GENESIS_BLOCK_HASH: 000006cb93385b8b87a545a1cbb6197e6caff600c12cc12fc54250d39c8088fc +GENESIS_TX1_NONCE: 7715 +GENESIS_TX1_HASH: 0002d4d2a15def7604688e1878ab681142a7b155cbe52a6b4e031250ae96db0a +GENESIS_TX2_NONCE: 3769 +GENESIS_TX2_HASH: 0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c02d873c7abbf9 + +MIN_TX_WEIGHT_K: 0 +MIN_TX_WEIGHT_COEFFICIENT: 0 +MIN_TX_WEIGHT: 8 + +BLOCKS_PER_HALVING: 120 +MIN_BLOCK_WEIGHT: 2 +MIN_SHARE_WEIGHT: 2 +MAX_TX_WEIGHT_DIFF: 25.0 +BLOCK_DIFFICULTY_N_BLOCKS: 20 + +REWARD_SPEND_MIN_BLOCKS: 10 +SLOW_ASSERTS: true +ENABLE_EVENT_QUEUE_FEATURE: true +MAX_TX_WEIGHT_DIFF_ACTIVATION: 0.0 + +CHECKPOINTS: + 100_000: 0000000000001247073138556b4f60fff3ff6eec6521373ccee5a6526a7c10af + 200_000: 00000000000001bf13197340ae0807df2c16f4959da6054af822550d7b20e19e + + +SOFT_VOIDED_TX_IDS: + - 0000000012a922a6887497bed9c41e5ed7dc7213cae107db295602168266cd02 + - 000000001980b413ad5b5c5152338093aecfb1f5a7563d4e7fef8fb240a50bb9 diff --git a/tests/others/test_hathor_settings.py b/tests/others/test_hathor_settings.py new file mode 100644 index 000000000..47ff53e97 --- /dev/null +++ b/tests/others/test_hathor_settings.py @@ -0,0 +1,107 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pathlib import Path + +import pytest +from pydantic import ValidationError + +from hathor.checkpoint import Checkpoint +from hathor.conf import MAINNET_SETTINGS_FILEPATH, TESTNET_SETTINGS_FILEPATH, UNITTESTS_SETTINGS_FILEPATH +from hathor.conf.mainnet import SETTINGS as MAINNET_SETTINGS +from hathor.conf.settings import HathorSettings +from hathor.conf.testnet import SETTINGS as TESTNET_SETTINGS +from hathor.conf.unittests import SETTINGS as UNITTESTS_SETTINGS + +VALID_HATHOR_SETTINGS_FIXTURE_FILE = 'resources/valid_hathor_settings_fixture.yml' +INVALID_HATHOR_SETTINGS_FIXTURE_FILE = 'resources/invalid_hathor_settings_fixture.yml' +MISSING_HATHOR_SETTINGS_FIXTURE_FILE = 'resources/missing_hathor_settings_fixture.yml' + + +def test_valid_hathor_settings_from_yaml(hathor_settings): + parent_dir = Path(__file__).parent + settings_filepath = str(parent_dir / VALID_HATHOR_SETTINGS_FIXTURE_FILE) + + assert hathor_settings == HathorSettings.from_yaml(filepath=settings_filepath) + + +def test_invalid_hathor_settings_from_yaml(): + parent_dir = Path(__file__).parent + settings_filepath = str(parent_dir / INVALID_HATHOR_SETTINGS_FIXTURE_FILE) + + with pytest.raises(ValidationError): + HathorSettings.from_yaml(filepath=settings_filepath) + + +def test_missing_hathor_settings_from_yaml(): + parent_dir = Path(__file__).parent + settings_filepath = str(parent_dir / MISSING_HATHOR_SETTINGS_FIXTURE_FILE) + + with pytest.raises(TypeError): + HathorSettings.from_yaml(filepath=settings_filepath) + + +@pytest.fixture +def hathor_settings(): + return HathorSettings( + 
P2PKH_VERSION_BYTE=b'\x28', + MULTISIG_VERSION_BYTE=b'\x64', + NETWORK_NAME='testing', + BOOTSTRAP_DNS=['mainnet.hathor.network'], + ENABLE_PEER_WHITELIST=True, + WHITELIST_URL='https://hathor-public-files.s3.amazonaws.com/whitelist_peer_ids', + MIN_TX_WEIGHT_K=0, + MIN_TX_WEIGHT_COEFFICIENT=0, + MIN_TX_WEIGHT=8, + GENESIS_OUTPUT_SCRIPT=bytes.fromhex('76a9147fd4ae0e4fb2d2854e76d359029d8078bb99649e88ac'), + GENESIS_TIMESTAMP=1578075305, + GENESIS_BLOCK_NONCE=2591358, + GENESIS_BLOCK_HASH=bytes.fromhex('000006cb93385b8b87a545a1cbb6197e6caff600c12cc12fc54250d39c8088fc'), + GENESIS_TX1_NONCE=7715, + GENESIS_TX1_HASH=bytes.fromhex('0002d4d2a15def7604688e1878ab681142a7b155cbe52a6b4e031250ae96db0a'), + GENESIS_TX2_NONCE=3769, + GENESIS_TX2_HASH=bytes.fromhex('0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c02d873c7abbf9'), + CHECKPOINTS=[ + Checkpoint(100_000, bytes.fromhex('0000000000001247073138556b4f60fff3ff6eec6521373ccee5a6526a7c10af')), + Checkpoint(200_000, bytes.fromhex('00000000000001bf13197340ae0807df2c16f4959da6054af822550d7b20e19e')), + ], + SOFT_VOIDED_TX_IDS=[ + bytes.fromhex('0000000012a922a6887497bed9c41e5ed7dc7213cae107db295602168266cd02'), + bytes.fromhex('000000001980b413ad5b5c5152338093aecfb1f5a7563d4e7fef8fb240a50bb9'), + ], + REWARD_SPEND_MIN_BLOCKS=10, + SLOW_ASSERTS=True, + ENABLE_EVENT_QUEUE_FEATURE=True, + MAX_TX_WEIGHT_DIFF_ACTIVATION=0.0, + BLOCKS_PER_HALVING=120, + MIN_BLOCK_WEIGHT=2, + MIN_SHARE_WEIGHT=2, + MAX_TX_WEIGHT_DIFF=25.0, + BLOCK_DIFFICULTY_N_BLOCKS=20, + ) + + +# TODO: Tests below are temporary while settings via python coexist with settings via yaml, just to make sure +# the conversion was made correctly. After python settings are removed, this file can be removed too. 
+ + +def test_mainnet_settings_migration(): + assert MAINNET_SETTINGS == HathorSettings.from_yaml(filepath=MAINNET_SETTINGS_FILEPATH) + + +def test_testnet_settings_migration(): + assert TESTNET_SETTINGS == HathorSettings.from_yaml(filepath=TESTNET_SETTINGS_FILEPATH) + + +def test_unittests_settings_migration(): + assert UNITTESTS_SETTINGS == HathorSettings.from_yaml(filepath=UNITTESTS_SETTINGS_FILEPATH) From 33fb275029595b12f9da0ee86bae43e3b4187444 Mon Sep 17 00:00:00 2001 From: Marcelo Salhab Brogliato Date: Thu, 27 Apr 2023 17:06:19 -0500 Subject: [PATCH 06/24] feat: Add safeguards for partially validated transactions (part 1/2) --- hathor/conf/settings.py | 5 +- hathor/consensus/consensus.py | 5 ++ hathor/graphviz.py | 4 ++ hathor/indexes/base_index.py | 6 ++ hathor/indexes/manager.py | 8 +-- hathor/indexes/memory_height_index.py | 9 ++- hathor/indexes/rocksdb_deps_index.py | 18 ++++++ hathor/simulator/fake_connection.py | 11 +++- hathor/simulator/simulator.py | 13 +++- hathor/simulator/trigger.py | 17 +++++ hathor/simulator/tx_generator.py | 2 + hathor/transaction/base_transaction.py | 62 +++++++++++++++++-- hathor/transaction/storage/memory_storage.py | 1 + .../storage/transaction_storage.py | 25 +++++++- hathor/transaction/transaction.py | 2 + hathor/transaction/transaction_metadata.py | 30 +++++++-- tests/consensus/test_consensus.py | 4 +- tests/consensus/test_soft_voided.py | 5 +- tests/consensus/test_soft_voided3.py | 5 +- tests/consensus/test_soft_voided4.py | 5 +- tests/p2p/test_protocol.py | 4 +- tests/resources/transaction/test_tx.py | 8 +++ tests/tx/test_cache_storage.py | 2 + tests/tx/test_tx.py | 2 + tests/tx/test_tx_storage.py | 15 +++++ tests/wallet/test_wallet.py | 4 ++ tests/wallet/test_wallet_hd.py | 4 ++ 27 files changed, 244 insertions(+), 32 deletions(-) diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 5a287a6d7..9c2ce0af1 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -366,12 +366,15 @@ def 
MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: # List of soft voided transaction. SOFT_VOIDED_TX_IDS: List[bytes] = [] - # Identifier used in metadata's voided_by. + # Identifier used in metadata's voided_by to mark a tx as soft-voided. SOFT_VOIDED_ID: bytes = b'tx-non-grata' # Identifier used in metadata's voided_by when an unexpected exception occurs at consensus. CONSENSUS_FAIL_ID: bytes = b'consensus-fail' + # Identifier used in metadata's voided_by to mark a tx as partially validated. + PARTIALLY_VALIDATED_ID: bytes = b'pending-validation' + ENABLE_EVENT_QUEUE_FEATURE: bool = False EVENT_API_DEFAULT_BATCH_SIZE: int = 100 diff --git a/hathor/consensus/consensus.py b/hathor/consensus/consensus.py index 045396cf6..fcb030201 100644 --- a/hathor/consensus/consensus.py +++ b/hathor/consensus/consensus.py @@ -83,6 +83,11 @@ def _unsafe_update(self, base: BaseTransaction) -> None: """Run a consensus update with its own context, indexes will be updated accordingly.""" from hathor.transaction import Block, Transaction + # XXX: first make sure we can run the consensus update on this tx: + meta = base.get_metadata() + assert meta.voided_by is None or (settings.PARTIALLY_VALIDATED_ID not in meta.voided_by) + assert meta.validation.is_fully_connected() + # this context instance will live only while this update is running context = self.create_context() diff --git a/hathor/graphviz.py b/hathor/graphviz.py index 0f7933c6b..a62fcf210 100644 --- a/hathor/graphviz.py +++ b/hathor/graphviz.py @@ -52,6 +52,7 @@ def __init__(self, storage: TransactionStorage, include_funds: bool = False, self.voided_attrs = dict(style='dashed,filled', penwidth='0.25', fillcolor='#BDC3C7') self.soft_voided_attrs = dict(style='dashed,filled', penwidth='0.25', fillcolor='#CCCCFF') self.conflict_attrs = dict(style='dashed,filled', penwidth='2.0', fillcolor='#BDC3C7') + self.not_fully_validated_attrs = dict(style='dashed,filled', penwidth='0.25', fillcolor='#F9FFAB') # Labels self.labels: Dict[bytes, str] = 
{} @@ -96,6 +97,9 @@ def get_node_attrs(self, tx: BaseTransaction) -> Dict[str, str]: else: node_attrs.update(self.voided_attrs) + if not meta.validation.is_fully_connected(): + node_attrs.update(self.not_fully_validated_attrs) + return node_attrs def get_edge_attrs(self, tx: BaseTransaction, neighbor_hash: bytes) -> Dict[str, str]: diff --git a/hathor/indexes/base_index.py b/hathor/indexes/base_index.py index b68fc8b17..6d452f6d3 100644 --- a/hathor/indexes/base_index.py +++ b/hathor/indexes/base_index.py @@ -15,12 +15,16 @@ from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Optional +from structlog import get_logger + from hathor.indexes.scope import Scope from hathor.transaction.base_transaction import BaseTransaction if TYPE_CHECKING: # pragma: no cover from hathor.indexes.manager import IndexesManager +logger = get_logger() + class BaseIndex(ABC): """ All indexes must inherit from this index. @@ -28,6 +32,8 @@ class BaseIndex(ABC): This class exists so we can interact with indexes without knowing anything specific to its implemented. It was created to generalize how we initialize indexes and keep track of which ones are up-to-date. """ + def __init__(self) -> None: + self.log = logger.new() def init_start(self, indexes_manager: 'IndexesManager') -> None: """ This method will always be called when starting the index manager, regardless of initialization state. 
diff --git a/hathor/indexes/manager.py b/hathor/indexes/manager.py index d6899f6a2..98aca2440 100644 --- a/hathor/indexes/manager.py +++ b/hathor/indexes/manager.py @@ -319,12 +319,12 @@ def enable_utxo_index(self) -> None: if self.utxo is None: self.utxo = MemoryUtxoIndex() - def enable_deps_index(self) -> None: + def enable_mempool_index(self) -> None: from hathor.indexes.memory_mempool_tips_index import MemoryMempoolTipsIndex if self.mempool_tips is None: self.mempool_tips = MemoryMempoolTipsIndex() - def enable_mempool_index(self) -> None: + def enable_deps_index(self) -> None: from hathor.indexes.memory_deps_index import MemoryDepsIndex if self.deps is None: self.deps = MemoryDepsIndex() @@ -373,13 +373,13 @@ def enable_utxo_index(self) -> None: if self.utxo is None: self.utxo = RocksDBUtxoIndex(self._db) - def enable_deps_index(self) -> None: + def enable_mempool_index(self) -> None: from hathor.indexes.memory_mempool_tips_index import MemoryMempoolTipsIndex if self.mempool_tips is None: # XXX: use of RocksDBMempoolTipsIndex is very slow and was suspended self.mempool_tips = MemoryMempoolTipsIndex() - def enable_mempool_index(self) -> None: + def enable_deps_index(self) -> None: from hathor.indexes.memory_deps_index import MemoryDepsIndex if self.deps is None: # XXX: use of RocksDBDepsIndex is currently suspended until it is fixed diff --git a/hathor/indexes/memory_height_index.py b/hathor/indexes/memory_height_index.py index 5b776881c..5bdb62a25 100644 --- a/hathor/indexes/memory_height_index.py +++ b/hathor/indexes/memory_height_index.py @@ -24,6 +24,7 @@ class MemoryHeightIndex(HeightIndex): _index: List[IndexEntry] def __init__(self) -> None: + super().__init__() self.force_clear() def get_db_name(self) -> Optional[str]: @@ -37,11 +38,17 @@ def _add(self, height: int, block_hash: bytes, timestamp: int, *, can_reorg: boo raise ValueError(f'parent hash required (current height: {len(self._index)}, new height: {height})') elif len(self._index) == height: 
self._index.append(IndexEntry(block_hash, timestamp)) - elif self._index[height] != block_hash: + elif self._index[height].hash != block_hash: if can_reorg: del self._index[height:] self._index.append(IndexEntry(block_hash, timestamp)) else: + self.log.error( + 'adding would cause a re-org', + height=height, + current_block=self._index[height].hash.hex(), + new_block=block_hash.hex() + ) raise ValueError('adding would cause a re-org, use can_reorg=True to accept re-orgs') else: # nothing to do (there are more blocks, but the block at height currently matches the added block) diff --git a/hathor/indexes/rocksdb_deps_index.py b/hathor/indexes/rocksdb_deps_index.py index 55169ba50..d5e40b788 100644 --- a/hathor/indexes/rocksdb_deps_index.py +++ b/hathor/indexes/rocksdb_deps_index.py @@ -194,6 +194,9 @@ def add_tx(self, tx: BaseTransaction, partial: bool = True) -> None: batch = rocksdb.WriteBatch() validation = tx.get_metadata().validation if validation.is_fully_connected(): + # discover if new txs are ready because of this tx + self._update_new_deps_ready(tx, batch) + # finally remove from rev deps self._del_from_deps(tx, batch) elif not partial: raise ValueError('partial=False will only accept fully connected transactions') @@ -208,6 +211,18 @@ def del_tx(self, tx: BaseTransaction) -> None: self._del_from_deps(tx, batch) self._db.write(batch) + def _update_new_deps_ready(self, tx: BaseTransaction, batch: 'rocksdb.WriteBatch') -> None: + """Go over the reverse dependencies of tx and check if any of them are now ready to be validated. + + This is also idempotent. 
+ """ + assert tx.hash is not None + assert tx.storage is not None + for candidate_hash in self._iter_rev_deps_of(tx.hash): + candidate_tx = tx.storage.get_transaction(candidate_hash) + if candidate_tx.is_ready_for_validation(): + self._add_ready(candidate_hash, batch) + def _add_deps(self, tx: BaseTransaction, batch: 'rocksdb.WriteBatch') -> None: assert tx.hash is not None for dep in tx.get_all_dependencies(): @@ -230,6 +245,9 @@ def _add_needed(self, tx: BaseTransaction, batch: 'rocksdb.WriteBatch') -> None: self.log.debug('tx parent is needed', tx=tx.hash.hex(), tx_dep=tx_dep_hash.hex()) batch.put((self._cf, self._to_key_needed(tx_dep_hash)), self._to_value_needed(height, tx.hash)) + # also, remove the given transaction from needed, because we already have it + batch.delete((self._cf, self._to_key_needed(tx.hash))) + def remove_ready_for_validation(self, tx: bytes) -> None: self._db.delete((self._cf, self._to_key_ready(tx))) diff --git a/hathor/simulator/fake_connection.py b/hathor/simulator/fake_connection.py index f671a2a3b..5f3877e97 100644 --- a/hathor/simulator/fake_connection.py +++ b/hathor/simulator/fake_connection.py @@ -13,7 +13,7 @@ # limitations under the License. from collections import deque -from typing import TYPE_CHECKING, Deque +from typing import TYPE_CHECKING, Deque, Optional from OpenSSL.crypto import X509 from structlog import get_logger @@ -157,9 +157,16 @@ def run_one_step(self, debug=False, force=False): return True - def run_until_complete(self, debug=False, force=False): + def run_until_empty(self, max_steps: Optional[int] = None, debug: bool = False, force: bool = False) -> None: + """ Step until the connection reports as empty, optionally raise an assert if it takes more than `max_steps`. 
+ """ + steps = 0 while not self.is_empty(): + steps += 1 + if max_steps is not None and steps > max_steps: + raise AssertionError('took more steps than expected') self.run_one_step(debug=debug, force=force) + self.log.debug('conn empty', steps=steps) def _deliver_message(self, proto, data, debug=False): proto.dataReceived(data) diff --git a/hathor/simulator/simulator.py b/hathor/simulator/simulator.py index 6831c3bcf..71a34b522 100644 --- a/hathor/simulator/simulator.py +++ b/hathor/simulator/simulator.py @@ -239,16 +239,27 @@ def _run(self, interval: float, step: float, status_interval: float) -> Generato def run_until_complete(self, max_interval: float, + min_interval: float = 0.0, step: float = DEFAULT_STEP_INTERVAL, status_interval: float = DEFAULT_STATUS_INTERVAL) -> bool: """ Will stop when all peers have synced/errored (-> True), or when max_interval is elapsed (-> False). + Optionally keep running for at least `min_interval` ignoring the stop condition. + Make sure miners/tx_generators are stopped or this will almost certainly run until max_interval. 
""" assert self._started + steps = 0 + interval = 0.0 + initial = self._clock.seconds() for _ in self._run(max_interval, step, status_interval): - if all(not conn.can_step() for conn in self._connections): + steps += 1 + latest_time = self._clock.seconds() + interval = latest_time - initial + if interval > min_interval and all(not conn.can_step() for conn in self._connections): + self.log.debug('run_until_complete: all done', steps=steps, dt=interval) return True + self.log.debug('run_until_complete: max steps exceeded', steps=steps, dt=interval) return False def run(self, diff --git a/hathor/simulator/trigger.py b/hathor/simulator/trigger.py index 32a758bd0..2df07a457 100644 --- a/hathor/simulator/trigger.py +++ b/hathor/simulator/trigger.py @@ -17,6 +17,7 @@ if TYPE_CHECKING: from hathor.simulator.miner import AbstractMiner + from hathor.simulator.tx_generator import RandomTransactionGenerator from hathor.wallet import BaseWallet @@ -54,3 +55,19 @@ def __init__(self, wallet: 'BaseWallet', token_uid: bytes, minimum_balance: int) def should_stop(self) -> bool: balance = self.wallet.balance[self.token_uid].available return balance >= self.minimum_balance + + +class StopAfterNTransactions(Trigger): + """Stop the simulation after N transactions are found.""" + def __init__(self, tx_generator: 'RandomTransactionGenerator', *, quantity: int) -> None: + self.tx_generator = tx_generator + self.quantity = quantity + self.reset() + + def reset(self) -> None: + """Reset the counter, so this trigger can be reused.""" + self.initial_counter = self.tx_generator.transactions_found + + def should_stop(self) -> bool: + diff = self.tx_generator.transactions_found - self.initial_counter + return diff >= self.quantity diff --git a/hathor/simulator/tx_generator.py b/hathor/simulator/tx_generator.py index f7bd337aa..aa14d8888 100644 --- a/hathor/simulator/tx_generator.py +++ b/hathor/simulator/tx_generator.py @@ -62,6 +62,7 @@ def __init__(self, manager: 'HathorManager', rng: Random, 
*, # Most recent transactions generated here. # The lowest index has the most recent transaction. + self.transactions_found: int = 0 self.latest_transactions: Deque[Transaction] = deque() self.double_spending_only = False @@ -87,6 +88,7 @@ def schedule_next_transaction(self): if self.tx: ret = self.manager.propagate_tx(self.tx, fails_silently=False) assert ret is True + self.transactions_found += 1 self.latest_transactions.appendleft(self.tx.hash) if len(self.latest_transactions) > self.MAX_LATEST_TRANSACTIONS_LEN: self.latest_transactions.pop() diff --git a/hathor/transaction/base_transaction.py b/hathor/transaction/base_transaction.py index e93defc3b..c983ef739 100644 --- a/hathor/transaction/base_transaction.py +++ b/hathor/transaction/base_transaction.py @@ -494,6 +494,7 @@ def validate_checkpoint(self, checkpoints: List[Checkpoint]) -> bool: self.verify_checkpoint(checkpoints) meta.validation = ValidationState.CHECKPOINT + self.mark_partially_validated() return True def validate_basic(self, skip_block_weight_verification: bool = False) -> bool: @@ -508,6 +509,7 @@ def validate_basic(self, skip_block_weight_verification: bool = False) -> bool: self.verify_basic(skip_block_weight_verification=skip_block_weight_verification) meta.validation = ValidationState.BASIC + self.mark_partially_validated() return True def validate_full(self, skip_block_weight_verification: bool = False, sync_checkpoints: bool = False, @@ -522,7 +524,10 @@ def validate_full(self, skip_block_weight_verification: bool = False, sync_check # skip full validation when it is a checkpoint if meta.validation.is_checkpoint(): meta.validation = ValidationState.CHECKPOINT_FULL + # at last, remove the partially validated mark + self.unmark_partially_validated() return True + # XXX: in some cases it might be possible that this transaction is verified by a checkpoint but we went # directly into trying a full validation so we should check it here to make sure the validation states # ends up being 
CHECKPOINT_FULL instead of FULL @@ -535,8 +540,31 @@ def validate_full(self, skip_block_weight_verification: bool = False, sync_check meta.validation = ValidationState.CHECKPOINT_FULL else: meta.validation = ValidationState.FULL + + # at last, remove the partially validated mark + self.unmark_partially_validated() return True + def mark_partially_validated(self) -> None: + """ This function is used to add the partially-validated mark from the voided-by metadata. + + It is idempotent: calling it multiple time has the same effect as calling it once. But it must only be called + when the validation state is *NOT* "fully connected", otherwise it'll raise an assertion error. + """ + tx_meta = self.get_metadata() + assert not tx_meta.validation.is_fully_connected() + tx_meta.add_voided_by(settings.PARTIALLY_VALIDATED_ID) + + def unmark_partially_validated(self) -> None: + """ This function is used to remove the partially-validated mark from the voided-by metadata. + + It is idempotent: calling it multiple time has the same effect as calling it once. But it must only be called + when the validation state is "fully connected", otherwise it'll raise an assertion error. + """ + tx_meta = self.get_metadata() + assert tx_meta.validation.is_fully_connected() + tx_meta.del_voided_by(settings.PARTIALLY_VALIDATED_ID) + @abstractmethod def verify_checkpoint(self, checkpoints: List[Checkpoint]) -> None: """Check that this tx is a known checkpoint or is parent of another checkpoint-valid tx/block. @@ -702,6 +730,9 @@ def resolve(self, update_time: bool = True) -> bool: if hash_bytes: self.hash = hash_bytes + metadata = getattr(self, '_metadata', None) + if metadata is not None and metadata.hash is not None: + metadata.hash = hash_bytes return True else: return False @@ -850,12 +881,18 @@ def reset_metadata(self) -> None: """ Reset transaction's metadata. It is used when a node is initializing and recalculating all metadata. 
""" + from hathor.transaction.transaction_metadata import ValidationState assert self.storage is not None score = self.weight if self.is_genesis else 0 - self._metadata = TransactionMetadata(hash=self.hash, score=score, - accumulated_weight=self.weight, - height=self.calculate_height(), - min_height=self.calculate_min_height()) + self._metadata = TransactionMetadata(hash=self.hash, + score=score, + accumulated_weight=self.weight) + if self.is_genesis: + self._metadata.validation = ValidationState.CHECKPOINT_FULL + self._metadata.voided_by = set() + else: + self._metadata.validation = ValidationState.INITIAL + self._metadata.voided_by = {settings.PARTIALLY_VALIDATED_ID} self._metadata._tx_ref = weakref.ref(self) self.storage.save_transaction(self, only_metadata=True) @@ -910,12 +947,18 @@ def update_initial_metadata(self, *, save: bool = True) -> None: It is called when a new transaction/block is received by HathorManager. """ + self._update_height_metadata() self._update_parents_children_metadata() self._update_reward_lock_metadata() if save: assert self.storage is not None self.storage.save_transaction(self, only_metadata=True) + def _update_height_metadata(self) -> None: + """Update the vertice height metadata.""" + meta = self.get_metadata() + meta.height = self.calculate_height() + def _update_reward_lock_metadata(self) -> None: """Update the txs/block min_height metadata.""" metadata = self.get_metadata() @@ -1057,6 +1100,17 @@ def clone(self) -> 'BaseTransaction': def get_token_uid(self, index: int) -> bytes: raise NotImplementedError + def is_ready_for_validation(self) -> bool: + """Check whether the transaction is ready to be validated: all dependencies exist and are fully connected.""" + assert self.storage is not None + for dep_hash in self.get_all_dependencies(): + dep_meta = self.storage.get_metadata(dep_hash) + if dep_meta is None: + return False + if not dep_meta.validation.is_fully_connected(): + return False + return True + class TxInput: _tx: 
BaseTransaction # XXX: used for caching on hathor.transaction.Transaction.get_spent_tx diff --git a/hathor/transaction/storage/memory_storage.py b/hathor/transaction/storage/memory_storage.py index 8d57eb1ae..af07758f9 100644 --- a/hathor/transaction/storage/memory_storage.py +++ b/hathor/transaction/storage/memory_storage.py @@ -85,6 +85,7 @@ def _get_transaction(self, hash_bytes: bytes) -> BaseTransaction: tx = self._clone(self.transactions[hash_bytes]) if hash_bytes in self.metadata: tx._metadata = self._clone(self.metadata[hash_bytes]) + assert tx._metadata is not None return tx else: raise TransactionDoesNotExist(hash_bytes.hex()) diff --git a/hathor/transaction/storage/transaction_storage.py b/hathor/transaction/storage/transaction_storage.py index d74bd5ae7..476557102 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -351,10 +351,26 @@ def save_transaction(self: 'TransactionStorage', tx: BaseTransaction, *, only_me """ assert tx.hash is not None meta = tx.get_metadata() + self.pre_save_validation(tx, meta) - # XXX: we can only add to cache and publish txs that are fully connected (which also implies it's valid) - if not meta.validation.is_fully_connected(): - return + def pre_save_validation(self, tx: BaseTransaction, tx_meta: TransactionMetadata) -> None: + """ Must be run before every save, only raises AssertionError. + + A failure means there is a bug in the code that allowed the condition to reach the "save" code. This is a last + second measure to prevent persisting a bad transaction/metadata. + + This method receives the transaction AND the metadata in order to avoid calling ".get_metadata()" which could + potentially create a fresh metadata. 
+ """ + assert tx.hash is not None + assert tx_meta.hash is not None + assert tx.hash == tx_meta.hash, f'{tx.hash.hex()} != {tx_meta.hash.hex()}' + voided_by = tx_meta.get_frozen_voided_by() + # XXX: PARTIALLY_VALIDATED_ID must be included if the tx is fully connected and must not be included otherwise + has_partially_validated_marker = settings.PARTIALLY_VALIDATED_ID in voided_by + validation_is_fully_connected = tx_meta.validation.is_fully_connected() + assert (not has_partially_validated_marker) == validation_is_fully_connected, \ + 'Inconsistent ValidationState and voided_by' @abstractmethod def remove_transaction(self, tx: BaseTransaction) -> None: @@ -685,6 +701,7 @@ def topological_iterator(self) -> Iterator[BaseTransaction]: assert self.indexes is not None if self._always_use_topological_dfs: + self.log.debug('force choosing DFS iterator') return self._topological_sort_dfs() db_last_started_at = self.get_last_started_at() @@ -697,8 +714,10 @@ def topological_iterator(self) -> Iterator[BaseTransaction]: iter_tx: Iterator[BaseTransaction] if can_use_timestamp_index: + self.log.debug('choosing timestamp-index iterator') iter_tx = self._topological_sort_timestamp_index() else: + self.log.debug('choosing metadata iterator') iter_tx = self._topological_sort_metadata() return iter_tx diff --git a/hathor/transaction/transaction.py b/hathor/transaction/transaction.py index 6393c8690..4b32bfbf4 100644 --- a/hathor/transaction/transaction.py +++ b/hathor/transaction/transaction.py @@ -275,6 +275,8 @@ def verify_basic(self, skip_block_weight_verification: bool = False) -> None: def verify_checkpoint(self, checkpoints: List[Checkpoint]) -> None: assert self.storage is not None + if self.is_genesis: + return meta = self.get_metadata() # at least one child must be checkpoint validated for child_tx in map(self.storage.get_transaction, meta.children): diff --git a/hathor/transaction/transaction_metadata.py b/hathor/transaction/transaction_metadata.py index 
15bb6ac6d..effb32a45 100644 --- a/hathor/transaction/transaction_metadata.py +++ b/hathor/transaction/transaction_metadata.py @@ -14,7 +14,7 @@ from collections import defaultdict from enum import IntEnum, unique -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set from hathor.util import practically_equal @@ -138,12 +138,11 @@ def __init__(self, spent_outputs: Optional[Dict[int, List[bytes]]] = None, hash: # Hash of the transactions that conflicts with this transaction. self.conflict_with = None - # Hash of the transactions that void this transaction. - # - # When a transaction has a conflict and is voided because of this conflict, its own hash is added to + # - Hashes of the transactions that void this transaction. + # - When a transaction has a conflict and is voided because of this conflict, its own hash is added to # voided_by. The logic is that the transaction is voiding itself. - # - # When a block is voided, its own hash is added to voided_by. + # - When a block is voided, its own hash is added to voided_by. + # - When it is constructed it will be voided by "partially validated" until it is validated self.voided_by = None self._last_voided_by_hash = None @@ -351,3 +350,22 @@ def add_voided_by(self, item: bytes) -> None: self.voided_by = {item} else: self.voided_by.add(item) + + def del_voided_by(self, item: bytes) -> None: + """Deletes `item` from `self.voided_by`. 
Note that this method does not save the change.""" + if self.voided_by is not None: + self.voided_by.discard(item) + if not self.voided_by: + self.voided_by = None + + def get_frozen_voided_by(self) -> FrozenSet[bytes]: + """Return a frozen set copy of voided_by.""" + if self.voided_by is None: + return frozenset() + return frozenset(self.voided_by) + + def is_in_voided_by(self, item: bytes) -> bool: + """Return True if item exists in voided_by.""" + if self.voided_by is None: + return False + return item in self.voided_by diff --git a/tests/consensus/test_consensus.py b/tests/consensus/test_consensus.py index d2c5d50f9..a2d64ce22 100644 --- a/tests/consensus/test_consensus.py +++ b/tests/consensus/test_consensus.py @@ -120,8 +120,8 @@ def test_dont_revert_block_low_weight(self): manager = self.create_peer('testnet', tx_storage=self.tx_storage) # Mine a few blocks in a row with no transaction but the genesis - blocks = add_new_blocks(manager, 3, advance_clock=15) - add_blocks_unlock_reward(manager) + add_new_blocks(manager, 3, advance_clock=15) + blocks = add_blocks_unlock_reward(manager) # Add some transactions between blocks add_new_transactions(manager, 5, advance_clock=15) diff --git a/tests/consensus/test_soft_voided.py b/tests/consensus/test_soft_voided.py index 2a952fdc3..5449287c1 100644 --- a/tests/consensus/test_soft_voided.py +++ b/tests/consensus/test_soft_voided.py @@ -1,6 +1,7 @@ from hathor.conf import HathorSettings from hathor.graphviz import GraphvizVisualizer from hathor.simulator import FakeConnection, Simulator +from hathor.simulator.trigger import StopAfterNTransactions from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import add_custom_tx, gen_new_tx @@ -43,8 +44,8 @@ def _run_test(self, simulator, soft_voided_tx_ids): gen_tx2 = simulator.create_tx_generator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True) gen_tx2.start() - while not gen_tx2.latest_transactions: - simulator.run(600) + 
trigger = StopAfterNTransactions(gen_tx2, quantity=1) + self.assertTrue(simulator.run(7200, trigger=trigger)) yield gen_tx2 diff --git a/tests/consensus/test_soft_voided3.py b/tests/consensus/test_soft_voided3.py index b6b7be415..721e42d47 100644 --- a/tests/consensus/test_soft_voided3.py +++ b/tests/consensus/test_soft_voided3.py @@ -1,6 +1,7 @@ from hathor.conf import HathorSettings from hathor.graphviz import GraphvizVisualizer from hathor.simulator import FakeConnection, Simulator +from hathor.simulator.trigger import StopAfterNTransactions from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import add_custom_tx, gen_custom_tx, gen_new_tx @@ -44,8 +45,8 @@ def _run_test(self, simulator, soft_voided_tx_ids): gen_tx2 = simulator.create_tx_generator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True) gen_tx2.start() - while not gen_tx2.latest_transactions: - simulator.run(300) + trigger = StopAfterNTransactions(gen_tx2, quantity=1) + self.assertTrue(simulator.run(7200, trigger=trigger)) yield gen_tx2 diff --git a/tests/consensus/test_soft_voided4.py b/tests/consensus/test_soft_voided4.py index 0d557d8f0..57a9cd4c2 100644 --- a/tests/consensus/test_soft_voided4.py +++ b/tests/consensus/test_soft_voided4.py @@ -1,6 +1,7 @@ from hathor.conf import HathorSettings from hathor.graphviz import GraphvizVisualizer from hathor.simulator import FakeConnection, Simulator +from hathor.simulator.trigger import StopAfterNTransactions from tests import unittest from tests.simulation.base import SimulatorTestCase from tests.utils import add_custom_tx, gen_new_double_spending @@ -38,8 +39,8 @@ def _run_test(self, simulator, soft_voided_tx_ids): gen_tx2 = simulator.create_tx_generator(manager2, rate=10 / 60., hashpower=1e6, ignore_no_funds=True) gen_tx2.start() - while not gen_tx2.latest_transactions: - simulator.run(600) + trigger = StopAfterNTransactions(gen_tx2, quantity=1) + self.assertTrue(simulator.run(7200, 
trigger=trigger)) yield gen_tx2 diff --git a/tests/p2p/test_protocol.py b/tests/p2p/test_protocol.py index 990056f60..c661545b1 100644 --- a/tests/p2p/test_protocol.py +++ b/tests/p2p/test_protocol.py @@ -215,8 +215,8 @@ def test_invalid_same_peer_id2(self): self.conn.run_one_step() conn.run_one_step() # continue until messages stop - self.conn.run_until_complete() - conn.run_until_complete() + self.conn.run_until_empty() + conn.run_until_empty() self.run_to_completion() # one of the peers will close the connection. We don't know which on, as it depends # on the peer ids diff --git a/tests/resources/transaction/test_tx.py b/tests/resources/transaction/test_tx.py index b94028760..1fe8bbc23 100644 --- a/tests/resources/transaction/test_tx.py +++ b/tests/resources/transaction/test_tx.py @@ -3,6 +3,7 @@ from hathor.transaction import Transaction from hathor.transaction.resources import TransactionResource from hathor.transaction.token_creation_tx import TokenCreationTransaction +from hathor.transaction.transaction_metadata import ValidationState from tests import unittest from tests.resources.base_resource import StubSite, _BaseResourceTest from tests.utils import add_blocks_unlock_reward, add_new_blocks, add_new_transactions @@ -84,6 +85,7 @@ def test_get_one_known_tx(self): '2dc703120a77192fc16eda9ed22e1b88ac40200000218def416095b08602003d3c40fb04737e1a2a848cfd2592490a71cd' '0248b9e7d6a626f45dec86975b00f4dd53f84f1f0091125250b044e49023fbbd0f74f6093cdd2226fdff3e09a1000002be') tx = Transaction.create_from_struct(bytes.fromhex(tx_hex), self.manager.tx_storage) + tx.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx) tx_parent1_hex = ('0001010102001c382847d8440d05da95420bee2ebeb32bc437f82a9ae47b0745c8a29a7b0d001c382847d844' @@ -95,6 +97,7 @@ def test_get_one_known_tx(self): '5250b044e49023fbbd0f74f6093cdd2226fdff3e09a1001f16fe62e3433bcc74b262c11a1fa94fcb38484f4d' '8fb080f53a0c9c57ddb000000120') tx_parent1 = 
Transaction.create_from_struct(bytes.fromhex(tx_parent1_hex), self.manager.tx_storage) + tx_parent1.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx_parent1) tx_parent2_hex = ('0001000103001f16fe62e3433bcc74b262c11a1fa94fcb38484f4d8fb080f53a0c9c57ddb001006946304402' @@ -106,6 +109,7 @@ def test_get_one_known_tx(self): '62e3433bcc74b262c11a1fa94fcb38484f4d8fb080f53a0c9c57ddb00065329457d13410ac711318bd941e16' 'd57709926b76e64763bf19c3f13eeac30000016d') tx_parent2 = Transaction.create_from_struct(bytes.fromhex(tx_parent2_hex), self.manager.tx_storage) + tx_parent2.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx_parent2) tx_input_hex = ('0001010203007231eee3cb6160d95172a409d634d0866eafc8775f5729fff6a61e7850aba500b3ab76c5337b55' @@ -120,6 +124,7 @@ def test_get_one_known_tx(self): '5e95ac369b31f46188ac40200000218def416082eba802000e4e54b2922c1fa34b5d427f1e96885612e28673ac' 'cfaf6e7ceb2ba91c9c84009c8174d4a46ebcc789d1989e3dec5b68cffeef239fd8cf86ef62728e2eacee000001b6') tx_input = Transaction.create_from_struct(bytes.fromhex(tx_input_hex), self.manager.tx_storage) + tx_input.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx_input) # XXX: this is completely dependant on MemoryTokensIndex implementation, hence use_memory_storage=True @@ -186,6 +191,7 @@ def test_get_one_known_tx_with_authority(self): '7851af043c11e19f28675b010e8cf4d8da3278f126d2429490a804a7fb2c000023b318c91dcfd4b967b205dc938f9f5e2fd' '5114256caacfb8f6dd13db33000020393') tx = Transaction.create_from_struct(bytes.fromhex(tx_hex), self.manager.tx_storage) + tx.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx) tx_parent1_hex = ('0001010203000023b318c91dcfd4b967b205dc938f9f5e2fd5114256caacfb8f6dd13db330000023b318c91dcfd' @@ -200,6 +206,7 @@ def test_get_one_known_tx_with_authority(self): 
'08ef288ac40311513e4fef9d161087be202000023b318c91dcfd4b967b205dc938f9f5e2fd5114256caacfb8f6d' 'd13db3300038c3d3b69ce90bb88c0c4d6a87b9f0c349e5b10c9b7ce6714f996e512ac16400021261') tx_parent1 = Transaction.create_from_struct(bytes.fromhex(tx_parent1_hex), self.manager.tx_storage) + tx_parent1.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx_parent1) tx_parent2_hex = ('000201040000476810205cb3625d62897fcdad620e01d66649869329640f5504d77e960d01006a473045022100c' @@ -213,6 +220,7 @@ def test_get_one_known_tx_with_authority(self): '00d810') tx_parent2_bytes = bytes.fromhex(tx_parent2_hex) tx_parent2 = TokenCreationTransaction.create_from_struct(tx_parent2_bytes, self.manager.tx_storage) + tx_parent2.get_metadata().validation = ValidationState.FULL self.manager.tx_storage.save_transaction(tx_parent2) # Both inputs are the same as the last parent, so no need to manually add them diff --git a/tests/tx/test_cache_storage.py b/tests/tx/test_cache_storage.py index 6d92791a1..cb4461624 100644 --- a/tests/tx/test_cache_storage.py +++ b/tests/tx/test_cache_storage.py @@ -31,9 +31,11 @@ def tearDown(self): super().tearDown() def _get_new_tx(self, nonce): + from hathor.transaction.transaction_metadata import ValidationState tx = Transaction(nonce=nonce, storage=self.cache_storage) tx.update_hash() meta = TransactionMetadata(hash=tx.hash) + meta.validation = ValidationState.FULL tx._metadata = meta return tx diff --git a/tests/tx/test_tx.py b/tests/tx/test_tx.py index 2f9ee80c9..1b1ee1a63 100644 --- a/tests/tx/test_tx.py +++ b/tests/tx/test_tx.py @@ -30,6 +30,7 @@ ) from hathor.transaction.scripts import P2PKH, parse_address_script from hathor.transaction.storage import TransactionMemoryStorage +from hathor.transaction.transaction_metadata import ValidationState from hathor.transaction.util import int_to_bytes from hathor.wallet import Wallet from tests import unittest @@ -197,6 +198,7 @@ def test_struct(self): def 
test_children_update(self): tx = self._gen_tx_spending_genesis_block() + tx.get_metadata().validation = ValidationState.FULL # get info before update children_len = [] diff --git a/tests/tx/test_tx_storage.py b/tests/tx/test_tx_storage.py index bd77f2028..97ef9d9dc 100644 --- a/tests/tx/test_tx_storage.py +++ b/tests/tx/test_tx_storage.py @@ -223,6 +223,21 @@ def test_save_block(self): def test_save_tx(self): self.validate_save(self.tx) + def test_pre_save_validation_invalid_tx_1(self): + self.tx.get_metadata().validation = ValidationState.BASIC + with self.assertRaises(AssertionError): + self.validate_save(self.tx) + + def test_pre_save_validation_invalid_tx_2(self): + self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + with self.assertRaises(AssertionError): + self.validate_save(self.tx) + + def test_pre_save_validation_success(self): + self.tx.get_metadata().validation = ValidationState.BASIC + self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + self.validate_save(self.tx) + def test_save_token_creation_tx(self): tx = create_tokens(self.manager, propagate=False) tx.get_metadata().validation = ValidationState.FULL diff --git a/tests/wallet/test_wallet.py b/tests/wallet/test_wallet.py index fcaa87741..ed05b53a6 100644 --- a/tests/wallet/test_wallet.py +++ b/tests/wallet/test_wallet.py @@ -55,6 +55,8 @@ def test_wallet_keys_storage(self): self.assertEqual(key, key2) def test_wallet_create_transaction(self): + from hathor.transaction.transaction_metadata import ValidationState + genesis_private_key_bytes = get_private_key_bytes( self.genesis_private_key, encryption_algorithm=serialization.BestAvailableEncryption(PASSWORD) @@ -84,6 +86,7 @@ def test_wallet_create_transaction(self): tx1 = w.prepare_transaction_compute_inputs(Transaction, [out], self.storage) tx1.storage = self.storage tx1.update_hash() + tx1.get_metadata().validation = ValidationState.FULL self.storage.save_transaction(tx1) w.on_new_tx(tx1) 
self.assertEqual(len(w.spent_txs), 1) @@ -99,6 +102,7 @@ def test_wallet_create_transaction(self): outputs=[out], tx_storage=self.storage) tx2.storage = self.storage tx2.update_hash() + tx2.get_metadata().validation = ValidationState.FULL self.storage.save_transaction(tx2) w.on_new_tx(tx2) self.assertEqual(len(w.spent_txs), 2) diff --git a/tests/wallet/test_wallet_hd.py b/tests/wallet/test_wallet_hd.py index bec8154d1..c231053ba 100644 --- a/tests/wallet/test_wallet_hd.py +++ b/tests/wallet/test_wallet_hd.py @@ -25,6 +25,8 @@ def setUp(self): self.TOKENS = self.BLOCK_TOKENS def test_transaction_and_balance(self): + from hathor.transaction.transaction_metadata import ValidationState + # generate a new block and check if we increase balance new_address = self.wallet.get_unused_address() out = WalletOutputInfo(decode_address(new_address), self.TOKENS, timelock=None) @@ -42,6 +44,7 @@ def test_transaction_and_balance(self): tx1.update_hash() tx1.verify_script(tx1.inputs[0], block) tx1.storage = self.tx_storage + tx1.get_metadata().validation = ValidationState.FULL self.wallet.on_new_tx(tx1) self.tx_storage.save_transaction(tx1) self.assertEqual(len(self.wallet.spent_txs), 1) @@ -60,6 +63,7 @@ def test_transaction_and_balance(self): tx2.update_hash() tx2.storage = self.tx_storage tx2.verify_script(tx2.inputs[0], tx1) + tx2.get_metadata().validation = ValidationState.FULL self.tx_storage.save_transaction(tx2) self.wallet.on_new_tx(tx2) self.assertEqual(len(self.wallet.spent_txs), 2) From 9ba84cc8116c27698da8e1e3a706dfd27906e60c Mon Sep 17 00:00:00 2001 From: Marcelo Salhab Brogliato Date: Fri, 19 May 2023 00:48:59 -0500 Subject: [PATCH 07/24] refactor(stratum): Build StratumFactory on builder --- hathor/builder/builder.py | 22 ++++++++++++++++------ hathor/builder/cli_builder.py | 7 ++++++- hathor/cli/run_node.py | 6 ++++++ hathor/manager.py | 16 ++++------------ hathor/stratum/stratum.py | 10 ++-------- tests/others/test_init_manager.py | 2 +- 
tests/resources/test_stratum.py | 5 ----- tests/tx/test_stratum.py | 3 +-- 8 files changed, 36 insertions(+), 35 deletions(-) diff --git a/hathor/builder/builder.py b/hathor/builder/builder.py index a5954880e..137cbd2f8 100644 --- a/hathor/builder/builder.py +++ b/hathor/builder/builder.py @@ -30,6 +30,7 @@ from hathor.p2p.peer_id import PeerId from hathor.pubsub import PubSubManager from hathor.storage import RocksDBStorage +from hathor.stratum import StratumFactory from hathor.transaction.storage import TransactionMemoryStorage, TransactionRocksDBStorage, TransactionStorage from hathor.util import Random, Reactor, get_environment_info from hathor.wallet import BaseWallet, Wallet @@ -56,6 +57,7 @@ class BuildArtifacts(NamedTuple): indexes: Optional[IndexesManager] wallet: Optional[BaseWallet] rocksdb_storage: Optional[RocksDBStorage] + stratum_factory: Optional[StratumFactory] class Builder: @@ -109,7 +111,7 @@ def __init__(self) -> None: self._enable_sync_v1: Optional[bool] = None self._enable_sync_v2: Optional[bool] = None - self._stratum_port: Optional[int] = None + self._enable_stratum_server: Optional[bool] = None self._full_verification: Optional[bool] = None @@ -152,9 +154,6 @@ def build(self) -> BuildArtifacts: if self._enable_sync_v2 is not None: kwargs['enable_sync_v2'] = self._enable_sync_v2 - if self._stratum_port is not None: - kwargs['stratum_port'] = self._stratum_port - if self._network is None: raise TypeError('you must set a network') @@ -180,6 +179,10 @@ def build(self) -> BuildArtifacts: **kwargs ) + stratum_factory: Optional[StratumFactory] = None + if self._enable_stratum_server: + stratum_factory = self._create_stratum_server(manager) + self.artifacts = BuildArtifacts( peer_id=peer_id, settings=settings, @@ -193,6 +196,7 @@ def build(self) -> BuildArtifacts: indexes=indexes, wallet=wallet, rocksdb_storage=self._rocksdb_storage, + stratum_factory=stratum_factory, ) return self.artifacts @@ -252,6 +256,12 @@ def _get_or_create_pubsub(self) -> 
PubSubManager: self._pubsub = PubSubManager(self._get_reactor()) return self._pubsub + def _create_stratum_server(self, manager: HathorManager) -> StratumFactory: + stratum_factory = StratumFactory(manager=manager) + manager.stratum_factory = stratum_factory + manager.metrics.stratum_factory = stratum_factory + return stratum_factory + def _get_or_create_rocksdb_storage(self) -> RocksDBStorage: assert self._rocksdb_path is not None @@ -363,9 +373,9 @@ def enable_keypair_wallet(self, directory: str, *, unlock: Optional[bytes] = Non self._wallet_unlock = unlock return self - def enable_stratum_server(self, port: int) -> 'Builder': + def enable_stratum_server(self) -> 'Builder': self.check_if_can_modify() - self._stratum_port = port + self._enable_stratum_server = True return self def enable_address_index(self) -> 'Builder': diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index 39ff6b68b..25628f59a 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -31,6 +31,7 @@ from hathor.p2p.peer_id import PeerId from hathor.p2p.utils import discover_hostname from hathor.pubsub import PubSubManager +from hathor.stratum import StratumFactory from hathor.wallet import BaseWallet, HDWallet, Wallet logger = get_logger() @@ -176,7 +177,6 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa tx_storage=tx_storage, event_manager=event_manager, wallet=self.wallet, - stratum_port=args.stratum, ssl=True, checkpoints=settings.CHECKPOINTS, enable_sync_v1=enable_sync_v1, @@ -188,6 +188,11 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa enable_event_queue=bool(args.x_enable_event_queue) ) + if args.stratum: + stratum_factory = StratumFactory(self.manager) + self.manager.stratum_factory = stratum_factory + self.manager.metrics.stratum_factory = stratum_factory + if args.data: self.manager.set_cmd_path(args.data) diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py 
index c8ea2af04..fc0bf640f 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -137,9 +137,14 @@ def prepare(self, args: Namespace, *, register_resources: bool = True) -> None: except BuilderError as err: self.log.error(str(err)) sys.exit(2) + self.tx_storage = self.manager.tx_storage self.wallet = self.manager.wallet self.start_manager(args) + + if args.stratum: + self.reactor.listenTCP(args.stratum, self.manager.stratum_factory) + if register_resources: resources_builder = ResourcesBuilder(self.manager, builder.event_ws_factory) status_server = resources_builder.build(args) @@ -163,6 +168,7 @@ def prepare(self, args: Namespace, *, register_resources: bool = True) -> None: indexes=self.manager.tx_storage.indexes, wallet=self.manager.wallet, rocksdb_storage=getattr(builder, 'rocksdb_storage', None), + stratum_factory=self.manager.stratum_factory, ) def start_sentry_if_possible(self, args: Namespace) -> None: diff --git a/hathor/manager.py b/hathor/manager.py index b608f4386..0479bf447 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -45,6 +45,7 @@ from hathor.p2p.protocol import HathorProtocol from hathor.profiler import get_cpu_profiler from hathor.pubsub import HathorEvents, PubSubManager +from hathor.stratum import StratumFactory from hathor.transaction import BaseTransaction, Block, MergeMinedBlock, Transaction, TxVersion, sum_weights from hathor.transaction.exceptions import TxValidationError from hathor.transaction.storage import TransactionStorage @@ -91,7 +92,6 @@ def __init__(self, network: str, hostname: Optional[str] = None, wallet: Optional[BaseWallet] = None, - stratum_port: Optional[int] = None, ssl: bool = True, enable_sync_v1: bool = False, enable_sync_v1_1: bool = True, @@ -113,9 +113,6 @@ def __init__(self, :param tx_storage: Required storage backend. :type tx_storage: :py:class:`hathor.transaction.storage.transaction_storage.TransactionStorage` - - :param stratum_port: Stratum server port. 
Stratum server will only be created if it is not None. - :type stratum_port: Optional[int] """ from hathor.metrics import Metrics from hathor.p2p.factory import HathorClientFactory, HathorServerFactory @@ -210,14 +207,9 @@ def __init__(self, self.wallet.pubsub = self.pubsub self.wallet.reactor = self.reactor - if stratum_port: - # XXX: only import if needed - from hathor.stratum import StratumFactory - self.stratum_factory: Optional[StratumFactory] = StratumFactory(manager=self, port=stratum_port) - else: - self.stratum_factory = None - # Set stratum factory for metrics object - self.metrics.stratum_factory = self.stratum_factory + # It will be inject later by the builder. + # XXX Remove this attribute after all dependencies are cleared. + self.stratum_factory: Optional[StratumFactory] = None self._allow_mining_without_peers = False diff --git a/hathor/stratum/stratum.py b/hathor/stratum/stratum.py index f50992d6f..c7acb0c6a 100644 --- a/hathor/stratum/stratum.py +++ b/hathor/stratum/stratum.py @@ -726,16 +726,14 @@ class StratumFactory(Factory): jobs: Set[UUID] manager: 'HathorManager' miner_protocols: Dict[UUID, StratumProtocol] - port: int tx_queue: List[bytes] mining_tx_pool: Dict[bytes, BaseTransaction] mined_txs: Dict[bytes, Transaction] deferreds_tx: Dict[bytes, Deferred] - def __init__(self, manager: 'HathorManager', port: int, reactor: Reactor = reactor): + def __init__(self, manager: 'HathorManager', reactor: Reactor = reactor): self.log = logger.new() self.manager = manager - self.port = port self.reactor = reactor self.jobs = set() @@ -769,13 +767,9 @@ def on_new_block(event: HathorEvents, args: EventArguments) -> None: self.update_jobs() self.manager.pubsub.subscribe(HathorEvents.NETWORK_NEW_TX_ACCEPTED, on_new_block) - # XXX: self.reactor is IReactorTime, which does not guarantee listenTCP method, normally it will have that - # method, but on tests we use a Clock instead, which does not have listenTCP, there shouldn't be any - # issues using the 
"default" reactor though - self._listen = reactor.listenTCP(self.port, self) def stop(self) -> Optional[Deferred]: - return self._listen.stopListening() + return None def mine_transaction(self, tx: Transaction, deferred: Deferred) -> None: """ diff --git a/tests/others/test_init_manager.py b/tests/others/test_init_manager.py index bdeb5f4e0..f6618b24e 100644 --- a/tests/others/test_init_manager.py +++ b/tests/others/test_init_manager.py @@ -61,7 +61,7 @@ def test_invalid_arguments(self): def tests_init_with_stratum(self): builder = TestBuilder() builder.set_tx_storage(self.tx_storage) - builder.enable_stratum_server(50505) + builder.enable_stratum_server() artifacts = builder.build() manager = artifacts.manager manager.start() diff --git a/tests/resources/test_stratum.py b/tests/resources/test_stratum.py index ee47bd35c..e73056a55 100644 --- a/tests/resources/test_stratum.py +++ b/tests/resources/test_stratum.py @@ -10,11 +10,6 @@ class StratumResourceTest(_BaseResourceTest._ResourceTest): - def _manager_kwargs(self): - kwargs = super()._manager_kwargs() - kwargs['stratum_port'] = 8123 - return kwargs - def setUp(self): super().setUp() self.web = StubSite(MiningStatsResource(self.manager)) diff --git a/tests/tx/test_stratum.py b/tests/tx/test_stratum.py index 0769c10af..42d5082df 100644 --- a/tests/tx/test_stratum.py +++ b/tests/tx/test_stratum.py @@ -38,8 +38,7 @@ def setUp(self): super().setUp() self.manager = self.create_peer('testnet') self.manager.allow_mining_without_peers() - port = self.rng.randint(8000, 9000) - self.factory = StratumFactory(self.manager, port=port, reactor=MemoryReactorHeapClock()) + self.factory = StratumFactory(self.manager, reactor=MemoryReactorHeapClock()) self.factory.start() self.protocol = self.factory.buildProtocol('127.0.0.1') self.transport = StringTransportWithDisconnection() From d7508e0432dc36b059d5e4dad2be1ecf4c64886f Mon Sep 17 00:00:00 2001 From: Marcelo Salhab Brogliato Date: Fri, 19 May 2023 13:03:43 -0500 Subject: [PATCH 
08/24] feat(sysctl): Add helper command --- hathor/sysctl/protocol.py | 44 ++++++++++++++++++++++++++++++++-- hathor/sysctl/sysctl.py | 7 ++++++ tests/sysctl/test_sysctl.py | 47 +++++++++++++++++++++++++++++++++++-- 3 files changed, 94 insertions(+), 4 deletions(-) diff --git a/hathor/sysctl/protocol.py b/hathor/sysctl/protocol.py index 45c879dc7..c84b10900 100644 --- a/hathor/sysctl/protocol.py +++ b/hathor/sysctl/protocol.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import inspect import json -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, Callable, List, Optional from pydantic import ValidationError from twisted.protocols.basic import LineReceiver @@ -35,7 +36,11 @@ def lineReceived(self, raw: bytes) -> None: line = raw.decode('utf-8').strip() except UnicodeDecodeError: self.sendError('command is not utf-8 valid') - if line == '!backup': + if line.startswith('!help'): + _, _, path = line.partition(' ') + self.help(path) + return + elif line == '!backup': self.backup() return head, separator, tail = line.partition('=') @@ -89,6 +94,28 @@ def backup(self) -> None: output = f'{key}={self._serialize(value)}' self.sendLine(output.encode('utf-8')) + def help(self, path: str) -> None: + """Show all available commands.""" + if path == '': + self._send_all_commands() + return + try: + cmd = self.root.get_command(path) + except SysctlEntryNotFound: + self.sendError(f'{path} not found') + return + + output: List[str] = [] + output.extend(self._get_method_help('getter', cmd.getter)) + output.append('') + output.extend(self._get_method_help('setter', cmd.setter)) + self.sendLine('\n'.join(output).encode('utf-8')) + + def _send_all_commands(self) -> None: + all_paths = list(self.root.get_all_paths()) + for path in sorted(all_paths): + self.sendLine(path.encode('utf-8')) + def _serialize(self, value: Any) -> str: """Serialize the return of a sysctl getter.""" output: 
str @@ -107,3 +134,16 @@ def _deserialize(self, value_str: str) -> Any: if len(parts) > 1: return tuple(json.loads(x) for x in parts) return json.loads(value_str) + + def _get_method_help(self, method_name: str, method: Optional[Callable]) -> List[str]: + """Return a list of strings with the help for `method`.""" + if method is None: + return [f'{method_name}: not available'] + + output: List[str] = [] + doc: str = inspect.getdoc(method) or '(no help found)' + signature = inspect.signature(method) + output.append(f'{method_name}{signature}:') + for line in doc.splitlines(): + output.append(f' {line.strip()}') + return output diff --git a/hathor/sysctl/sysctl.py b/hathor/sysctl/sysctl.py index c45f62667..a2f40a778 100644 --- a/hathor/sysctl/sysctl.py +++ b/hathor/sysctl/sysctl.py @@ -102,3 +102,10 @@ def get_all(self, prefix: str = '') -> Iterator[Tuple[str, Any]]: continue value = cmd.getter() yield (self.path_join(prefix, path), value) + + def get_all_paths(self, prefix: str = '') -> Iterator[str]: + """Return all available paths.""" + for path, child in self._children.items(): + yield from child.get_all_paths(self.path_join(prefix, path)) + for path, cmd in self._commands.items(): + yield self.path_join(prefix, path) diff --git a/tests/sysctl/test_sysctl.py b/tests/sysctl/test_sysctl.py index ead97371c..6e0e80ab5 100644 --- a/tests/sysctl/test_sysctl.py +++ b/tests/sysctl/test_sysctl.py @@ -16,11 +16,16 @@ class SysctlTest(unittest.TestCase): def setUp(self) -> None: super().setUp() + getter_max_connections = MagicMock(return_value=3) + getter_max_connections.__doc__ = 'Return the number of maximum connections.' + setter_max_connections = MagicMock() + setter_max_connections.__doc__ = 'Set the number of maximum connections.' 
+ net = Sysctl() net.register( 'max_connections', - MagicMock(return_value=3), # int - MagicMock(), + getter_max_connections, # int + setter_max_connections, ) net.register( 'readonly', @@ -141,6 +146,17 @@ def test_get_all(self) -> None: ('net.readonly', 0.25), }) + def test_get_all_paths(self) -> None: + all_items = set(self.root.get_all_paths()) + self.assertEqual(all_items, { + 'net.max_connections', + 'core.writeonly', + 'core.loglevel', + 'net.rate_limit', + 'net.readonly', + 'ab.bc.cd.useless', + }) + ################## # Protocol: Get ################## @@ -228,3 +244,30 @@ def test_proto_backup(self) -> None: b'net.readonly=0.25', b'', # output ends with a new line (\n) }) + + def test_proto_help(self) -> None: + self.proto.lineReceived(b'!help') + output = self.tr.value() + lines = set(output.split(b'\n')) + self.assertEqual(lines, { + b'net.max_connections', + b'core.writeonly', + b'core.loglevel', + b'net.rate_limit', + b'net.readonly', + b'ab.bc.cd.useless', + b'', # output ends with a new line (\n) + }) + + def test_proto_help_method(self) -> None: + self.proto.lineReceived(b'!help net.max_connections') + output = self.tr.value() + lines = output.split(b'\n') + self.assertEqual(lines, [ + b'getter(*args, **kwargs):', + b' Return the number of maximum connections.', + b'', + b'setter(*args, **kwargs):', + b' Set the number of maximum connections.', + b'' + ]) From 5449ab665919e208d19c7b4a8464d71dda0215f3 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Mon, 22 May 2023 15:12:49 -0300 Subject: [PATCH 09/24] feat(events): reset event queue command (#608) --- hathor/builder/cli_builder.py | 19 ++++----- hathor/cli/main.py | 3 ++ hathor/cli/reset_event_queue.py | 48 +++++++++++++++++++++ hathor/event/event_manager.py | 2 +- hathor/event/model/event_data.py | 3 +- hathor/event/storage/event_storage.py | 15 ++++++- hathor/event/storage/memory_storage.py | 7 ++- hathor/event/storage/rocksdb_storage.py | 7 ++- hathor/manager.py | 9 ++-- 
tests/event/test_base_event.py | 1 + tests/event/test_event_storage.py | 57 ++++++++++++++++++++++--- tests/event/websocket/test_protocol.py | 2 +- tests/others/test_cli_builder.py | 2 + 13 files changed, 146 insertions(+), 29 deletions(-) create mode 100644 hathor/cli/reset_event_queue.py diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index 25628f59a..d09f7200a 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -168,6 +168,14 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa soft_voided_tx_ids = set(settings.SOFT_VOIDED_TX_IDS) consensus_algorithm = ConsensusAlgorithm(soft_voided_tx_ids, pubsub=pubsub) + if args.x_enable_event_queue: + if not settings.ENABLE_EVENT_QUEUE_FEATURE: + self.log.error('The event queue feature is not available yet') + sys.exit(-1) + + self.log.info('--x-enable-event-queue flag provided. ' + 'The events detected by the full node will be stored and can be retrieved by clients') + self.manager = HathorManager( reactor, pubsub=pubsub, @@ -185,7 +193,7 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa consensus_algorithm=consensus_algorithm, environment_info=get_environment_info(args=str(args), peer_id=peer_id.id), full_verification=full_verification, - enable_event_queue=bool(args.x_enable_event_queue) + enable_event_queue=args.x_enable_event_queue ) if args.stratum: @@ -230,15 +238,6 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa if args.memory_indexes and args.memory_storage: self.log.warn('--memory-indexes is implied for memory storage or JSON storage') - if args.x_enable_event_queue: - if not settings.ENABLE_EVENT_QUEUE_FEATURE: - self.log.error('The event queue feature is not available yet') - sys.exit(-1) - - self.manager.enable_event_queue = True - self.log.info('--x-enable-event-queue flag provided. 
' - 'The events detected by the full node will be stored and can be retrieved by clients') - for description in args.listen: self.manager.add_listen_address(description) diff --git a/hathor/cli/main.py b/hathor/cli/main.py index d1a81c4b9..82403398f 100644 --- a/hathor/cli/main.py +++ b/hathor/cli/main.py @@ -48,6 +48,7 @@ def __init__(self) -> None: peer_id, quick_test, replay_logs, + reset_event_queue, run_node, shell, stratum_mining, @@ -78,6 +79,8 @@ def __init__(self) -> None: self.add_cmd('oracle', 'oracle-get-pubkey', oracle_get_pubkey, 'Read an oracle private key and output public key hash') self.add_cmd('oracle', 'oracle-encode-data', oracle_encode_data, 'Encode data and sign it with a private key') + self.add_cmd('events', 'reset-event-queue', reset_event_queue, 'Delete all events and related data from the ' + 'database') self.add_cmd('dev', 'shell', shell, 'Run a Python shell') self.add_cmd('dev', 'quick_test', quick_test, 'Similar to run_node but will quit after receiving a tx') self.add_cmd('dev', 'generate_nginx_config', nginx_config, 'Generate nginx config from OpenAPI json') diff --git a/hathor/cli/reset_event_queue.py b/hathor/cli/reset_event_queue.py new file mode 100644 index 000000000..90fd10358 --- /dev/null +++ b/hathor/cli/reset_event_queue.py @@ -0,0 +1,48 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from argparse import ArgumentParser, Namespace + +from structlog import get_logger + +logger = get_logger() + + +def create_parser() -> ArgumentParser: + from hathor.cli.util import create_parser + + parser = create_parser() + parser.add_argument('--data', help='Data directory') + + return parser + + +def execute(args: Namespace) -> None: + from hathor.event.storage import EventRocksDBStorage + from hathor.storage import RocksDBStorage + + assert args.data is not None, '--data is required' + + rocksdb_storage = RocksDBStorage(path=args.data) + event_storage = EventRocksDBStorage(rocksdb_storage) + + logger.info('removing all events and related data...') + event_storage.reset_all() + logger.info('reset complete') + + +def main(): + parser = create_parser() + args = parser.parse_args() + execute(args) diff --git a/hathor/event/event_manager.py b/hathor/event/event_manager.py index ff3197b29..2d50f04c3 100644 --- a/hathor/event/event_manager.py +++ b/hathor/event/event_manager.py @@ -83,7 +83,7 @@ def start(self, peer_id: str) -> None: self._previous_node_state = self._event_storage.get_node_state() if self._should_reload_events(): - self._event_storage.clear_events() + self._event_storage.reset_events() else: self._last_event = self._event_storage.get_last_event() self._last_existing_group_id = self._event_storage.get_last_group_id() diff --git a/hathor/event/model/event_data.py b/hathor/event/model/event_data.py index f794be3a0..f5c341593 100644 --- a/hathor/event/model/event_data.py +++ b/hathor/event/model/event_data.py @@ -88,7 +88,7 @@ def from_event_arguments(cls, args: EventArguments) -> 'EmptyData': class TxData(BaseEventData, extra=Extra.ignore): hash: str - nonce: int + nonce: Optional[int] = None timestamp: int version: int weight: float @@ -100,6 +100,7 @@ class TxData(BaseEventData, extra=Extra.ignore): token_name: Optional[str] token_symbol: Optional[str] metadata: 'TxMetadata' + aux_pow: Optional[str] = None @classmethod def 
from_event_arguments(cls, args: EventArguments) -> 'TxData': diff --git a/hathor/event/storage/event_storage.py b/hathor/event/storage/event_storage.py index 90a68d761..2cff3ad33 100644 --- a/hathor/event/storage/event_storage.py +++ b/hathor/event/storage/event_storage.py @@ -46,8 +46,19 @@ def iter_from_event(self, key: int) -> Iterator[BaseEvent]: raise NotImplementedError @abstractmethod - def clear_events(self) -> None: - """Clear all stored events and related metadata.""" + def reset_events(self) -> None: + """ + Reset event-related data: events, last_event, and last_group_id. + This should be used to clear old events from the database when reloading events. + """ + raise NotImplementedError + + @abstractmethod + def reset_all(self) -> None: + """ + Reset all data and metadata: events, last_event, last_group_id, node_state, and event_queue_enabled. + This should be used for a full wipe out of the event storage. + """ raise NotImplementedError @abstractmethod diff --git a/hathor/event/storage/memory_storage.py b/hathor/event/storage/memory_storage.py index 569d51d0f..b790bbe1f 100644 --- a/hathor/event/storage/memory_storage.py +++ b/hathor/event/storage/memory_storage.py @@ -58,11 +58,16 @@ def iter_from_event(self, key: int) -> Iterator[BaseEvent]: yield self._events[key] key += 1 - def clear_events(self) -> None: + def reset_events(self) -> None: self._events = [] self._last_event = None self._last_group_id = None + def reset_all(self) -> None: + self.reset_events() + self._node_state = None + self._event_queue_enabled = False + def save_node_state(self, state: NodeState) -> None: self._node_state = state diff --git a/hathor/event/storage/rocksdb_storage.py b/hathor/event/storage/rocksdb_storage.py index eae5f5305..edda8dcd0 100644 --- a/hathor/event/storage/rocksdb_storage.py +++ b/hathor/event/storage/rocksdb_storage.py @@ -91,7 +91,7 @@ def get_last_event(self) -> Optional[BaseEvent]: def get_last_group_id(self) -> Optional[int]: return 
self._last_group_id - def clear_events(self) -> None: + def reset_events(self) -> None: self._last_event = None self._last_group_id = None @@ -100,6 +100,11 @@ def clear_events(self) -> None: self._cf_event = self._rocksdb_storage.get_or_create_column_family(_CF_NAME_EVENT) + def reset_all(self) -> None: + self.reset_events() + self._db.delete((self._cf_meta, _KEY_NODE_STATE)) + self._db.delete((self._cf_meta, _KEY_EVENT_QUEUE_ENABLED)) + def save_node_state(self, state: NodeState) -> None: self._db.put((self._cf_meta, _KEY_NODE_STATE), int_to_bytes(state.value, 8)) diff --git a/hathor/manager.py b/hathor/manager.py index 0479bf447..c5983f6ea 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -122,8 +122,9 @@ def __init__(self, raise TypeError(f'{type(self).__name__}() at least one sync version is required') if event_manager.get_event_queue_state() is True and not enable_event_queue: - raise ValueError( - 'cannot start manager without event queue feature, as it was enabled in the previous startup' + raise InitializationError( + 'Cannot start manager without event queue feature, as it was enabled in the previous startup. 
' + 'Either enable it, or use the reset-event-queue CLI command to remove all event-related data' ) self._enable_sync_v1 = enable_sync_v1 @@ -223,10 +224,6 @@ def __init__(self, # Can be activated on the command line with --full-verification self._full_verification = full_verification - # Activated with --x-enable-event-queue flag - # It activates the event mechanism inside full node - self.enable_event_queue = False - # List of whitelisted peers self.peers_whitelist: List[str] = [] diff --git a/tests/event/test_base_event.py b/tests/event/test_base_event.py index 90b0b3e76..d99144781 100644 --- a/tests/event/test_base_event.py +++ b/tests/event/test_base_event.py @@ -50,6 +50,7 @@ def test_create_base_event(event_id, group_id): token_name=None, token_symbol=None, tokens=[], + aux_pow=None, metadata=dict( hash='abc', spent_outputs=[], diff --git a/tests/event/test_event_storage.py b/tests/event/test_event_storage.py index 4889bcb15..84427e1d7 100644 --- a/tests/event/test_event_storage.py +++ b/tests/event/test_event_storage.py @@ -137,11 +137,14 @@ def test_save_event_queue_disabled_and_retrieve(self): assert enabled is False - def test_clear_events_empty_database(self): - self._test_clear_events() + def test_reset_events_empty_database(self): + self._test_reset_events() - def _test_clear_events(self) -> None: - self.event_storage.clear_events() + def test_reset_all_empty_database(self): + self._test_reset_events() + + def _test_reset_events(self) -> None: + self.event_storage.reset_events() events = list(self.event_storage.iter_from_event(0)) last_event = self.event_storage.get_last_event() @@ -151,7 +154,22 @@ def _test_clear_events(self) -> None: assert last_event is None assert last_group_id is None - def test_clear_events_full_database(self): + def _test_reset_all(self) -> None: + self.event_storage.reset_all() + + events = list(self.event_storage.iter_from_event(0)) + last_event = self.event_storage.get_last_event() + last_group_id = 
self.event_storage.get_last_group_id() + node_state = self.event_storage.get_node_state() + event_queue_state = self.event_storage.get_event_queue_state() + + assert events == [] + assert last_event is None + assert last_group_id is None + assert node_state is None + assert event_queue_state is False + + def test_reset_events_full_database(self): n_events = 10 expected_last_group_id = 4 expected_node_state = NodeState.SYNC @@ -170,7 +188,7 @@ def test_clear_events_full_database(self): assert node_state == expected_node_state assert event_queue_state is True - self._test_clear_events() + self._test_reset_events() node_state = self.event_storage.get_node_state() event_queue_state = self.event_storage.get_event_queue_state() @@ -178,6 +196,33 @@ def test_clear_events_full_database(self): assert node_state == expected_node_state assert event_queue_state is True + def test_reset_all_full_database(self): + n_events = 10 + expected_last_group_id = 4 + expected_node_state = NodeState.SYNC + + self._populate_events_and_last_group_id(n_events=n_events, last_group_id=4) + self.event_storage.save_node_state(expected_node_state) + self.event_storage.save_event_queue_state(True) + + events = list(self.event_storage.iter_from_event(0)) + last_group_id = self.event_storage.get_last_group_id() + node_state = self.event_storage.get_node_state() + event_queue_state = self.event_storage.get_event_queue_state() + + assert len(events) == n_events + assert last_group_id == expected_last_group_id + assert node_state == expected_node_state + assert event_queue_state is True + + self._test_reset_all() + + node_state = self.event_storage.get_node_state() + event_queue_state = self.event_storage.get_event_queue_state() + + assert node_state is None + assert event_queue_state is False + @pytest.mark.skipif(not HAS_ROCKSDB, reason='requires python-rocksdb') class EventStorageRocksDBTest(EventStorageBaseTest): diff --git a/tests/event/websocket/test_protocol.py 
b/tests/event/websocket/test_protocol.py index 0ef5ded16..646344c0d 100644 --- a/tests/event/websocket/test_protocol.py +++ b/tests/event/websocket/test_protocol.py @@ -101,7 +101,7 @@ def test_send_event_response(): b'"token_name":null,"token_symbol":null,"metadata":{"hash":"abc","spent_outputs":[],' \ b'"conflict_with":[],"voided_by":[],"received_by":[],"children":[],"twins":[],' \ b'"accumulated_weight":10.0,"score":20.0,"first_block":null,"height":100,' \ - b'"validation":"validation"}},"group_id":null},"latest_event_id":10}' + b'"validation":"validation"},"aux_pow":null},"group_id":null},"latest_event_id":10}' protocol.sendMessage.assert_called_once_with(expected_payload) diff --git a/tests/others/test_cli_builder.py b/tests/others/test_cli_builder.py index 4aa4b0e00..447aacfd6 100644 --- a/tests/others/test_cli_builder.py +++ b/tests/others/test_cli_builder.py @@ -163,6 +163,7 @@ def test_event_queue_with_rocksdb_storage(self): self.assertIsInstance(manager._event_manager, EventManager) self.assertIsInstance(manager._event_manager._event_storage, EventRocksDBStorage) self.assertIsInstance(manager._event_manager._event_ws_factory, EventWebsocketFactory) + self.assertTrue(manager._enable_event_queue) def test_event_queue_with_memory_storage(self): manager = self._build(['--x-enable-event-queue', '--memory-storage']) @@ -170,6 +171,7 @@ def test_event_queue_with_memory_storage(self): self.assertIsInstance(manager._event_manager, EventManager) self.assertIsInstance(manager._event_manager._event_storage, EventMemoryStorage) self.assertIsInstance(manager._event_manager._event_ws_factory, EventWebsocketFactory) + self.assertTrue(manager._enable_event_queue) def test_event_queue_with_full_verification(self): args = ['--x-enable-event-queue', '--memory-storage', '--x-full-verification'] From 7b58c758a7ac0e7c2e9c3119013c9ff6a7236b6f Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Tue, 23 May 2023 11:50:01 -0300 Subject: [PATCH 10/24] chore: python 3.9 updates 
(#616) --- hathor/event/websocket/request.py | 8 +++----- hathor/indexes/rocksdb_utils.py | 3 +-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/hathor/event/websocket/request.py b/hathor/event/websocket/request.py index c4c5efd04..446c62840 100644 --- a/hathor/event/websocket/request.py +++ b/hathor/event/websocket/request.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Literal, Optional, Union +from typing import Annotated, Literal, Optional, Union -from pydantic import NonNegativeInt +from pydantic import Field, NonNegativeInt from hathor.utils.pydantic import BaseModel @@ -54,9 +54,7 @@ class StopStreamRequest(BaseModel): type: Literal['STOP_STREAM'] -# This could be more performatic in Python 3.9: -# Request = Annotated[StartStreamRequest | AckRequest | StopStreamRequest, Field(discriminator='type')] -Request = Union[StartStreamRequest, AckRequest, StopStreamRequest] +Request = Annotated[Union[StartStreamRequest, AckRequest, StopStreamRequest], Field(discriminator='type')] class RequestWrapper(BaseModel): diff --git a/hathor/indexes/rocksdb_utils.py b/hathor/indexes/rocksdb_utils.py index ad1a94325..3a98fed2d 100644 --- a/hathor/indexes/rocksdb_utils.py +++ b/hathor/indexes/rocksdb_utils.py @@ -121,8 +121,7 @@ def _clone_into_dict(self) -> Dict[bytes, bytes]: return {k: v for (_, k), v in it} -# XXX: should be `Collection[bytes]`, which only works on Python 3.9+ -class RocksDBSimpleSet(Collection, RocksDBIndexUtils): +class RocksDBSimpleSet(Collection[bytes], RocksDBIndexUtils): def __init__(self, db: 'rocksdb.DB', log: 'structlog.stdlib.BoundLogger', *, cf_name: bytes) -> None: self.log = log super().__init__(db, cf_name) From c5c827e0e5477f3ca3a39141baca326647985d95 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Tue, 23 May 2023 17:00:18 -0300 Subject: [PATCH 11/24] feat(feature-activation): implement basic structures (#611) --- 
hathor/cli/run_node.py | 11 +- hathor/conf/settings.py | 4 + hathor/conf/testnet.yml | 4 + hathor/exception.py | 5 + hathor/feature_activation/__init__.py | 0 hathor/feature_activation/feature.py | 27 +++ hathor/feature_activation/model/__init__.py | 0 hathor/feature_activation/model/criteria.py | 115 ++++++++++++ .../feature_activation/model/feature_state.py | 32 ++++ hathor/feature_activation/settings.py | 120 +++++++++++++ tests/feature_activation/__init__.py | 0 tests/feature_activation/test_criteria.py | 158 +++++++++++++++++ tests/feature_activation/test_settings.py | 164 ++++++++++++++++++ .../invalid_byte_hathor_settings_fixture.yml} | 0 ...valid_features_hathor_settings_fixture.yml | 55 ++++++ .../missing_hathor_settings_fixture.yml | 0 .../valid_hathor_settings_fixture.yml | 0 tests/others/test_hathor_settings.py | 67 ++++--- 18 files changed, 733 insertions(+), 29 deletions(-) create mode 100644 hathor/feature_activation/__init__.py create mode 100644 hathor/feature_activation/feature.py create mode 100644 hathor/feature_activation/model/__init__.py create mode 100644 hathor/feature_activation/model/criteria.py create mode 100644 hathor/feature_activation/model/feature_state.py create mode 100644 hathor/feature_activation/settings.py create mode 100644 tests/feature_activation/__init__.py create mode 100644 tests/feature_activation/test_criteria.py create mode 100644 tests/feature_activation/test_settings.py rename tests/others/{resources/invalid_hathor_settings_fixture.yml => fixtures/invalid_byte_hathor_settings_fixture.yml} (100%) create mode 100644 tests/others/fixtures/invalid_features_hathor_settings_fixture.yml rename tests/others/{resources => fixtures}/missing_hathor_settings_fixture.yml (100%) rename tests/others/{resources => fixtures}/valid_hathor_settings_fixture.yml (100%) diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py index fc0bf640f..b9b23a108 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -17,9 +17,11 
@@ from argparse import SUPPRESS, ArgumentParser, Namespace from typing import Any, Callable, List, Tuple +from pydantic import ValidationError from structlog import get_logger -from hathor.conf import TESTNET_SETTINGS_FILEPATH +from hathor.conf import TESTNET_SETTINGS_FILEPATH, HathorSettings +from hathor.exception import PreInitializationError logger = get_logger() # LOGGING_CAPTURE_STDOUT = True @@ -324,6 +326,13 @@ def __init__(self, *, argv=None): elif args.testnet: os.environ['HATHOR_CONFIG_YAML'] = TESTNET_SETTINGS_FILEPATH + try: + HathorSettings() + except (TypeError, ValidationError) as e: + raise PreInitializationError( + 'An error was found while trying to initialize HathorSettings. See above for details.' + ) from e + self.prepare(args) self.register_signal_handlers(args) if args.sysctl: diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 9c2ce0af1..574d0c968 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -19,6 +19,7 @@ import pydantic from hathor.checkpoint import Checkpoint +from hathor.feature_activation.settings import Settings as FeatureActivationSettings from hathor.utils import yaml from hathor.utils.pydantic import BaseModel @@ -387,6 +388,9 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: # Time to update the peers that are running sync. 
SYNC_UPDATE_INTERVAL: int = 10 * 60 # seconds + # All settings related to Feature Activation + FEATURE_ACTIVATION: FeatureActivationSettings = FeatureActivationSettings() + @classmethod def from_yaml(cls, *, filepath: str) -> 'HathorSettings': """Takes a filepath to a yaml file and returns a validated HathorSettings instance.""" diff --git a/hathor/conf/testnet.yml b/hathor/conf/testnet.yml index 1a5f73e47..98ad03430 100644 --- a/hathor/conf/testnet.yml +++ b/hathor/conf/testnet.yml @@ -35,3 +35,7 @@ CHECKPOINTS: 1_400_000: 000000000df9cb786c68a643a52a67c22ab54e8b8e41cbe9b761133f6c8abbfe 1_500_000: 000000000c3591805f4748480b59ac1788f754fc004930985a487580e2b5de8f 1_600_000: 00000000060adfdfd7d488d4d510b5779cf35a3c50df7bcff941fbb6957be4d2 + +# TODO: Enable this config when settings via python modules are no longer used +# FEATURE_ACTIVATION: +# default_threshold: 30240 # 30240 = 75% of evaluation_interval (40320) diff --git a/hathor/exception.py b/hathor/exception.py index 93ad6e9be..f898b3a66 100644 --- a/hathor/exception.py +++ b/hathor/exception.py @@ -29,6 +29,11 @@ class InvalidNewTransaction(HathorError): pass +class PreInitializationError(HathorError): + """Raised when there's anything wrong during pre-initialization that should cause it to be aborted. + """ + + class InitializationError(HathorError): """Raised when there's anything wrong during initialization that should cause it to be aborted. """ diff --git a/hathor/feature_activation/__init__.py b/hathor/feature_activation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/hathor/feature_activation/feature.py b/hathor/feature_activation/feature.py new file mode 100644 index 000000000..3c626f46f --- /dev/null +++ b/hathor/feature_activation/feature.py @@ -0,0 +1,27 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum, unique + + +@unique +class Feature(Enum): + """ + An enum containing all features that participate in the feature activation process, past or future, activated + or not, for all networks. Features should NOT be removed from this enum, to preserve history. Their values + should NOT be changed either, as configuration uses them for setting feature activation criteria. + """ + + NOP_FEATURE_1 = 'NOP_FEATURE_1' + NOP_FEATURE_2 = 'NOP_FEATURE_2' diff --git a/hathor/feature_activation/model/__init__.py b/hathor/feature_activation/model/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/hathor/feature_activation/model/criteria.py b/hathor/feature_activation/model/criteria.py new file mode 100644 index 000000000..87f489713 --- /dev/null +++ b/hathor/feature_activation/model/criteria.py @@ -0,0 +1,115 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, ClassVar, Optional + +from pydantic import Field, NonNegativeInt, validator + +from hathor import version +from hathor.utils.pydantic import BaseModel + + +class Criteria(BaseModel, validate_all=True): + """ + Represents the configuration for a certain feature activation criteria. + + Attributes: + evaluation_interval: the number of blocks in the feature activation evaluation interval. Class variable. + + max_signal_bits: the number of bits used in the first byte of a block's version field. Class variable. + + bit: which bit in the version field of the block is going to be used to signal the feature support by miners. + + start_height: the height of the first block at which this feature's activation process starts. + + timeout_height: the height of the first block at which this feature's activation process is over. + + threshold: the minimum number of blocks per evaluation interval required to activate the feature. + + minimum_activation_height: the height of the first block at which the feature is allowed to become active. + + activate_on_timeout: whether the feature should be activated even if the activation criteria are not met when + the timeout_height is reached, effectively forcing activation. + + version: the client version of hathor-core at which this feature was defined. 
+ """ + evaluation_interval: ClassVar[Optional[int]] = None + max_signal_bits: ClassVar[Optional[int]] = None + + bit: NonNegativeInt + start_height: NonNegativeInt + timeout_height: NonNegativeInt + threshold: Optional[NonNegativeInt] = None + minimum_activation_height: NonNegativeInt = 0 + activate_on_timeout: bool = False + version: str = Field(..., regex=version.BUILD_VERSION_REGEX) + + @validator('bit') + def _validate_bit(cls, bit: int) -> int: + """Validates that the bit is lower than the max_signal_bits.""" + assert Criteria.max_signal_bits is not None, 'Criteria.max_signal_bits must be set' + + if bit >= Criteria.max_signal_bits: + raise ValueError(f'bit must be lower than max_signal_bits: {bit} >= {Criteria.max_signal_bits}') + + return bit + + @validator('timeout_height') + def _validate_timeout_height(cls, timeout_height: int, values: dict[str, Any]) -> int: + """Validates that the timeout_height is greater than the start_height.""" + start_height = values.get('start_height') + assert start_height is not None, 'start_height must be set' + + if timeout_height <= start_height: + raise ValueError(f'timeout_height must be greater than start_height: {timeout_height} <= {start_height}') + + return timeout_height + + @validator('threshold') + def _validate_threshold(cls, threshold: Optional[int]) -> Optional[int]: + """Validates that the threshold is not greater than the evaluation_interval.""" + assert Criteria.evaluation_interval is not None, 'Criteria.evaluation_interval must be set' + + if threshold is not None and threshold > Criteria.evaluation_interval: + raise ValueError( + f'threshold must not be greater than evaluation_interval: {threshold} > {Criteria.evaluation_interval}' + ) + + return threshold + + @validator('minimum_activation_height') + def _validate_minimum_activation_height(cls, minimum_activation_height: int, values: dict[str, Any]) -> int: + """Validates that the minimum_activation_height is not greater than the timeout_height.""" + 
timeout_height = values.get('timeout_height') + assert timeout_height is not None, 'timeout_height must be set' + + if minimum_activation_height > timeout_height: + raise ValueError( + f'minimum_activation_height must not be greater than timeout_height: ' + f'{minimum_activation_height} > {timeout_height}' + ) + + return minimum_activation_height + + @validator('start_height', 'timeout_height', 'minimum_activation_height') + def _validate_evaluation_interval_multiple(cls, value: int) -> int: + """Validates that the value is a multiple of evaluation_interval.""" + assert Criteria.evaluation_interval is not None, 'Criteria.evaluation_interval must be set' + + if value % Criteria.evaluation_interval != 0: + raise ValueError( + f'Should be a multiple of evaluation_interval: {value} % {Criteria.evaluation_interval} != 0' + ) + + return value diff --git a/hathor/feature_activation/model/feature_state.py b/hathor/feature_activation/model/feature_state.py new file mode 100644 index 000000000..6e4932432 --- /dev/null +++ b/hathor/feature_activation/model/feature_state.py @@ -0,0 +1,32 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from enum import Enum + + +class FeatureState(Enum): + """ + Possible states a feature can be in, for each block. + + Attributes: + DEFINED: Represents that a feature is defined. It's the first state for each feature. + STARTED: Represents that the activation process for some feature is started. 
+ ACTIVE: Represents that a certain feature is activated. + FAILED: Represents that a certain feature is not and will never be activated. + """ + + DEFINED = 0 + STARTED = 1 + ACTIVE = 2 + FAILED = 3 diff --git a/hathor/feature_activation/settings.py b/hathor/feature_activation/settings.py new file mode 100644 index 000000000..be6a407bb --- /dev/null +++ b/hathor/feature_activation/settings.py @@ -0,0 +1,120 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict +from typing import Any, NamedTuple, Optional + +from pydantic import Field, NonNegativeInt, PositiveInt, validator + +from hathor.feature_activation.feature import Feature +from hathor.feature_activation.model.criteria import Criteria +from hathor.utils.pydantic import BaseModel + + +class Settings(BaseModel, validate_all=True): + """Feature Activation settings.""" + + # The number of blocks in the feature activation evaluation interval. + # Equivalent to 14 days (40320 * 30 seconds = 14 days) + evaluation_interval: PositiveInt = 40320 + + # The number of bits used in the first byte of a block's version field. The 4 left-most bits are not used. + max_signal_bits: int = Field(ge=1, le=8, default=4) + + # Specifies the default minimum number of blocks per evaluation interval required to activate a feature. + # Usually calculated from a percentage of evaluation_interval. 
+ default_threshold: NonNegativeInt = 36288 # 36288 = 90% of evaluation_interval (40320) + + # Dictionary of Feature enum to Criteria definition for all features that participate in the feature activation + # process for a network, past or future, activated or not. Features should NOT be removed from this list, and + # neither their values changed, to preserve history. + features: dict[Feature, Criteria] = {} + + @validator('evaluation_interval') + def _process_evaluation_interval(cls, evaluation_interval: int) -> int: + """Sets the evaluation_interval on Criteria.""" + Criteria.evaluation_interval = evaluation_interval + return evaluation_interval + + @validator('max_signal_bits') + def _process_max_signal_bits(cls, max_signal_bits: int) -> int: + """Sets the max_signal_bits on Criteria.""" + Criteria.max_signal_bits = max_signal_bits + return max_signal_bits + + @validator('default_threshold') + def _validate_default_threshold(cls, default_threshold: int, values: dict[str, Any]) -> int: + """Validates that the default_threshold is not greater than the evaluation_interval.""" + evaluation_interval = values.get('evaluation_interval') + assert evaluation_interval is not None, 'evaluation_interval must be set' + + if default_threshold > evaluation_interval: + raise ValueError( + f'default_threshold must not be greater than evaluation_interval: ' + f'{default_threshold} > {evaluation_interval}' + ) + + return default_threshold + + @validator('features') + def _validate_conflicting_bits(cls, features: dict[Feature, Criteria]) -> dict[Feature, Criteria]: + """ + Validates that a bit is only reused if the start_height of a new feature is + greater than the timeout_height of the previous feature that used that bit. 
+ """ + intervals_by_bit = _get_intervals_by_bit(features) + + for intervals in intervals_by_bit.values(): + overlap = _find_overlap(intervals) + + if overlap: + first, second = overlap + raise ValueError( + f'At least one pair of Features have the same bit configured for an overlapping interval: ' + f'{first.feature} and {second.feature}' + ) + + return features + + +class FeatureInterval(NamedTuple): + begin: int + end: int + feature: Feature + + +def _get_intervals_by_bit(features: dict[Feature, Criteria]) -> dict[int, list[FeatureInterval]]: + """Returns a list of (start_height, timeout_height) intervals for all features, grouped by bit.""" + intervals_by_bit: dict[int, list[FeatureInterval]] = defaultdict(list) + + for feature, criteria in features.items(): + intervals = intervals_by_bit[criteria.bit] + interval = FeatureInterval(begin=criteria.start_height, end=criteria.timeout_height, feature=feature) + intervals.append(interval) + + return intervals_by_bit + + +def _find_overlap(intervals: list[FeatureInterval]) -> Optional[tuple[FeatureInterval, FeatureInterval]]: + """Takes a list of closed intervals and returns the first pair of intervals that overlap, or None otherwise.""" + sorted_intervals = sorted(intervals, key=lambda interval: interval[0]) + previous_interval: Optional[FeatureInterval] = None + + for current_interval in sorted_intervals: + if previous_interval is not None and current_interval.begin <= previous_interval.end: + return previous_interval, current_interval + + previous_interval = current_interval + + return None diff --git a/tests/feature_activation/__init__.py b/tests/feature_activation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/feature_activation/test_criteria.py b/tests/feature_activation/test_criteria.py new file mode 100644 index 000000000..9f6faaf37 --- /dev/null +++ b/tests/feature_activation/test_criteria.py @@ -0,0 +1,158 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest.mock import patch + +import pytest +from pydantic import ValidationError + +from hathor.feature_activation.model.criteria import Criteria + +VALID_CRITERIA = dict( + bit=0, + start_height=1000, + timeout_height=2000, + threshold=0, + minimum_activation_height=0, + activate_on_timeout=False, + version='0.0.0' +) + + +@patch('hathor.feature_activation.model.criteria.Criteria.evaluation_interval', 1000) +@patch('hathor.feature_activation.model.criteria.Criteria.max_signal_bits', 2) +class TestCriteria: + @pytest.mark.parametrize( + 'criteria', + [ + VALID_CRITERIA, + dict( + bit=1, + start_height=100_000, + timeout_height=102_000, + threshold=1000, + minimum_activation_height=101_000, + activate_on_timeout=True, + version='0.52.3' + ) + ] + ) + def test_valid_criteria(self, criteria): + Criteria(**criteria) + + @pytest.mark.parametrize( + ['bit', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (2, 'bit must be lower than max_signal_bits: 2 >= 2'), + (10, 'bit must be lower than max_signal_bits: 10 >= 2') + ] + ) + def test_bit(self, bit, error): + criteria = VALID_CRITERIA | dict(bit=bit) + with pytest.raises(ValidationError) as e: + Criteria(**criteria) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + @pytest.mark.parametrize( + ['start_height', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure 
this value is greater than or equal to 0'), + (1, 'Should be a multiple of evaluation_interval: 1 % 1000 != 0'), + (45, 'Should be a multiple of evaluation_interval: 45 % 1000 != 0'), + (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0') + ] + ) + def test_start_height(self, start_height, error): + criteria = VALID_CRITERIA | dict(start_height=start_height) + with pytest.raises(ValidationError) as e: + Criteria(**criteria) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + @pytest.mark.parametrize( + ['timeout_height', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (1, 'timeout_height must be greater than start_height: 1 <= 1000'), + (45, 'timeout_height must be greater than start_height: 45 <= 1000'), + (100, 'timeout_height must be greater than start_height: 100 <= 1000'), + (1111, 'Should be a multiple of evaluation_interval: 1111 % 1000 != 0') + ] + ) + def test_timeout_height(self, timeout_height, error): + criteria = VALID_CRITERIA | dict(timeout_height=timeout_height) + with pytest.raises(ValidationError) as e: + Criteria(**criteria) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + @pytest.mark.parametrize( + ['threshold', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is greater than or equal to 0'), + (1001, 'threshold must not be greater than evaluation_interval: 1001 > 1000'), + (100000, 'threshold must not be greater than evaluation_interval: 100000 > 1000') + ] + ) + def test_threshold(self, threshold, error): + criteria = VALID_CRITERIA | dict(threshold=threshold) + with pytest.raises(ValidationError) as e: + Criteria(**criteria) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + @pytest.mark.parametrize( + ['minimum_activation_height', 'error'], + [ + (-10, 'ensure this value is greater than or equal to 0'), + (-1, 'ensure this value is 
greater than or equal to 0'), + (1, 'Should be a multiple of evaluation_interval: 1 % 1000 != 0'), + (45, 'Should be a multiple of evaluation_interval: 45 % 1000 != 0'), + (100, 'Should be a multiple of evaluation_interval: 100 % 1000 != 0'), + (10_000, 'minimum_activation_height must not be greater than timeout_height: 10000 > 2000') + ] + ) + def test_minimum_activation_height(self, minimum_activation_height, error): + criteria = VALID_CRITERIA | dict(minimum_activation_height=minimum_activation_height) + with pytest.raises(ValidationError) as e: + Criteria(**criteria) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + @pytest.mark.parametrize( + ['version', 'error'], + [ + ('0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"'), + ('alpha', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"'), + ('0.0', 'string does not match regex "^(\\d+\\.\\d+\\.\\d+(-rc\\.\\d+)?|nightly-[a-f0-9]{7,8})$"') + ] + ) + def test_version(self, version, error): + criteria = VALID_CRITERIA | dict(version=version) + with pytest.raises(ValidationError) as e: + Criteria(**criteria) + + errors = e.value.errors() + assert errors[0]['msg'] == error diff --git a/tests/feature_activation/test_settings.py b/tests/feature_activation/test_settings.py new file mode 100644 index 000000000..717801159 --- /dev/null +++ b/tests/feature_activation/test_settings.py @@ -0,0 +1,164 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from pydantic import ValidationError + +from hathor.feature_activation.feature import Feature +from hathor.feature_activation.settings import FeatureInterval, Settings, _find_overlap + + +@pytest.mark.parametrize( + 'features', + [ + dict( + NOP_FEATURE_1=dict( + bit=0, + start_height=0, + timeout_height=40320, + threshold=0, + version='0.0.0' + ), + NOP_FEATURE_2=dict( + bit=1, + start_height=0, + timeout_height=40320, + threshold=0, + version='0.0.0' + ) + ), + dict( + NOP_FEATURE_1=dict( + bit=0, + start_height=0, + timeout_height=40320, + threshold=0, + version='0.0.0' + ), + NOP_FEATURE_2=dict( + bit=0, + start_height=2 * 40320, + timeout_height=3 * 40320, + threshold=0, + version='0.0.0' + ) + ) + ] +) +def test_valid_settings(features): + data = dict(features=features) + Settings(**data) + + +@pytest.mark.parametrize( + 'features', + [ + dict( + NOP_FEATURE_1=dict( + bit=0, + start_height=0, + timeout_height=40320, + threshold=0, + version='0.0.0' + ), + NOP_FEATURE_2=dict( + bit=0, + start_height=0, + timeout_height=40320, + threshold=0, + version='0.0.0' + ) + ), + dict( + NOP_FEATURE_1=dict( + bit=0, + start_height=0, + timeout_height=40320, + threshold=0, + version='0.0.0' + ), + NOP_FEATURE_2=dict( + bit=0, + start_height=40320, + timeout_height=2 * 40320, + threshold=0, + version='0.0.0' + ) + ), + dict( + NOP_FEATURE_1=dict( + bit=1, + start_height=10 * 40320, + timeout_height=20 * 40320, + threshold=0, + version='0.0.0' + ), + NOP_FEATURE_2=dict( + bit=1, + start_height=15 * 40320, + timeout_height=16 * 40320, + threshold=0, + version='0.0.0' + ) + ) + ] +) +def test_conflicting_bits(features): + with pytest.raises(ValidationError) as e: + data = dict(features=features) + Settings(**data) + + errors = e.value.errors() + assert errors[0]['msg'] == 'At least one pair of Features have the same bit configured for an overlapping ' \ + 
'interval: Feature.NOP_FEATURE_1 and Feature.NOP_FEATURE_2' + + +@pytest.mark.parametrize( + ['evaluation_interval', 'default_threshold', 'error'], + [ + (10, 50, 'default_threshold must not be greater than evaluation_interval: 50 > 10'), + (100, 101, 'default_threshold must not be greater than evaluation_interval: 101 > 100') + ] +) +def test_default_threshold(evaluation_interval, default_threshold, error): + with pytest.raises(ValidationError) as e: + data = dict(evaluation_interval=evaluation_interval, default_threshold=default_threshold) + Settings(**data) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize( + ['intervals', 'expected'], + [ + ([], None), + ([FeatureInterval(0, 10, Feature.NOP_FEATURE_1)], None), + ([FeatureInterval(0, 10, Feature.NOP_FEATURE_1), FeatureInterval(11, 20, Feature.NOP_FEATURE_1)], None), + ( + [FeatureInterval(0, 10, Feature.NOP_FEATURE_1), FeatureInterval(10, 20, Feature.NOP_FEATURE_1)], + (FeatureInterval(0, 10, Feature.NOP_FEATURE_1), FeatureInterval(10, 20, Feature.NOP_FEATURE_1)) + ), + ( + [ + FeatureInterval(0, 10, Feature.NOP_FEATURE_1), + FeatureInterval(20, 30, Feature.NOP_FEATURE_1), + FeatureInterval(15, 25, Feature.NOP_FEATURE_1) + ], + (FeatureInterval(15, 25, Feature.NOP_FEATURE_1), FeatureInterval(20, 30, Feature.NOP_FEATURE_1)) + ) + ] +) +def test_find_overlap(intervals, expected): + assert expected == _find_overlap(intervals) diff --git a/tests/others/resources/invalid_hathor_settings_fixture.yml b/tests/others/fixtures/invalid_byte_hathor_settings_fixture.yml similarity index 100% rename from tests/others/resources/invalid_hathor_settings_fixture.yml rename to tests/others/fixtures/invalid_byte_hathor_settings_fixture.yml diff --git a/tests/others/fixtures/invalid_features_hathor_settings_fixture.yml b/tests/others/fixtures/invalid_features_hathor_settings_fixture.yml new file mode 100644 index 000000000..4835ae207 --- /dev/null +++ 
b/tests/others/fixtures/invalid_features_hathor_settings_fixture.yml @@ -0,0 +1,55 @@ +P2PKH_VERSION_BYTE: x28 +MULTISIG_VERSION_BYTE: '64' +NETWORK_NAME: testing +BOOTSTRAP_DNS: + - mainnet.hathor.network +ENABLE_PEER_WHITELIST: true +WHITELIST_URL: https://hathor-public-files.s3.amazonaws.com/whitelist_peer_ids + +GENESIS_OUTPUT_SCRIPT: 76a9147fd4ae0e4fb2d2854e76d359029d8078bb99649e88ac +GENESIS_TIMESTAMP: 1578075305 +GENESIS_BLOCK_NONCE: 2591358 +GENESIS_BLOCK_HASH: 000006cb93385b8b87a545a1cbb6197e6caff600c12cc12fc54250d39c8088fc +GENESIS_TX1_NONCE: 7715 +GENESIS_TX1_HASH: 0002d4d2a15def7604688e1878ab681142a7b155cbe52a6b4e031250ae96db0a +GENESIS_TX2_NONCE: 3769 +GENESIS_TX2_HASH: 0002ad8d1519daaddc8e1a37b14aac0b045129c01832281fb1c02d873c7abbf9 + +MIN_TX_WEIGHT_K: 0 +MIN_TX_WEIGHT_COEFFICIENT: 0 +MIN_TX_WEIGHT: 8 + +BLOCKS_PER_HALVING: 120 +MIN_BLOCK_WEIGHT: 2 +MIN_SHARE_WEIGHT: 2 +MAX_TX_WEIGHT_DIFF: 25.0 +BLOCK_DIFFICULTY_N_BLOCKS: 20 + +REWARD_SPEND_MIN_BLOCKS: 10 +SLOW_ASSERTS: true +ENABLE_EVENT_QUEUE_FEATURE: true +MAX_TX_WEIGHT_DIFF_ACTIVATION: 0.0 + +CHECKPOINTS: + 100_000: 0000000000001247073138556b4f60fff3ff6eec6521373ccee5a6526a7c10af + 200_000: 00000000000001bf13197340ae0807df2c16f4959da6054af822550d7b20e19e + + +SOFT_VOIDED_TX_IDS: + - 0000000012a922a6887497bed9c41e5ed7dc7213cae107db295602168266cd02 + - 000000001980b413ad5b5c5152338093aecfb1f5a7563d4e7fef8fb240a50bb9 + +FEATURE_ACTIVATION: + evaluation_interval: 1000 + default_threshold: 900 + features: + NOP_FEATURE_1: + bit: 0 + start_height: 0 + timeout_height: 1000 + version: 0.0.0 + NOP_FEATURE_2: + bit: 1 + start_height: 0 + timeout_height: 1001 + version: 0.0.0 diff --git a/tests/others/resources/missing_hathor_settings_fixture.yml b/tests/others/fixtures/missing_hathor_settings_fixture.yml similarity index 100% rename from tests/others/resources/missing_hathor_settings_fixture.yml rename to tests/others/fixtures/missing_hathor_settings_fixture.yml diff --git 
a/tests/others/resources/valid_hathor_settings_fixture.yml b/tests/others/fixtures/valid_hathor_settings_fixture.yml similarity index 100% rename from tests/others/resources/valid_hathor_settings_fixture.yml rename to tests/others/fixtures/valid_hathor_settings_fixture.yml diff --git a/tests/others/test_hathor_settings.py b/tests/others/test_hathor_settings.py index 47ff53e97..135effdee 100644 --- a/tests/others/test_hathor_settings.py +++ b/tests/others/test_hathor_settings.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from pathlib import Path import pytest @@ -23,37 +24,13 @@ from hathor.conf.testnet import SETTINGS as TESTNET_SETTINGS from hathor.conf.unittests import SETTINGS as UNITTESTS_SETTINGS -VALID_HATHOR_SETTINGS_FIXTURE_FILE = 'resources/valid_hathor_settings_fixture.yml' -INVALID_HATHOR_SETTINGS_FIXTURE_FILE = 'resources/invalid_hathor_settings_fixture.yml' -MISSING_HATHOR_SETTINGS_FIXTURE_FILE = 'resources/missing_hathor_settings_fixture.yml' - - -def test_valid_hathor_settings_from_yaml(hathor_settings): - parent_dir = Path(__file__).parent - settings_filepath = str(parent_dir / VALID_HATHOR_SETTINGS_FIXTURE_FILE) - - assert hathor_settings == HathorSettings.from_yaml(filepath=settings_filepath) - - -def test_invalid_hathor_settings_from_yaml(): - parent_dir = Path(__file__).parent - settings_filepath = str(parent_dir / INVALID_HATHOR_SETTINGS_FIXTURE_FILE) - - with pytest.raises(ValidationError): - HathorSettings.from_yaml(filepath=settings_filepath) - -def test_missing_hathor_settings_from_yaml(): +@pytest.mark.parametrize('filepath', ['fixtures/valid_hathor_settings_fixture.yml']) +def test_valid_hathor_settings_from_yaml(filepath): parent_dir = Path(__file__).parent - settings_filepath = str(parent_dir / MISSING_HATHOR_SETTINGS_FIXTURE_FILE) - - with pytest.raises(TypeError): - 
HathorSettings.from_yaml(filepath=settings_filepath) - + settings_filepath = str(parent_dir / filepath) -@pytest.fixture -def hathor_settings(): - return HathorSettings( + expected_hathor_settings = HathorSettings( P2PKH_VERSION_BYTE=b'\x28', MULTISIG_VERSION_BYTE=b'\x64', NETWORK_NAME='testing', @@ -90,6 +67,40 @@ def hathor_settings(): BLOCK_DIFFICULTY_N_BLOCKS=20, ) + assert expected_hathor_settings == HathorSettings.from_yaml(filepath=settings_filepath) + + +@pytest.mark.parametrize( + ['filepath', 'error'], + [ + ('fixtures/invalid_byte_hathor_settings_fixture.yml', "expected 'str' or 'bytes', got 64"), + ( + 'fixtures/invalid_features_hathor_settings_fixture.yml', + 'Should be a multiple of evaluation_interval: 1001 % 1000 != 0' + ) + ] +) +def test_invalid_hathor_settings_from_yaml(filepath, error): + parent_dir = Path(__file__).parent + settings_filepath = str(parent_dir / filepath) + + with pytest.raises(ValidationError) as e: + HathorSettings.from_yaml(filepath=settings_filepath) + + errors = e.value.errors() + assert errors[0]['msg'] == error + + +@pytest.mark.parametrize('filepath', ['fixtures/missing_hathor_settings_fixture.yml']) +def test_missing_hathor_settings_from_yaml(filepath): + parent_dir = Path(__file__).parent + settings_filepath = str(parent_dir / filepath) + + with pytest.raises(TypeError) as e: + HathorSettings.from_yaml(filepath=settings_filepath) + + assert "missing 1 required positional argument: 'NETWORK_NAME'" in str(e.value) + # TODO: Tests below are temporary while settings via python coexist with settings via yaml, just to make sure # the conversion was made correctly. After python settings are removed, this file can be removed too. 
From f822d8812b203a729a3c2bfae1241e1d9a6fa490 Mon Sep 17 00:00:00 2001 From: Marcelo Salhab Brogliato Date: Fri, 19 May 2023 17:44:41 -0500 Subject: [PATCH 12/24] refactor(p2p): Build ConnectionsManager in builder --- hathor/builder/builder.py | 63 ++++++++++++++++++++++------- hathor/builder/cli_builder.py | 28 +++++++++---- hathor/manager.py | 31 ++------------ hathor/p2p/factory.py | 23 ++++------- hathor/p2p/manager.py | 52 +++++++++++++++++------- hathor/p2p/protocol.py | 22 +++++----- hathor/p2p/sync_v1_1_factory.py | 11 ++++- hathor/p2p/sync_v1_factory.py | 11 ++++- hathor/simulator/fake_connection.py | 4 +- tests/others/test_init_manager.py | 3 +- tests/others/test_metrics.py | 3 +- tests/p2p/netfilter/test_factory.py | 7 ++-- tests/p2p/test_peer_id.py | 9 +++-- tests/p2p/test_sync.py | 4 +- tests/unittest.py | 8 +++- 15 files changed, 170 insertions(+), 109 deletions(-) diff --git a/hathor/builder/builder.py b/hathor/builder/builder.py index 137cbd2f8..c074ecf2f 100644 --- a/hathor/builder/builder.py +++ b/hathor/builder/builder.py @@ -108,8 +108,9 @@ def __init__(self) -> None: self._enable_tokens_index: bool = False self._enable_utxo_index: bool = False - self._enable_sync_v1: Optional[bool] = None - self._enable_sync_v2: Optional[bool] = None + self._enable_sync_v1: bool = False + self._enable_sync_v1_1: bool = True + self._enable_sync_v2: bool = False self._enable_stratum_server: Optional[bool] = None @@ -121,6 +122,9 @@ def build(self) -> BuildArtifacts: if self.artifacts is not None: raise ValueError('cannot call build twice') + if self._network is None: + raise TypeError('you must set a network') + settings = self._get_settings() reactor = self._get_reactor() pubsub = self._get_or_create_pubsub() @@ -130,6 +134,8 @@ def build(self) -> BuildArtifacts: soft_voided_tx_ids = self._get_soft_voided_tx_ids() consensus_algorithm = ConsensusAlgorithm(soft_voided_tx_ids, pubsub) + p2p_manager = self._get_p2p_manager() + wallet = self._get_or_create_wallet() 
event_manager = self._get_or_create_event_manager() tx_storage = self._get_or_create_tx_storage() @@ -147,16 +153,6 @@ def build(self) -> BuildArtifacts: kwargs: Dict[str, Any] = {} - if self._enable_sync_v1 is not None: - # XXX: the interface of the Builder was kept using v1 instead of v1_1 to minimize the changes needed - kwargs['enable_sync_v1_1'] = self._enable_sync_v1 - - if self._enable_sync_v2 is not None: - kwargs['enable_sync_v2'] = self._enable_sync_v2 - - if self._network is None: - raise TypeError('you must set a network') - if self._full_verification is not None: kwargs['full_verification'] = self._full_verification @@ -165,12 +161,13 @@ def build(self) -> BuildArtifacts: manager = HathorManager( reactor, + network=self._network, pubsub=pubsub, consensus_algorithm=consensus_algorithm, peer_id=peer_id, tx_storage=tx_storage, + p2p_manager=p2p_manager, event_manager=event_manager, - network=self._network, wallet=wallet, rng=self._rng, checkpoints=self._checkpoints, @@ -179,6 +176,8 @@ def build(self) -> BuildArtifacts: **kwargs ) + p2p_manager.set_manager(manager) + stratum_factory: Optional[StratumFactory] = None if self._enable_stratum_server: stratum_factory = self._create_stratum_server(manager) @@ -189,7 +188,7 @@ def build(self) -> BuildArtifacts: rng=self._rng, reactor=reactor, manager=manager, - p2p_manager=manager.connections, + p2p_manager=p2p_manager, pubsub=pubsub, consensus=consensus_algorithm, tx_storage=tx_storage, @@ -279,6 +278,27 @@ def _get_or_create_rocksdb_storage(self) -> RocksDBStorage: return self._rocksdb_storage + def _get_p2p_manager(self) -> ConnectionsManager: + enable_ssl = True + reactor = self._get_reactor() + my_peer = self._get_peer_id() + + assert self._network is not None + + p2p_manager = ConnectionsManager( + reactor, + network=self._network, + my_peer=my_peer, + pubsub=self._get_or_create_pubsub(), + ssl=enable_ssl, + whitelist_only=False, + rng=self._rng, + enable_sync_v1=self._enable_sync_v1, + 
enable_sync_v1_1=self._enable_sync_v1_1, + enable_sync_v2=self._enable_sync_v2, + ) + return p2p_manager + def _get_or_create_tx_storage(self) -> TransactionStorage: if self._tx_storage is not None: return self._tx_storage @@ -435,6 +455,11 @@ def set_enable_sync_v1(self, enable_sync_v1: bool) -> 'Builder': self._enable_sync_v1 = enable_sync_v1 return self + def set_enable_sync_v1_1(self, enable_sync_v1_1: bool) -> 'Builder': + self.check_if_can_modify() + self._enable_sync_v1_1 = enable_sync_v1_1 + return self + def set_enable_sync_v2(self, enable_sync_v2: bool) -> 'Builder': self.check_if_can_modify() self._enable_sync_v2 = enable_sync_v2 @@ -450,6 +475,16 @@ def disable_sync_v1(self) -> 'Builder': self._enable_sync_v1 = False return self + def enable_sync_v1_1(self) -> 'Builder': + self.check_if_can_modify() + self._enable_sync_v1_1 = True + return self + + def disable_sync_v1_1(self) -> 'Builder': + self.check_if_can_modify() + self._enable_sync_v1_1 = False + return self + def enable_sync_v2(self) -> 'Builder': self.check_if_can_modify() self._enable_sync_v2 = True diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index d09f7200a..7d3a9e040 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -28,10 +28,12 @@ from hathor.exception import BuilderError from hathor.indexes import IndexesManager from hathor.manager import HathorManager +from hathor.p2p.manager import ConnectionsManager from hathor.p2p.peer_id import PeerId from hathor.p2p.utils import discover_hostname from hathor.pubsub import PubSubManager from hathor.stratum import StratumFactory +from hathor.util import Random from hathor.wallet import BaseWallet, HDWallet, Wallet logger = get_logger() @@ -176,26 +178,38 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa self.log.info('--x-enable-event-queue flag provided. 
' 'The events detected by the full node will be stored and can be retrieved by clients') - self.manager = HathorManager( + p2p_manager = ConnectionsManager( reactor, + network=network, + my_peer=peer_id, pubsub=pubsub, - peer_id=peer_id, + ssl=True, + whitelist_only=False, + rng=Random(), + enable_sync_v1=enable_sync_v1, + enable_sync_v1_1=enable_sync_v1_1, + enable_sync_v2=enable_sync_v2, + ) + + self.manager = HathorManager( + reactor, network=network, hostname=hostname, + pubsub=pubsub, + consensus_algorithm=consensus_algorithm, + peer_id=peer_id, tx_storage=tx_storage, + p2p_manager=p2p_manager, event_manager=event_manager, wallet=self.wallet, - ssl=True, checkpoints=settings.CHECKPOINTS, - enable_sync_v1=enable_sync_v1, - enable_sync_v1_1=enable_sync_v1_1, - enable_sync_v2=enable_sync_v2, - consensus_algorithm=consensus_algorithm, environment_info=get_environment_info(args=str(args), peer_id=peer_id.id), full_verification=full_verification, enable_event_queue=args.x_enable_event_queue ) + p2p_manager.set_manager(self.manager) + if args.stratum: stratum_factory = StratumFactory(self.manager) self.manager.stratum_factory = stratum_factory diff --git a/hathor/manager.py b/hathor/manager.py index c5983f6ea..108a6001b 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -40,6 +40,7 @@ SpendingVoidedError, ) from hathor.mining import BlockTemplate, BlockTemplates +from hathor.p2p.manager import ConnectionsManager from hathor.p2p.peer_discovery import PeerDiscovery from hathor.p2p.peer_id import PeerId from hathor.p2p.protocol import HathorProtocol @@ -88,14 +89,11 @@ def __init__(self, consensus_algorithm: ConsensusAlgorithm, peer_id: PeerId, tx_storage: TransactionStorage, + p2p_manager: ConnectionsManager, event_manager: EventManager, network: str, hostname: Optional[str] = None, wallet: Optional[BaseWallet] = None, - ssl: bool = True, - enable_sync_v1: bool = False, - enable_sync_v1_1: bool = True, - enable_sync_v2: bool = False, capabilities: 
Optional[List[str]] = None, checkpoints: Optional[List[Checkpoint]] = None, rng: Optional[Random] = None, @@ -108,18 +106,10 @@ def __init__(self, :param network: Name of the network this node participates. Usually it is either testnet or mainnet. :type network: string - :param hostname: The hostname of this node. It is used to generate its entrypoints. - :type hostname: string - :param tx_storage: Required storage backend. :type tx_storage: :py:class:`hathor.transaction.storage.transaction_storage.TransactionStorage` """ from hathor.metrics import Metrics - from hathor.p2p.factory import HathorClientFactory, HathorServerFactory - from hathor.p2p.manager import ConnectionsManager - - if not (enable_sync_v1 or enable_sync_v1_1 or enable_sync_v2): - raise TypeError(f'{type(self).__name__}() at least one sync version is required') if event_manager.get_event_queue_state() is True and not enable_event_queue: raise InitializationError( @@ -127,9 +117,6 @@ def __init__(self, 'Either enable it, or use the reset-event-queue CLI command to remove all event-related data' ) - self._enable_sync_v1 = enable_sync_v1 - self._enable_sync_v2 = enable_sync_v2 - self._cmd_path: Optional[str] = None self.log = logger.new() @@ -177,23 +164,11 @@ def __init__(self, self._event_manager.save_event_queue_state(enable_event_queue) self._enable_event_queue = enable_event_queue - if enable_sync_v2: - assert self.tx_storage.indexes is not None - self.log.debug('enable sync-v2 indexes') - self.tx_storage.indexes.enable_deps_index() - self.tx_storage.indexes.enable_mempool_index() - self.consensus_algorithm = consensus_algorithm self.peer_discoveries: List[PeerDiscovery] = [] - self.ssl = ssl - self.server_factory = HathorServerFactory(self.network, self.my_peer, node=self, use_ssl=ssl) - self.client_factory = HathorClientFactory(self.network, self.my_peer, node=self, use_ssl=ssl) - self.connections = ConnectionsManager(self.reactor, self.my_peer, self.server_factory, self.client_factory, - 
self.pubsub, self, ssl, whitelist_only=False, rng=self.rng, - enable_sync_v1=enable_sync_v1, enable_sync_v2=enable_sync_v2, - enable_sync_v1_1=enable_sync_v1_1) + self.connections = p2p_manager self.metrics = Metrics( pubsub=self.pubsub, diff --git a/hathor/p2p/factory.py b/hathor/p2p/factory.py index 67f538390..aa55b0b2d 100644 --- a/hathor/p2p/factory.py +++ b/hathor/p2p/factory.py @@ -33,22 +33,20 @@ class HathorServerFactory(protocol.ServerFactory): """ manager: Optional[ConnectionsManager] - protocol: Optional[Type[MyServerProtocol]] = MyServerProtocol + protocol: Type[MyServerProtocol] = MyServerProtocol def __init__( self, network: str, my_peer: PeerId, - connections: Optional[ConnectionsManager] = None, + p2p_manager: ConnectionsManager, *, - node: 'HathorManager', use_ssl: bool, ): super().__init__() self.network = network self.my_peer = my_peer - self.connections = connections - self.node = node + self.p2p_manager = p2p_manager self.use_ssl = use_ssl def buildProtocol(self, addr: IAddress) -> MyServerProtocol: @@ -56,8 +54,7 @@ def buildProtocol(self, addr: IAddress) -> MyServerProtocol: p = self.protocol( network=self.network, my_peer=self.my_peer, - connections=self.connections, - node=self.node, + p2p_manager=self.p2p_manager, use_ssl=self.use_ssl, inbound=True, ) @@ -69,23 +66,20 @@ class HathorClientFactory(protocol.ClientFactory): """ HathorClientFactory is used to generate HathorProtocol objects when we connected to another peer. 
""" - manager: Optional[ConnectionsManager] - protocol: Optional[Type[MyClientProtocol]] = MyClientProtocol + protocol: Type[MyClientProtocol] = MyClientProtocol def __init__( self, network: str, my_peer: PeerId, - connections: Optional[ConnectionsManager] = None, + p2p_manager: ConnectionsManager, *, - node: 'HathorManager', use_ssl: bool, ): super().__init__() self.network = network self.my_peer = my_peer - self.connections = connections - self.node = node + self.p2p_manager = p2p_manager self.use_ssl = use_ssl def buildProtocol(self, addr: IAddress) -> MyClientProtocol: @@ -93,8 +87,7 @@ def buildProtocol(self, addr: IAddress) -> MyClientProtocol: p = self.protocol( network=self.network, my_peer=self.my_peer, - connections=self.connections, - node=self.node, + p2p_manager=self.p2p_manager, use_ssl=self.use_ssl, inbound=False, ) diff --git a/hathor/p2p/manager.py b/hathor/p2p/manager.py index 5ef2a7174..f09a83c90 100644 --- a/hathor/p2p/manager.py +++ b/hathor/p2p/manager.py @@ -40,7 +40,6 @@ from twisted.internet.interfaces import IDelayedCall from hathor.manager import HathorManager - from hathor.p2p.factory import HathorClientFactory, HathorServerFactory logger = get_logger() settings = HathorSettings() @@ -78,6 +77,7 @@ class ConnectionsManager: class GlobalRateLimiter: SEND_TIPS = 'NodeSyncTimestamp.send_tips' + manager: Optional['HathorManager'] connections: Set[HathorProtocol] connected_peers: Dict[str, HathorProtocol] connecting_peers: Dict[IStreamClientEndpoint, _ConnectingPeer] @@ -87,9 +87,16 @@ class GlobalRateLimiter: rate_limiter: RateLimiter - def __init__(self, reactor: Reactor, my_peer: PeerId, server_factory: 'HathorServerFactory', - client_factory: 'HathorClientFactory', pubsub: PubSubManager, manager: 'HathorManager', - ssl: bool, rng: Random, whitelist_only: bool, enable_sync_v1: bool, enable_sync_v2: bool, + def __init__(self, + reactor: Reactor, + network: str, + my_peer: PeerId, + pubsub: PubSubManager, + ssl: bool, + rng: Random, + 
whitelist_only: bool, + enable_sync_v1: bool, + enable_sync_v2: bool, enable_sync_v1_1: bool) -> None: from hathor.p2p.sync_v1_1_factory import SyncV11Factory from hathor.p2p.sync_v1_factory import SyncV1Factory @@ -99,21 +106,23 @@ def __init__(self, reactor: Reactor, my_peer: PeerId, server_factory: 'HathorSer self.log = logger.new() self.rng = rng - self.manager = manager + self.manager = None self.reactor = reactor self.my_peer = my_peer + self.network = network + # Options self.localhost_only = False # Factories. - self.server_factory = server_factory - self.server_factory.connections = self - - self.client_factory = client_factory - self.client_factory.connections = self + from hathor.p2p.factory import HathorClientFactory, HathorServerFactory + self.use_ssl = ssl + self.server_factory = HathorServerFactory(self.network, self.my_peer, p2p_manager=self, use_ssl=self.use_ssl) + self.client_factory = HathorClientFactory(self.network, self.my_peer, p2p_manager=self, use_ssl=self.use_ssl) + # Global maximum number of connections. self.max_connections: int = settings.PEER_MAX_CONNECTIONS # Global rate limiter for all connections. @@ -162,11 +171,13 @@ def __init__(self, reactor: Reactor, my_peer: PeerId, server_factory: 'HathorSer # Pubsub object to publish events self.pubsub = pubsub - self.ssl = ssl - # Parameter to explicitly enable whitelist-only mode, when False it will still check the whitelist for sync-v1 self.whitelist_only = whitelist_only + self.enable_sync_v1 = enable_sync_v1 + self.enable_sync_v1_1 = enable_sync_v1_1 + self.enable_sync_v2 = enable_sync_v2 + # sync-manager factories self._sync_factories = {} if enable_sync_v1: @@ -176,6 +187,16 @@ def __init__(self, reactor: Reactor, my_peer: PeerId, server_factory: 'HathorSer if enable_sync_v2: self._sync_factories[SyncVersion.V2] = SyncV1Factory(self) + def set_manager(self, manager: 'HathorManager') -> None: + """Set the manager. 
This method must be called before start().""" + self.manager = manager + if self.enable_sync_v2: + assert self.manager.tx_storage.indexes is not None + indexes = self.manager.tx_storage.indexes + self.log.debug('enable sync-v2 indexes') + indexes.enable_deps_index() + indexes.enable_mempool_index() + def disable_rate_limiter(self) -> None: """Disable global rate limiter.""" self.rate_limiter.unset_limit(self.GlobalRateLimiter.SEND_TIPS) @@ -228,6 +249,7 @@ def _get_peers_count(self) -> PeerConnectionsMetrics: def get_sync_versions(self) -> Set[SyncVersion]: """Set of versions that were enabled and are supported.""" + assert self.manager is not None if self.manager.has_sync_version_capability(): return set(self._sync_factories.keys()) else: @@ -410,6 +432,7 @@ def reconnect_to_all(self) -> None: TODO(epnichols): Should we always connect to *all*? Should there be a max #? """ # when we have no connected peers left, run the discovery process again + assert self.manager is not None if len(self.connected_peers) < 1: self.manager.do_discovery() now = int(self.reactor.seconds()) @@ -458,6 +481,7 @@ def _update_whitelist_err(self, *args: Any, **kwargs: Any) -> None: self.log.error('update whitelist failed', args=args, kwargs=kwargs) def _update_whitelist_cb(self, body: Optional[bytes]) -> None: + assert self.manager is not None if body is None: self.log.warn('update whitelist got no response') return @@ -521,7 +545,7 @@ def connect_to(self, description: str, peer: Optional[PeerId] = None, use_ssl: O return if use_ssl is None: - use_ssl = self.ssl + use_ssl = self.use_ssl connection_string, peer_id = description_to_connection_string(description) # When using twisted endpoints we can't have // in the connection string endpoint_url = connection_string.replace('//', '') @@ -564,7 +588,7 @@ def listen(self, description: str, use_ssl: Optional[bool] = None) -> IStreamSer endpoint = endpoints.serverFromString(self.reactor, description) if use_ssl is None: - use_ssl = self.ssl + 
use_ssl = self.use_ssl factory: IProtocolFactory if use_ssl: diff --git a/hathor/p2p/protocol.py b/hathor/p2p/protocol.py index a4e09c244..2f2a17482 100644 --- a/hathor/p2p/protocol.py +++ b/hathor/p2p/protocol.py @@ -74,7 +74,7 @@ class WarningFlags(str, Enum): network: str my_peer: PeerId - connections: Optional['ConnectionsManager'] + connections: 'ConnectionsManager' node: 'HathorManager' app_version: str last_message: float @@ -91,19 +91,17 @@ class WarningFlags(str, Enum): idle_timeout: int sync_version: Optional[SyncVersion] # version chosen to be used on this connection - def __init__(self, network: str, my_peer: PeerId, connections: Optional['ConnectionsManager'] = None, *, - node: 'HathorManager', use_ssl: bool, inbound: bool) -> None: + def __init__(self, network: str, my_peer: PeerId, p2p_manager: 'ConnectionsManager', + *, use_ssl: bool, inbound: bool) -> None: self.network = network self.my_peer = my_peer - self.connections = connections - self.node = node + self.connections = p2p_manager - if self.connections is not None: - assert self.connections.reactor is not None - self.reactor = self.connections.reactor - else: - from hathor.util import reactor - self.reactor = reactor + assert p2p_manager.manager is not None + self.node = p2p_manager.manager + + assert self.connections.reactor is not None + self.reactor = self.connections.reactor # Indicate whether it is an inbound connection (true) or an outbound connection (false). 
self.inbound = inbound @@ -292,7 +290,7 @@ def recv_message(self, cmd: ProtocolMessages, payload: str) -> Optional[Deferred """ assert self.state is not None - self.last_message = self.node.reactor.seconds() + self.last_message = self.reactor.seconds() self.reset_idle_timeout() if not self.ratelimit.add_hit(self.RateLimitKeys.GLOBAL): diff --git a/hathor/p2p/sync_v1_1_factory.py b/hathor/p2p/sync_v1_1_factory.py index 99af53ff6..42d79bacf 100644 --- a/hathor/p2p/sync_v1_1_factory.py +++ b/hathor/p2p/sync_v1_1_factory.py @@ -27,7 +27,14 @@ class SyncV11Factory(SyncManagerFactory): def __init__(self, connections: ConnectionsManager): - self.downloader = Downloader(connections.manager) + self.connections = connections + self._downloader: Optional[Downloader] = None + + def get_downloader(self) -> Downloader: + if self._downloader is None: + assert self.connections.manager is not None + self._downloader = Downloader(self.connections.manager) + return self._downloader def create_sync_manager(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncManager: - return NodeSyncTimestamp(protocol, downloader=self.downloader, reactor=reactor) + return NodeSyncTimestamp(protocol, downloader=self.get_downloader(), reactor=reactor) diff --git a/hathor/p2p/sync_v1_factory.py b/hathor/p2p/sync_v1_factory.py index e332b7f3c..0811beab0 100644 --- a/hathor/p2p/sync_v1_factory.py +++ b/hathor/p2p/sync_v1_factory.py @@ -27,7 +27,14 @@ class SyncV1Factory(SyncManagerFactory): def __init__(self, connections: ConnectionsManager): - self.downloader = Downloader(connections.manager) + self.connections = connections + self._downloader: Optional[Downloader] = None + + def get_downloader(self) -> Downloader: + if self._downloader is None: + assert self.connections.manager is not None + self._downloader = Downloader(self.connections.manager) + return self._downloader def create_sync_manager(self, protocol: 'HathorProtocol', reactor: Optional[Reactor] = None) -> SyncManager: - 
return NodeSyncTimestamp(protocol, downloader=self.downloader, reactor=reactor) + return NodeSyncTimestamp(protocol, downloader=self.get_downloader(), reactor=reactor) diff --git a/hathor/simulator/fake_connection.py b/hathor/simulator/fake_connection.py index 5f3877e97..a292d8510 100644 --- a/hathor/simulator/fake_connection.py +++ b/hathor/simulator/fake_connection.py @@ -53,8 +53,8 @@ def __init__(self, manager1: 'HathorManager', manager2: 'HathorManager', *, late self.latency = latency self.is_connected = True - self._proto1 = manager1.server_factory.buildProtocol(HostnameAddress(b'fake', 0)) - self._proto2 = manager2.client_factory.buildProtocol(HostnameAddress(b'fake', 0)) + self._proto1 = manager1.connections.server_factory.buildProtocol(HostnameAddress(b'fake', 0)) + self._proto2 = manager2.connections.client_factory.buildProtocol(HostnameAddress(b'fake', 0)) self.tr1 = HathorStringTransport(self._proto2.my_peer) self.tr2 = HathorStringTransport(self._proto1.my_peer) diff --git a/tests/others/test_init_manager.py b/tests/others/test_init_manager.py index f6618b24e..2fe481543 100644 --- a/tests/others/test_init_manager.py +++ b/tests/others/test_init_manager.py @@ -50,11 +50,12 @@ def test_invalid_arguments(self): manager = artifacts.manager del manager - # disabling both sync versions should be invalid + # disabling all sync versions should be invalid with self.assertRaises(TypeError): builder = TestBuilder() builder.set_tx_storage(self.tx_storage) builder.disable_sync_v1() + builder.disable_sync_v1_1() builder.disable_sync_v2() builder.build() diff --git a/tests/others/test_metrics.py b/tests/others/test_metrics.py index 58cada2f1..b1e5d8a71 100644 --- a/tests/others/test_metrics.py +++ b/tests/others/test_metrics.py @@ -210,8 +210,7 @@ def build_hathor_protocol(): protocol = HathorProtocol( network="testnet", my_peer=my_peer, - connections=manager.connections, - node=manager, + p2p_manager=manager.connections, use_ssl=False, inbound=False ) diff --git 
a/tests/p2p/netfilter/test_factory.py b/tests/p2p/netfilter/test_factory.py index 76f28ada1..2dc4d5cde 100644 --- a/tests/p2p/netfilter/test_factory.py +++ b/tests/p2p/netfilter/test_factory.py @@ -1,13 +1,12 @@ from twisted.internet.address import IPv4Address -from hathor.p2p.factory import HathorServerFactory from hathor.p2p.netfilter import get_table from hathor.p2p.netfilter.factory import NetfilterFactory from hathor.p2p.netfilter.matches import NetfilterMatchIPAddress from hathor.p2p.netfilter.rule import NetfilterRule from hathor.p2p.netfilter.targets import NetfilterReject -from hathor.p2p.peer_id import PeerId from tests import unittest +from tests.unittest import TestBuilder class NetfilterFactoryTest(unittest.TestCase): @@ -18,7 +17,9 @@ def test_factory(self): rule = NetfilterRule(match, NetfilterReject()) pre_conn.add_rule(rule) - wrapped_factory = HathorServerFactory('testnet', PeerId(), node=None, use_ssl=False) + builder = TestBuilder() + artifacts = builder.build() + wrapped_factory = artifacts.p2p_manager.server_factory factory = NetfilterFactory(connections=None, wrappedFactory=wrapped_factory) ret = factory.buildProtocol(IPv4Address('TCP', '192.168.0.1', 1234)) diff --git a/tests/p2p/test_peer_id.py b/tests/p2p/test_peer_id.py index 344c41fe6..8cb20dca8 100644 --- a/tests/p2p/test_peer_id.py +++ b/tests/p2p/test_peer_id.py @@ -7,8 +7,8 @@ from hathor.conf import HathorSettings from hathor.p2p.peer_id import InvalidPeerIdException, PeerId from hathor.p2p.peer_storage import PeerStorage -from hathor.p2p.protocol import HathorProtocol from tests import unittest +from tests.unittest import TestBuilder settings = HathorSettings() @@ -150,8 +150,11 @@ def test_retry_connection(self): self.assertEqual(p.retry_timestamp, 0) def test_validate_certificate(self): + builder = TestBuilder() + artifacts = builder.build() + protocol = artifacts.p2p_manager.server_factory.buildProtocol('127.0.0.1') + peer = PeerId('testnet') - protocol = 
HathorProtocol('testnet', peer, None, node=None, use_ssl=True, inbound=True) class FakeTransport: def getPeerCertificate(self): @@ -216,7 +219,7 @@ def test_validate_entrypoint(self): peer_id.entrypoints = ['tcp://127.0.0.1:40403'] # we consider that we are starting the connection to the peer - protocol = HathorProtocol('testnet', peer_id, None, node=manager, use_ssl=True, inbound=False) + protocol = manager.connections.client_factory.buildProtocol('127.0.0.1') protocol.connection_string = 'tcp://127.0.0.1:40403' result = yield peer_id.validate_entrypoint(protocol) self.assertTrue(result) diff --git a/tests/p2p/test_sync.py b/tests/p2p/test_sync.py index c2c2724a5..8950c4601 100644 --- a/tests/p2p/test_sync.py +++ b/tests/p2p/test_sync.py @@ -268,7 +268,7 @@ def test_downloader(self): self.assertTrue(isinstance(conn.proto1.state, PeerIdState)) self.assertTrue(isinstance(conn.proto2.state, PeerIdState)) - downloader = conn.proto2.connections._sync_factories[SyncVersion.V1_1].downloader + downloader = conn.proto2.connections._sync_factories[SyncVersion.V1_1].get_downloader() node_sync1 = NodeSyncTimestamp(conn.proto1, downloader, reactor=conn.proto1.node.reactor) node_sync1.start() @@ -361,7 +361,7 @@ def _downloader_bug_setup(self): # create the peer that will experience the bug self.manager_bug = self.create_peer(self.network) - self.downloader = self.manager_bug.connections._sync_factories[SyncVersion.V1_1].downloader + self.downloader = self.manager_bug.connections._sync_factories[SyncVersion.V1_1].get_downloader() self.downloader.window_size = 1 self.conn1 = FakeConnection(self.manager_bug, self.manager1) self.conn2 = FakeConnection(self.manager_bug, self.manager2) diff --git a/tests/unittest.py b/tests/unittest.py index 65d8f2bd9..476e8bff7 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -71,6 +71,8 @@ class SyncBridgeParams: class TestBuilder(Builder): + __test__ = False + def __init__(self) -> None: super().__init__() self.set_network('testnet') @@ 
-208,9 +210,11 @@ def create_peer(self, network, peer_id=None, wallet=None, tx_storage=None, unloc builder.force_memory_index() if enable_sync_v1 is True: - builder.enable_sync_v1() + # Enable Sync v1.1 (instead of v1.0) + builder.enable_sync_v1_1() elif enable_sync_v1 is False: - builder.disable_sync_v1() + # Disable Sync v1.1 (instead of v1.0) + builder.disable_sync_v1_1() if enable_sync_v2 is True: builder.enable_sync_v2() From 14ca91ecbe6f960f9ddc13fdb959a453389c8fe3 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 19 May 2023 01:19:33 -0300 Subject: [PATCH 13/24] chore: implement dict and named_tuple utils --- hathor/utils/dict.py | 29 ++++++++++++++ hathor/utils/named_tuple.py | 52 +++++++++++++++++++++++++ tests/utils/__init__.py | 0 tests/utils/test_named_tuple.py | 69 +++++++++++++++++++++++++++++++++ 4 files changed, 150 insertions(+) create mode 100644 hathor/utils/dict.py create mode 100644 hathor/utils/named_tuple.py create mode 100644 tests/utils/__init__.py create mode 100644 tests/utils/test_named_tuple.py diff --git a/hathor/utils/dict.py b/hathor/utils/dict.py new file mode 100644 index 000000000..bf11b0062 --- /dev/null +++ b/hathor/utils/dict.py @@ -0,0 +1,29 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +def deep_merge(first: dict, second: dict) -> None: + """ + Recursively merges two dicts, altering the first one in place. 
+ + >>> dict1 = dict(a=1, b=dict(c=2, d=3), e=dict(f=4)) + >>> dict2 = dict(b=dict(d=5, e=6), e=7) + >>> deep_merge(dict1, dict2) + >>> dict1 == dict(a=1, b=dict(c=2, d=5, e=6), e=7) + True + """ + for key in second: + if key in first and isinstance(first[key], dict) and isinstance(second[key], dict): + deep_merge(first[key], second[key]) + else: + first[key] = second[key] diff --git a/hathor/utils/named_tuple.py b/hathor/utils/named_tuple.py new file mode 100644 index 000000000..b18c768c1 --- /dev/null +++ b/hathor/utils/named_tuple.py @@ -0,0 +1,52 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, NamedTuple, Optional, Type, TypeVar + +import pydantic + +from hathor.utils.pydantic import BaseModel + +T = TypeVar('T', bound=NamedTuple) + + +def validated_named_tuple_from_dict( + named_tuple_type: Type[T], + attributes_dict: dict[str, Any], + *, + validators: Optional[dict[str, classmethod]] = None +) -> T: + """ + Takes an attributes dict and returns a validated instance of the specified NamedTuple subclass. + Performs validation using pydantic. 
+ + Args: + named_tuple_type: the NamedTuple subclass to create an instance from + attributes_dict: a dict with all required attributes for the NamedTuple subclass + validators: custom pydantic validators (read https://docs.pydantic.dev/latest/usage/validators) + + Returns: a validated instance of the specified NamedTuple subclass + """ + model = pydantic.create_model_from_namedtuple( + named_tuple_type, + __base__=BaseModel, + __validators__=validators + ) + + # This intermediate step shouldn't be necessary, but for some reason pydantic.create_model_from_namedtuple + # doesn't support default attribute values, so we do this to add them + all_attributes = named_tuple_type(**attributes_dict) + validated_attributes = model(**all_attributes._asdict()) + + return named_tuple_type(**validated_attributes.dict()) diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/utils/test_named_tuple.py b/tests/utils/test_named_tuple.py new file mode 100644 index 000000000..4b51b2300 --- /dev/null +++ b/tests/utils/test_named_tuple.py @@ -0,0 +1,69 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import NamedTuple + +import pydantic +import pytest +from pydantic import ValidationError + +from hathor.utils.named_tuple import validated_named_tuple_from_dict + + +class TestTuple(NamedTuple): + a: int + b: int + c: str + + @classmethod + def validate_b(cls, b: int) -> int: + if b > 10: + raise ValueError('b cannot be greater than 10') + + return b + + +VALIDATORS = dict( + validate_b=pydantic.validator('b')(TestTuple.validate_b) +) + + +@pytest.mark.parametrize( + ['attributes', 'expected'], + [ + (dict(a=1, b=0, c='a'), TestTuple(1, 0, 'a')), + (dict(a=123, b=5, c='aa'), TestTuple(123, 5, 'aa')), + (dict(a=1010, b=10, c='aaa'), TestTuple(1010, 10, 'aaa')), + ] +) +def test_validated_named_tuple_from_dict(attributes, expected): + result = validated_named_tuple_from_dict(TestTuple, attributes, validators=VALIDATORS) + + assert result == expected + + +@pytest.mark.parametrize( + 'attributes', + [ + dict(a=1, b=11, c='a'), + dict(a=123, b=50, c='aa'), + dict(a=1010, b=100, c='aaa'), + ] +) +def test_validated_named_tuple_from_dict_error(attributes): + with pytest.raises(ValidationError) as e: + validated_named_tuple_from_dict(TestTuple, attributes, validators=VALIDATORS) + + errors = e.value.errors() + assert errors[0]['msg'] == 'b cannot be greater than 10' From ff76a0835ad5fbcc15b60ac61fbfb5988c2f17f2 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 19 May 2023 01:40:14 -0300 Subject: [PATCH 14/24] chore: implement dict_from_extended_yaml and improve dict_from_yaml --- hathor/utils/yaml.py | 53 +++++++++- tests/utils/fixtures/empty.yml | 0 tests/utils/fixtures/empty_extends.yml | 6 ++ tests/utils/fixtures/invalid_extends.yml | 6 ++ tests/utils/fixtures/number.yml | 1 + tests/utils/fixtures/self_extends.yml | 6 ++ tests/utils/fixtures/valid.yml | 4 + tests/utils/fixtures/valid_extends.yml | 6 ++ tests/utils/test_yaml.py | 127 +++++++++++++++++++++++ 9 files changed, 205 insertions(+), 4 deletions(-) create mode 100644 
tests/utils/fixtures/empty.yml create mode 100644 tests/utils/fixtures/empty_extends.yml create mode 100644 tests/utils/fixtures/invalid_extends.yml create mode 100644 tests/utils/fixtures/number.yml create mode 100644 tests/utils/fixtures/self_extends.yml create mode 100644 tests/utils/fixtures/valid.yml create mode 100644 tests/utils/fixtures/valid_extends.yml create mode 100644 tests/utils/test_yaml.py diff --git a/hathor/utils/yaml.py b/hathor/utils/yaml.py index ffaef57a1..6b590f2f4 100644 --- a/hathor/utils/yaml.py +++ b/hathor/utils/yaml.py @@ -11,13 +11,58 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -from typing import Any, Dict +import os +from pathlib import Path +from typing import Any, Union import yaml +from hathor.utils.dict import deep_merge + +_EXTENDS_KEY = 'extends' + -def dict_from(*, filepath: str) -> Dict[str, Any]: +def dict_from_yaml(*, filepath: Union[Path, str]) -> dict[str, Any]: """Takes a filepath to a yaml file and returns a dictionary with its contents.""" + if not os.path.isfile(filepath): + raise ValueError(f"'{filepath}' is not a file") + with open(filepath, 'r') as file: - return yaml.safe_load(file) + contents = yaml.safe_load(file) + + if contents is None: + return {} + + if not isinstance(contents, dict): + raise ValueError(f"'{filepath}' cannot be parsed as a dictionary") + + return contents + + +def dict_from_extended_yaml(*, filepath: Union[Path, str]) -> dict[str, Any]: + """ + Takes a filepath to a yaml file and returns a dictionary with its contents. + Supports extending another yaml file via the 'extends' key in the file. + + Note: the 'extends' key is reserved and will not be present in the returned dictionary. + To opt-out of the extension feature, use dict_from_yaml(). 
+ """ + extension_dict = dict_from_yaml(filepath=filepath) + base_file = extension_dict.pop(_EXTENDS_KEY, None) + + if not base_file: + return extension_dict + + root_path = Path(filepath).parent + base_filepath = root_path / str(base_file) + + if not os.path.isfile(base_filepath): + raise ValueError(f"'{base_filepath}' is not a file") + + assert base_filepath.resolve() != Path(filepath).resolve(), 'cannot extend self' + + base_dict = dict_from_yaml(filepath=base_filepath) + + deep_merge(base_dict, extension_dict) + + return base_dict diff --git a/tests/utils/fixtures/empty.yml b/tests/utils/fixtures/empty.yml new file mode 100644 index 000000000..e69de29bb diff --git a/tests/utils/fixtures/empty_extends.yml b/tests/utils/fixtures/empty_extends.yml new file mode 100644 index 000000000..48a29feec --- /dev/null +++ b/tests/utils/fixtures/empty_extends.yml @@ -0,0 +1,6 @@ +extends: + +a: aa +b: + d: dd + e: ee diff --git a/tests/utils/fixtures/invalid_extends.yml b/tests/utils/fixtures/invalid_extends.yml new file mode 100644 index 000000000..2046795e5 --- /dev/null +++ b/tests/utils/fixtures/invalid_extends.yml @@ -0,0 +1,6 @@ +extends: ./unknown_file.yml + +a: aa +b: + d: dd + e: ee diff --git a/tests/utils/fixtures/number.yml b/tests/utils/fixtures/number.yml new file mode 100644 index 000000000..190a18037 --- /dev/null +++ b/tests/utils/fixtures/number.yml @@ -0,0 +1 @@ +123 diff --git a/tests/utils/fixtures/self_extends.yml b/tests/utils/fixtures/self_extends.yml new file mode 100644 index 000000000..d6b41e538 --- /dev/null +++ b/tests/utils/fixtures/self_extends.yml @@ -0,0 +1,6 @@ +extends: self_extends.yml + +a: aa +b: + d: dd + e: ee diff --git a/tests/utils/fixtures/valid.yml b/tests/utils/fixtures/valid.yml new file mode 100644 index 000000000..c54a94253 --- /dev/null +++ b/tests/utils/fixtures/valid.yml @@ -0,0 +1,4 @@ +a: 1 +b: + c: 2 + d: 3 diff --git a/tests/utils/fixtures/valid_extends.yml b/tests/utils/fixtures/valid_extends.yml new file mode 100644 
index 000000000..bd73de21e --- /dev/null +++ b/tests/utils/fixtures/valid_extends.yml @@ -0,0 +1,6 @@ +extends: valid.yml + +a: aa +b: + d: dd + e: ee diff --git a/tests/utils/test_yaml.py b/tests/utils/test_yaml.py new file mode 100644 index 000000000..1a2b697ff --- /dev/null +++ b/tests/utils/test_yaml.py @@ -0,0 +1,127 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pathlib import Path + +import pytest + +from hathor.utils.yaml import dict_from_yaml, dict_from_extended_yaml + + +def test_dict_from_yaml_invalid_filepath(): + with pytest.raises(ValueError) as e: + dict_from_yaml(filepath='fake_file.yml') + + assert str(e.value) == "'fake_file.yml' is not a file" + + +def test_dict_from_yaml_empty(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/empty.yml' + + result = dict_from_yaml(filepath=filepath) + + assert result == {} + + +def test_dict_from_yaml_invalid_contents(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/number.yml' + + with pytest.raises(ValueError) as e: + dict_from_yaml(filepath=filepath) + + assert str(e.value) == f"'{filepath}' cannot be parsed as a dictionary" + + +def test_dict_from_yaml_valid(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/valid.yml' + + result = dict_from_yaml(filepath=filepath) + + assert result == dict(a=1, b=dict(c=2, d=3)) + + +def test_dict_from_extended_yaml_invalid_filepath(): + with 
pytest.raises(ValueError) as e: + dict_from_extended_yaml(filepath='fake_file.yml') + + assert str(e.value) == "'fake_file.yml' is not a file" + + +def test_dict_from_extended_yaml_empty(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/empty.yml' + + result = dict_from_extended_yaml(filepath=filepath) + + assert result == {} + + +def test_dict_from_extended_yaml_invalid_contents(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/number.yml' + + with pytest.raises(ValueError) as e: + dict_from_extended_yaml(filepath=filepath) + + assert str(e.value) == f"'{filepath}' cannot be parsed as a dictionary" + + +def test_dict_from_extended_yaml_valid(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/valid.yml' + + result = dict_from_extended_yaml(filepath=filepath) + + assert result == dict(a=1, b=dict(c=2, d=3)) + + +def test_dict_from_extended_yaml_empty_extends(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/empty_extends.yml' + + result = dict_from_extended_yaml(filepath=filepath) + + assert result == dict(a='aa', b=dict(d='dd', e='ee')) + + +def test_dict_from_extended_yaml_invalid_extends(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/invalid_extends.yml' + + with pytest.raises(ValueError) as e: + dict_from_extended_yaml(filepath=filepath) + + assert "/fixtures/unknown_file.yml' is not a file" in str(e.value) + + +def test_dict_from_extended_yaml_self_extends(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/self_extends.yml' + + with pytest.raises(AssertionError) as e: + dict_from_extended_yaml(filepath=filepath) + + assert str(e.value) == 'cannot extend self' + + +def test_dict_from_extended_yaml_valid_extends(): + parent_dir = Path(__file__).parent + filepath = parent_dir / 'fixtures/valid_extends.yml' + + result = dict_from_extended_yaml(filepath=filepath) + + assert result == dict(a='aa', b=dict(c=2, 
d='dd', e='ee')) + From 51b08d2fe01ff9900fb1b8752b89ed07f2c527c1 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 19 May 2023 01:43:34 -0300 Subject: [PATCH 15/24] chore: update HathorSettings to use new utils --- hathor/conf/settings.py | 82 ++++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 574d0c968..2bd2ba645 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -14,14 +14,14 @@ import os from math import log -from typing import Any, Dict, List, NamedTuple, Optional, Union +from typing import NamedTuple, Optional, Union import pydantic from hathor.checkpoint import Checkpoint from hathor.feature_activation.settings import Settings as FeatureActivationSettings from hathor.utils import yaml -from hathor.utils.pydantic import BaseModel +from hathor.utils.named_tuple import validated_named_tuple_from_dict DECIMAL_PLACES = 2 @@ -40,7 +40,7 @@ class HathorSettings(NamedTuple): NETWORK_NAME: str # Initial bootstrap servers - BOOTSTRAP_DNS: List[str] = [] + BOOTSTRAP_DNS: list[str] = [] # enable peer whitelist ENABLE_PEER_WHITELIST: bool = False @@ -243,7 +243,7 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: TOKEN_DEPOSIT_PERCENTAGE: float = 0.01 # Array with the settings parameters that are used when calculating the settings hash - P2P_SETTINGS_HASH_FIELDS: List[str] = [ + P2P_SETTINGS_HASH_FIELDS: list[str] = [ 'P2PKH_VERSION_BYTE', 'MULTISIG_VERSION_BYTE', 'MIN_BLOCK_WEIGHT', @@ -359,13 +359,13 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: METRICS_COLLECT_ROCKSDB_DATA_INTERVAL: int = 86400 # 1 day # Block checkpoints - CHECKPOINTS: List[Checkpoint] = [] + CHECKPOINTS: list[Checkpoint] = [] # Used on testing to enable slow asserts that help catch bugs but we don't want to run in production SLOW_ASSERTS: bool = False # List of soft voided transaction. 
- SOFT_VOIDED_TX_IDS: List[bytes] = [] + SOFT_VOIDED_TX_IDS: list[bytes] = [] # Identifier used in metadata's voided_by to mark a tx as soft-voided. SOFT_VOIDED_ID: bytes = b'tx-non-grata' @@ -394,30 +394,24 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: @classmethod def from_yaml(cls, *, filepath: str) -> 'HathorSettings': """Takes a filepath to a yaml file and returns a validated HathorSettings instance.""" - settings_dict = yaml.dict_from(filepath=filepath) + settings_dict = yaml.dict_from_extended_yaml(filepath=filepath) - return HathorSettings.from_dict(settings_dict) - - @classmethod - def from_dict(cls, settings: Dict[str, Any]) -> 'HathorSettings': - """Takes a settings dict and returns a validated HathorSettings instance.""" - # This intermediate step shouldn't be necessary, but for some reason pydantic.create_model_from_namedtuple - # doesn't support default attribute values, so we do this to add them - all_settings = HathorSettings(**settings) - validated_settings = _ValidatedHathorSettings(**all_settings._asdict()) - - return HathorSettings(**validated_settings.dict()) + return validated_named_tuple_from_dict( + HathorSettings, + settings_dict, + validators=_VALIDATORS + ) -def _parse_checkpoints(checkpoints: Union[Dict[int, str], List[Checkpoint]]) -> List[Checkpoint]: +def _parse_checkpoints(checkpoints: Union[dict[int, str], list[Checkpoint]]) -> list[Checkpoint]: """Parse a dictionary of raw checkpoint data into a list of checkpoints.""" - if isinstance(checkpoints, Dict): + if isinstance(checkpoints, dict): return [ Checkpoint(height, bytes.fromhex(_hash)) for height, _hash in checkpoints.items() ] - if not isinstance(checkpoints, List): + if not isinstance(checkpoints, list): raise TypeError(f'expected \'Dict[int, str]\' or \'List[Checkpoint]\', got {checkpoints}') return checkpoints @@ -434,29 +428,25 @@ def _parse_hex_str(hex_str: Union[str, bytes]) -> bytes: return hex_str -_ValidatedHathorSettings = pydantic.create_model_from_namedtuple( - 
HathorSettings, - __base__=BaseModel, - __validators__=dict( - _parse_hex_str=pydantic.validator( - 'P2PKH_VERSION_BYTE', - 'MULTISIG_VERSION_BYTE', - 'GENESIS_OUTPUT_SCRIPT', - 'GENESIS_BLOCK_HASH', - 'GENESIS_TX1_HASH', - 'GENESIS_TX2_HASH', - pre=True, - allow_reuse=True - )(_parse_hex_str), - _parse_soft_voided_tx_id=pydantic.validator( - 'SOFT_VOIDED_TX_IDS', - pre=True, - allow_reuse=True, - each_item=True - )(_parse_hex_str), - _parse_checkpoints=pydantic.validator( - 'CHECKPOINTS', - pre=True - )(_parse_checkpoints) - ) +_VALIDATORS = dict( + _parse_hex_str=pydantic.validator( + 'P2PKH_VERSION_BYTE', + 'MULTISIG_VERSION_BYTE', + 'GENESIS_OUTPUT_SCRIPT', + 'GENESIS_BLOCK_HASH', + 'GENESIS_TX1_HASH', + 'GENESIS_TX2_HASH', + pre=True, + allow_reuse=True + )(_parse_hex_str), + _parse_soft_voided_tx_id=pydantic.validator( + 'SOFT_VOIDED_TX_IDS', + pre=True, + allow_reuse=True, + each_item=True + )(_parse_hex_str), + _parse_checkpoints=pydantic.validator( + 'CHECKPOINTS', + pre=True + )(_parse_checkpoints) ) From 3e88702d389ad394a8d051b33afa30fcff085c1f Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 19 May 2023 02:03:18 -0300 Subject: [PATCH 16/24] chore: lint code --- tests/{utils => utils_modules}/__init__.py | 0 tests/{utils => utils_modules}/fixtures/empty.yml | 0 tests/{utils => utils_modules}/fixtures/empty_extends.yml | 0 tests/{utils => utils_modules}/fixtures/invalid_extends.yml | 0 tests/{utils => utils_modules}/fixtures/number.yml | 0 tests/{utils => utils_modules}/fixtures/self_extends.yml | 0 tests/{utils => utils_modules}/fixtures/valid.yml | 0 tests/{utils => utils_modules}/fixtures/valid_extends.yml | 0 tests/{utils => utils_modules}/test_named_tuple.py | 0 tests/{utils => utils_modules}/test_yaml.py | 3 +-- 10 files changed, 1 insertion(+), 2 deletions(-) rename tests/{utils => utils_modules}/__init__.py (100%) rename tests/{utils => utils_modules}/fixtures/empty.yml (100%) rename tests/{utils => 
utils_modules}/fixtures/empty_extends.yml (100%) rename tests/{utils => utils_modules}/fixtures/invalid_extends.yml (100%) rename tests/{utils => utils_modules}/fixtures/number.yml (100%) rename tests/{utils => utils_modules}/fixtures/self_extends.yml (100%) rename tests/{utils => utils_modules}/fixtures/valid.yml (100%) rename tests/{utils => utils_modules}/fixtures/valid_extends.yml (100%) rename tests/{utils => utils_modules}/test_named_tuple.py (100%) rename tests/{utils => utils_modules}/test_yaml.py (98%) diff --git a/tests/utils/__init__.py b/tests/utils_modules/__init__.py similarity index 100% rename from tests/utils/__init__.py rename to tests/utils_modules/__init__.py diff --git a/tests/utils/fixtures/empty.yml b/tests/utils_modules/fixtures/empty.yml similarity index 100% rename from tests/utils/fixtures/empty.yml rename to tests/utils_modules/fixtures/empty.yml diff --git a/tests/utils/fixtures/empty_extends.yml b/tests/utils_modules/fixtures/empty_extends.yml similarity index 100% rename from tests/utils/fixtures/empty_extends.yml rename to tests/utils_modules/fixtures/empty_extends.yml diff --git a/tests/utils/fixtures/invalid_extends.yml b/tests/utils_modules/fixtures/invalid_extends.yml similarity index 100% rename from tests/utils/fixtures/invalid_extends.yml rename to tests/utils_modules/fixtures/invalid_extends.yml diff --git a/tests/utils/fixtures/number.yml b/tests/utils_modules/fixtures/number.yml similarity index 100% rename from tests/utils/fixtures/number.yml rename to tests/utils_modules/fixtures/number.yml diff --git a/tests/utils/fixtures/self_extends.yml b/tests/utils_modules/fixtures/self_extends.yml similarity index 100% rename from tests/utils/fixtures/self_extends.yml rename to tests/utils_modules/fixtures/self_extends.yml diff --git a/tests/utils/fixtures/valid.yml b/tests/utils_modules/fixtures/valid.yml similarity index 100% rename from tests/utils/fixtures/valid.yml rename to tests/utils_modules/fixtures/valid.yml diff --git 
a/tests/utils/fixtures/valid_extends.yml b/tests/utils_modules/fixtures/valid_extends.yml similarity index 100% rename from tests/utils/fixtures/valid_extends.yml rename to tests/utils_modules/fixtures/valid_extends.yml diff --git a/tests/utils/test_named_tuple.py b/tests/utils_modules/test_named_tuple.py similarity index 100% rename from tests/utils/test_named_tuple.py rename to tests/utils_modules/test_named_tuple.py diff --git a/tests/utils/test_yaml.py b/tests/utils_modules/test_yaml.py similarity index 98% rename from tests/utils/test_yaml.py rename to tests/utils_modules/test_yaml.py index 1a2b697ff..b352636b0 100644 --- a/tests/utils/test_yaml.py +++ b/tests/utils_modules/test_yaml.py @@ -15,7 +15,7 @@ import pytest -from hathor.utils.yaml import dict_from_yaml, dict_from_extended_yaml +from hathor.utils.yaml import dict_from_extended_yaml, dict_from_yaml def test_dict_from_yaml_invalid_filepath(): @@ -124,4 +124,3 @@ def test_dict_from_extended_yaml_valid_extends(): result = dict_from_extended_yaml(filepath=filepath) assert result == dict(a='aa', b=dict(c=2, d='dd', e='ee')) - From 06ad097a090c65f023c8bd4b553a3dc5812e4f3d Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 19 May 2023 11:51:03 -0300 Subject: [PATCH 17/24] chore: change deep_merge so it returns a new dict --- hathor/utils/dict.py | 34 +++++++++++++++++++++++++--------- hathor/utils/yaml.py | 5 ++--- 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/hathor/utils/dict.py b/hathor/utils/dict.py index bf11b0062..b3c988150 100644 --- a/hathor/utils/dict.py +++ b/hathor/utils/dict.py @@ -12,18 +12,34 @@ # See the License for the specific language governing permissions and # limitations under the License. -def deep_merge(first: dict, second: dict) -> None: +from copy import deepcopy + + +def deep_merge(first_dict: dict, second_dict: dict) -> dict: """ - Recursively merges two dicts, altering the first one in place. 
+ Recursively merges two dicts, returning a new one with the merged values. Keeps both input dicts intact. + + Note: will raise RecursionError if there's a circular reference in both dicts. >>> dict1 = dict(a=1, b=dict(c=2, d=3), e=dict(f=4)) >>> dict2 = dict(b=dict(d=5, e=6), e=7) - >>> deep_merge(dict1, dict2) - >>> dict1 == dict(a=1, b=dict(c=2, d=5, e=6), e=7) + >>> result = deep_merge(dict1, dict2) + >>> result == dict(a=1, b=dict(c=2, d=5, e=6), e=7) + True + >>> dict1 == dict(a=1, b=dict(c=2, d=3), e=dict(f=4)) + True + >>> dict2 == dict(b=dict(d=5, e=6), e=7) True """ - for key in second: - if key in first and isinstance(first[key], dict) and isinstance(second[key], dict): - deep_merge(first[key], second[key]) - else: - first[key] = second[key] + merged = deepcopy(first_dict) + + def do_deep_merge(first: dict, second: dict) -> dict: + for key in second: + if key in first and isinstance(first[key], dict) and isinstance(second[key], dict): + do_deep_merge(first[key], second[key]) + else: + first[key] = second[key] + + return first + + return do_deep_merge(merged, second_dict) diff --git a/hathor/utils/yaml.py b/hathor/utils/yaml.py index 6b590f2f4..7b2e07e0f 100644 --- a/hathor/utils/yaml.py +++ b/hathor/utils/yaml.py @@ -62,7 +62,6 @@ def dict_from_extended_yaml(*, filepath: Union[Path, str]) -> dict[str, Any]: assert base_filepath.resolve() != Path(filepath).resolve(), 'cannot extend self' base_dict = dict_from_yaml(filepath=base_filepath) + merged_dict = deep_merge(base_dict, extension_dict) - deep_merge(base_dict, extension_dict) - - return base_dict + return merged_dict From 9c7516fced965b7460c03e3ef966b3983e9b3076 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 19 May 2023 16:13:53 -0300 Subject: [PATCH 18/24] chore: fix test on windows --- tests/utils_modules/test_yaml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils_modules/test_yaml.py b/tests/utils_modules/test_yaml.py index b352636b0..66c519250 100644 --- 
a/tests/utils_modules/test_yaml.py +++ b/tests/utils_modules/test_yaml.py @@ -104,7 +104,7 @@ def test_dict_from_extended_yaml_invalid_extends(): with pytest.raises(ValueError) as e: dict_from_extended_yaml(filepath=filepath) - assert "/fixtures/unknown_file.yml' is not a file" in str(e.value) + assert "unknown_file.yml' is not a file" in str(e.value) def test_dict_from_extended_yaml_self_extends(): From e842911d74459d5fe67e78d7789200fdfbdb1601 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Tue, 23 May 2023 16:52:05 -0300 Subject: [PATCH 19/24] chore: implement custom_root arg and extends recursion --- hathor/conf/settings.py | 3 +- hathor/utils/yaml.py | 31 ++++++---- .../fixtures/mainnet_extends.yml | 6 ++ tests/utils_modules/test_yaml.py | 59 ++++++++++--------- 4 files changed, 56 insertions(+), 43 deletions(-) create mode 100644 tests/utils_modules/fixtures/mainnet_extends.yml diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 2bd2ba645..d1cc73494 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -14,6 +14,7 @@ import os from math import log +from pathlib import Path from typing import NamedTuple, Optional, Union import pydantic @@ -394,7 +395,7 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: @classmethod def from_yaml(cls, *, filepath: str) -> 'HathorSettings': """Takes a filepath to a yaml file and returns a validated HathorSettings instance.""" - settings_dict = yaml.dict_from_extended_yaml(filepath=filepath) + settings_dict = yaml.dict_from_extended_yaml(filepath=filepath, custom_root=Path(__file__).parent) return validated_named_tuple_from_dict( HathorSettings, diff --git a/hathor/utils/yaml.py b/hathor/utils/yaml.py index 7b2e07e0f..3c1794af8 100644 --- a/hathor/utils/yaml.py +++ b/hathor/utils/yaml.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import os from pathlib import Path -from typing import Any, Union +from typing import Any, Optional, Union import yaml @@ -39,29 +40,33 @@ def dict_from_yaml(*, filepath: Union[Path, str]) -> dict[str, Any]: return contents -def dict_from_extended_yaml(*, filepath: Union[Path, str]) -> dict[str, Any]: +def dict_from_extended_yaml(*, filepath: Union[Path, str], custom_root: Optional[Path] = None) -> dict[str, Any]: """ Takes a filepath to a yaml file and returns a dictionary with its contents. - Supports extending another yaml file via the 'extends' key in the file. + + Supports extending another yaml file via the 'extends' key in the file. The 'extends' value can be an absolute path + to a yaml file, or a path relative to the base yaml file. The custom_root arg can be provided to set a custom root + for relative paths, taking lower precedence. Note: the 'extends' key is reserved and will not be present in the returned dictionary. To opt-out of the extension feature, use dict_from_yaml(). 
""" extension_dict = dict_from_yaml(filepath=filepath) - base_file = extension_dict.pop(_EXTENDS_KEY, None) + file_to_extend = extension_dict.pop(_EXTENDS_KEY, None) - if not base_file: + if not file_to_extend: return extension_dict - root_path = Path(filepath).parent - base_filepath = root_path / str(base_file) + filepath_to_extend = Path(filepath).parent / str(file_to_extend) - if not os.path.isfile(base_filepath): - raise ValueError(f"'{base_filepath}' is not a file") + if not os.path.isfile(filepath_to_extend) and custom_root: + filepath_to_extend = custom_root / str(file_to_extend) - assert base_filepath.resolve() != Path(filepath).resolve(), 'cannot extend self' + try: + dict_to_extend = dict_from_extended_yaml(filepath=filepath_to_extend, custom_root=custom_root) + except RecursionError as e: + raise ValueError('Cannot parse yaml with recursive extensions.') from e - base_dict = dict_from_yaml(filepath=base_filepath) - merged_dict = deep_merge(base_dict, extension_dict) + extended_dict = deep_merge(dict_to_extend, extension_dict) - return merged_dict + return extended_dict diff --git a/tests/utils_modules/fixtures/mainnet_extends.yml b/tests/utils_modules/fixtures/mainnet_extends.yml new file mode 100644 index 000000000..0b1dbbb63 --- /dev/null +++ b/tests/utils_modules/fixtures/mainnet_extends.yml @@ -0,0 +1,6 @@ +extends: mainnet.yml + +a: aa +b: + d: dd + e: ee diff --git a/tests/utils_modules/test_yaml.py b/tests/utils_modules/test_yaml.py index 66c519250..7ef925b1d 100644 --- a/tests/utils_modules/test_yaml.py +++ b/tests/utils_modules/test_yaml.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from pathlib import Path import pytest @@ -18,6 +19,12 @@ from hathor.utils.yaml import dict_from_extended_yaml, dict_from_yaml +def _get_absolute_filepath(filepath: str) -> Path: + parent_dir = Path(__file__).parent + + return parent_dir / filepath + + def test_dict_from_yaml_invalid_filepath(): with pytest.raises(ValueError) as e: dict_from_yaml(filepath='fake_file.yml') @@ -26,17 +33,14 @@ def test_dict_from_yaml_invalid_filepath(): def test_dict_from_yaml_empty(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/empty.yml' - + filepath = _get_absolute_filepath('fixtures/empty.yml') result = dict_from_yaml(filepath=filepath) assert result == {} def test_dict_from_yaml_invalid_contents(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/number.yml' + filepath = _get_absolute_filepath('fixtures/number.yml') with pytest.raises(ValueError) as e: dict_from_yaml(filepath=filepath) @@ -45,9 +49,7 @@ def test_dict_from_yaml_invalid_contents(): def test_dict_from_yaml_valid(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/valid.yml' - + filepath = _get_absolute_filepath('fixtures/valid.yml') result = dict_from_yaml(filepath=filepath) assert result == dict(a=1, b=dict(c=2, d=3)) @@ -61,17 +63,14 @@ def test_dict_from_extended_yaml_invalid_filepath(): def test_dict_from_extended_yaml_empty(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/empty.yml' - + filepath = _get_absolute_filepath('fixtures/empty.yml') result = dict_from_extended_yaml(filepath=filepath) assert result == {} def test_dict_from_extended_yaml_invalid_contents(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/number.yml' + filepath = _get_absolute_filepath('fixtures/number.yml') with pytest.raises(ValueError) as e: dict_from_extended_yaml(filepath=filepath) @@ -80,26 +79,21 @@ def test_dict_from_extended_yaml_invalid_contents(): def test_dict_from_extended_yaml_valid(): - 
parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/valid.yml' - + filepath = _get_absolute_filepath('fixtures/valid.yml') result = dict_from_extended_yaml(filepath=filepath) assert result == dict(a=1, b=dict(c=2, d=3)) def test_dict_from_extended_yaml_empty_extends(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/empty_extends.yml' - + filepath = _get_absolute_filepath('fixtures/empty_extends.yml') result = dict_from_extended_yaml(filepath=filepath) assert result == dict(a='aa', b=dict(d='dd', e='ee')) def test_dict_from_extended_yaml_invalid_extends(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/invalid_extends.yml' + filepath = _get_absolute_filepath('fixtures/invalid_extends.yml') with pytest.raises(ValueError) as e: dict_from_extended_yaml(filepath=filepath) @@ -107,20 +101,27 @@ def test_dict_from_extended_yaml_invalid_extends(): assert "unknown_file.yml' is not a file" in str(e.value) -def test_dict_from_extended_yaml_self_extends(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/self_extends.yml' +def test_dict_from_extended_yaml_recursive_extends(): + filepath = _get_absolute_filepath('fixtures/self_extends.yml') - with pytest.raises(AssertionError) as e: + with pytest.raises(ValueError) as e: dict_from_extended_yaml(filepath=filepath) - assert str(e.value) == 'cannot extend self' + assert str(e.value) == 'Cannot parse yaml with recursive extensions.' 
def test_dict_from_extended_yaml_valid_extends(): - parent_dir = Path(__file__).parent - filepath = parent_dir / 'fixtures/valid_extends.yml' - + filepath = _get_absolute_filepath('fixtures/valid_extends.yml') result = dict_from_extended_yaml(filepath=filepath) assert result == dict(a='aa', b=dict(c=2, d='dd', e='ee')) + + +def test_dict_from_yaml_mainnet_extends(): + from hathor.conf import MAINNET_SETTINGS_FILEPATH + + filepath = _get_absolute_filepath('fixtures/mainnet_extends.yml') + mainnet_dict = dict_from_yaml(filepath=MAINNET_SETTINGS_FILEPATH) + result = dict_from_extended_yaml(filepath=filepath, custom_root=Path(MAINNET_SETTINGS_FILEPATH).parent) + + assert result == dict(**mainnet_dict, a='aa', b=dict(d='dd', e='ee')) From 688c29bf5f2cc9f2da2b73e5f561dfc511ff1ef4 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Tue, 23 May 2023 22:50:04 -0300 Subject: [PATCH 20/24] chore: fix bug in validated_named_tuple_from_dict --- hathor/utils/named_tuple.py | 3 +- tests/utils_modules/test_named_tuple.py | 54 +++++++++++++++++-------- 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/hathor/utils/named_tuple.py b/hathor/utils/named_tuple.py index b18c768c1..2745cf3d9 100644 --- a/hathor/utils/named_tuple.py +++ b/hathor/utils/named_tuple.py @@ -48,5 +48,6 @@ def validated_named_tuple_from_dict( # doesn't support default attribute values, so we do this to add them all_attributes = named_tuple_type(**attributes_dict) validated_attributes = model(**all_attributes._asdict()) + validated_attributes_dict = {k: v for k, v in validated_attributes} - return named_tuple_type(**validated_attributes.dict()) + return named_tuple_type(**validated_attributes_dict) diff --git a/tests/utils_modules/test_named_tuple.py b/tests/utils_modules/test_named_tuple.py index 4b51b2300..e56aaf8f4 100644 --- a/tests/utils_modules/test_named_tuple.py +++ b/tests/utils_modules/test_named_tuple.py @@ -19,51 +19,71 @@ from pydantic import ValidationError from 
hathor.utils.named_tuple import validated_named_tuple_from_dict +from hathor.utils.pydantic import BaseModel -class TestTuple(NamedTuple): +class InnerTuple(NamedTuple): + x: str + + +class InnerModel(BaseModel): + y: str + + +class OuterTuple(NamedTuple): a: int - b: int - c: str + b: InnerTuple + c: InnerModel @classmethod - def validate_b(cls, b: int) -> int: - if b > 10: - raise ValueError('b cannot be greater than 10') + def validate_a(cls, a: int) -> int: + if a > 10: + raise ValueError('"a" cannot be greater than 10') - return b + return a VALIDATORS = dict( - validate_b=pydantic.validator('b')(TestTuple.validate_b) + validate_a=pydantic.validator('a')(OuterTuple.validate_a) ) @pytest.mark.parametrize( ['attributes', 'expected'], [ - (dict(a=1, b=0, c='a'), TestTuple(1, 0, 'a')), - (dict(a=123, b=5, c='aa'), TestTuple(123, 5, 'aa')), - (dict(a=1010, b=10, c='aaa'), TestTuple(1010, 10, 'aaa')), + ( + dict(a=0, b=('b',), c=dict(y='c')), + OuterTuple(a=0, b=InnerTuple(x='b'), c=InnerModel(y='c')) + ), + ( + dict(a=5, b=('bb',), c=dict(y='cc')), + OuterTuple(a=5, b=InnerTuple(x='bb'), c=InnerModel(y='cc')) + ), + ( + dict(a=10, b=('bbb',), c=dict(y='ccc')), + OuterTuple(a=10, b=InnerTuple(x='bbb'), c=InnerModel(y='ccc')) + ), ] ) def test_validated_named_tuple_from_dict(attributes, expected): - result = validated_named_tuple_from_dict(TestTuple, attributes, validators=VALIDATORS) + result = validated_named_tuple_from_dict(OuterTuple, attributes, validators=VALIDATORS) + assert isinstance(result.b, InnerTuple) + assert isinstance(result.c, InnerModel) assert result == expected @pytest.mark.parametrize( 'attributes', [ - dict(a=1, b=11, c='a'), - dict(a=123, b=50, c='aa'), - dict(a=1010, b=100, c='aaa'), + dict(a=11, b=('b',), c=dict(y='c')), + dict(a=50, b=('bb',), c=dict(y='cc')), + dict(a=100, b=('bbb',), c=dict(y='ccc')), ] ) def test_validated_named_tuple_from_dict_error(attributes): with pytest.raises(ValidationError) as e: - 
validated_named_tuple_from_dict(TestTuple, attributes, validators=VALIDATORS) + validated_named_tuple_from_dict(OuterTuple, attributes, validators=VALIDATORS) errors = e.value.errors() - assert errors[0]['msg'] == 'b cannot be greater than 10' + assert errors[0]['msg'] == '"a" cannot be greater than 10' From 5005cb66f9e0a5e88713d88fa4fe12d6a6d440dc Mon Sep 17 00:00:00 2001 From: Jan Segre Date: Thu, 18 May 2023 22:32:44 +0200 Subject: [PATCH 21/24] feat: add safeguards for partially validated transactions [part 2/2] --- hathor/consensus/consensus.py | 11 +- hathor/manager.py | 9 +- hathor/transaction/base_transaction.py | 47 +++--- hathor/transaction/storage/cache_storage.py | 8 +- hathor/transaction/storage/exceptions.py | 4 + hathor/transaction/storage/memory_storage.py | 2 +- hathor/transaction/storage/rocksdb_storage.py | 7 +- .../storage/transaction_storage.py | 88 +++++++++- hathor/transaction/storage/tx_allow_scope.py | 65 ++++++++ hathor/transaction/transaction_metadata.py | 6 +- tests/others/test_init_manager.py | 4 +- tests/tx/test_tx_storage.py | 152 +++++++++++++++++- tests/tx/test_validation_states.py | 102 ++++++++++++ 13 files changed, 452 insertions(+), 53 deletions(-) create mode 100644 hathor/transaction/storage/tx_allow_scope.py create mode 100644 tests/tx/test_validation_states.py diff --git a/hathor/consensus/consensus.py b/hathor/consensus/consensus.py index fcb030201..3e419faca 100644 --- a/hathor/consensus/consensus.py +++ b/hathor/consensus/consensus.py @@ -70,6 +70,8 @@ def create_context(self) -> ConsensusAlgorithmContext: @cpu.profiler(key=lambda self, base: 'consensus!{}'.format(base.hash.hex())) def update(self, base: BaseTransaction) -> None: + assert base.storage is not None + assert base.storage.is_only_valid_allowed() try: self._unsafe_update(base) except Exception: @@ -107,11 +109,16 @@ def _unsafe_update(self, base: BaseTransaction) -> None: if new_best_height < best_height: self.log.warn('height decreased, re-checking mempool', 
prev_height=best_height, new_height=new_best_height, prev_block_tip=best_tip.hex(), new_block_tip=new_best_tip.hex()) - to_remove = storage.get_transactions_that_became_invalid() + # XXX: this method will mark as INVALID all transactions in the mempool that became invalid because of a + # reward lock + to_remove = storage.compute_transactions_that_became_invalid() if to_remove: self.log.warn('some transactions on the mempool became invalid and will be removed', count=len(to_remove)) - storage.remove_transactions(to_remove) + # XXX: because transactions in `to_remove` are marked as invalid, we need this context to be able to + # remove them + with storage.allow_invalid_context(): + storage.remove_transactions(to_remove) for tx_removed in to_remove: context.pubsub.publish(HathorEvents.CONSENSUS_TX_REMOVED, tx_hash=tx_removed.hash) diff --git a/hathor/manager.py b/hathor/manager.py index 108a6001b..a3065a55d 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -413,8 +413,9 @@ def _initialize_components(self) -> None: # self.start_profiler() if self._full_verification: self.log.debug('reset all metadata') - for tx in self.tx_storage.get_all_transactions(): - tx.reset_metadata() + with self.tx_storage.allow_partially_validated_context(): + for tx in self.tx_storage.get_all_transactions(): + tx.reset_metadata() self.log.debug('load blocks and transactions') for tx in self.tx_storage._topological_sort_dfs(): @@ -459,9 +460,11 @@ def _initialize_components(self) -> None: self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update if self.tx_storage.indexes.deps is not None: self.sync_v2_step_validations([tx]) + self.tx_storage.save_transaction(tx, only_metadata=True) else: assert tx.validate_basic(skip_block_weight_verification=skip_block_weight_verification) - self.tx_storage.save_transaction(tx, only_metadata=True) + with self.tx_storage.allow_partially_validated_context(): + self.tx_storage.save_transaction(tx, only_metadata=True) else: # TODO: 
deal with invalid tx if not tx_meta.validation.is_final(): diff --git a/hathor/transaction/base_transaction.py b/hathor/transaction/base_transaction.py index c983ef739..8615b099f 100644 --- a/hathor/transaction/base_transaction.py +++ b/hathor/transaction/base_transaction.py @@ -42,7 +42,7 @@ TxValidationError, WeightError, ) -from hathor.transaction.transaction_metadata import TransactionMetadata +from hathor.transaction.transaction_metadata import TransactionMetadata, ValidationState from hathor.transaction.util import VerboseCallback, int_to_bytes, unpack, unpack_len from hathor.util import classproperty @@ -482,19 +482,25 @@ def can_validate_full(self) -> bool: return True return all_exist and all_valid + def set_validation(self, validation: ValidationState) -> None: + """ This method will set the internal validation state AND the appropriate voided_by marker. + + NOTE: THIS METHOD WILL NOT SAVE THE TRANSACTION + """ + meta = self.get_metadata() + meta.validation = validation + if validation.is_fully_connected(): + self._unmark_partially_validated() + else: + self._mark_partially_validated() + def validate_checkpoint(self, checkpoints: List[Checkpoint]) -> bool: """ Run checkpoint validations and update the validation state. If no exception is raised, the ValidationState will end up as `CHECKPOINT` and return `True`. """ - from hathor.transaction.transaction_metadata import ValidationState - - meta = self.get_metadata() - self.verify_checkpoint(checkpoints) - - meta.validation = ValidationState.CHECKPOINT - self.mark_partially_validated() + self.set_validation(ValidationState.CHECKPOINT) return True def validate_basic(self, skip_block_weight_verification: bool = False) -> bool: @@ -502,14 +508,8 @@ def validate_basic(self, skip_block_weight_verification: bool = False) -> bool: If no exception is raised, the ValidationState will end up as `BASIC` and return `True`. 
""" - from hathor.transaction.transaction_metadata import ValidationState - - meta = self.get_metadata() - self.verify_basic(skip_block_weight_verification=skip_block_weight_verification) - - meta.validation = ValidationState.BASIC - self.mark_partially_validated() + self.set_validation(ValidationState.BASIC) return True def validate_full(self, skip_block_weight_verification: bool = False, sync_checkpoints: bool = False, @@ -523,9 +523,7 @@ def validate_full(self, skip_block_weight_verification: bool = False, sync_check meta = self.get_metadata() # skip full validation when it is a checkpoint if meta.validation.is_checkpoint(): - meta.validation = ValidationState.CHECKPOINT_FULL - # at last, remove the partially validated mark - self.unmark_partially_validated() + self.set_validation(ValidationState.CHECKPOINT_FULL) return True # XXX: in some cases it might be possible that this transaction is verified by a checkpoint but we went @@ -536,16 +534,11 @@ def validate_full(self, skip_block_weight_verification: bool = False, sync_check self.verify_basic(skip_block_weight_verification=skip_block_weight_verification) self.verify(reject_locked_reward=reject_locked_reward) - if sync_checkpoints: - meta.validation = ValidationState.CHECKPOINT_FULL - else: - meta.validation = ValidationState.FULL - - # at last, remove the partially validated mark - self.unmark_partially_validated() + validation = ValidationState.CHECKPOINT_FULL if sync_checkpoints else ValidationState.FULL + self.set_validation(validation) return True - def mark_partially_validated(self) -> None: + def _mark_partially_validated(self) -> None: """ This function is used to add the partially-validated mark from the voided-by metadata. It is idempotent: calling it multiple time has the same effect as calling it once. 
But it must only be called @@ -555,7 +548,7 @@ def mark_partially_validated(self) -> None: assert not tx_meta.validation.is_fully_connected() tx_meta.add_voided_by(settings.PARTIALLY_VALIDATED_ID) - def unmark_partially_validated(self) -> None: + def _unmark_partially_validated(self) -> None: """ This function is used to remove the partially-validated mark from the voided-by metadata. It is idempotent: calling it multiple time has the same effect as calling it once. But it must only be called diff --git a/hathor/transaction/storage/cache_storage.py b/hathor/transaction/storage/cache_storage.py index f662b22f5..dfc6b0fec 100644 --- a/hathor/transaction/storage/cache_storage.py +++ b/hathor/transaction/storage/cache_storage.py @@ -13,7 +13,7 @@ # limitations under the License. from collections import OrderedDict -from typing import Any, Optional, Set +from typing import Any, Iterator, Optional, Set from twisted.internet import threads @@ -208,9 +208,11 @@ def _get_transaction(self, hash_bytes: bytes) -> BaseTransaction: assert tx is not None return tx - def get_all_transactions(self): + def _get_all_transactions(self) -> Iterator[BaseTransaction]: self._flush_to_storage(self.dirty_txs.copy()) - for tx in self.store.get_all_transactions(): + # XXX: explicitly use _get_all_transaction instead of get_all_transactions because there will already be a + # TransactionCacheStorage.get_all_transactions outer method + for tx in self.store._get_all_transactions(): tx.storage = self self._save_to_weakref(tx) yield tx diff --git a/hathor/transaction/storage/exceptions.py b/hathor/transaction/storage/exceptions.py index b587f426c..1f7ff5ae0 100644 --- a/hathor/transaction/storage/exceptions.py +++ b/hathor/transaction/storage/exceptions.py @@ -41,3 +41,7 @@ class PartialMigrationError(HathorError): class OutOfOrderMigrationError(HathorError): """A migration was run before another that was before it""" + + +class TransactionNotInAllowedScopeError(TransactionDoesNotExist): + """You 
are trying to get a transaction that is not allowed in the current scope, treated as non-existent""" diff --git a/hathor/transaction/storage/memory_storage.py b/hathor/transaction/storage/memory_storage.py index af07758f9..e4cd2cf7e 100644 --- a/hathor/transaction/storage/memory_storage.py +++ b/hathor/transaction/storage/memory_storage.py @@ -90,7 +90,7 @@ def _get_transaction(self, hash_bytes: bytes) -> BaseTransaction: else: raise TransactionDoesNotExist(hash_bytes.hex()) - def get_all_transactions(self, *, include_partial: bool = False) -> Iterator[BaseTransaction]: + def _get_all_transactions(self) -> Iterator[BaseTransaction]: for tx in self.transactions.values(): tx = self._clone(tx) if tx.hash in self.metadata: diff --git a/hathor/transaction/storage/rocksdb_storage.py b/hathor/transaction/storage/rocksdb_storage.py index 1a03df316..5daa51815 100644 --- a/hathor/transaction/storage/rocksdb_storage.py +++ b/hathor/transaction/storage/rocksdb_storage.py @@ -122,6 +122,7 @@ def _get_transaction(self, hash_bytes: bytes) -> 'BaseTransaction': if not tx: raise TransactionDoesNotExist(hash_bytes.hex()) + assert tx._metadata is not None assert tx.hash == hash_bytes self._save_to_weakref(tx) @@ -146,7 +147,7 @@ def _get_tx(self, hash_bytes: bytes, tx_data: bytes) -> 'BaseTransaction': self._save_to_weakref(tx) return tx - def get_all_transactions(self, *, include_partial: bool = False) -> Iterator['BaseTransaction']: + def _get_all_transactions(self) -> Iterator['BaseTransaction']: tx: Optional['BaseTransaction'] items = self._db.iteritems(self._cf_tx) @@ -163,10 +164,6 @@ def get_all_transactions(self, *, include_partial: bool = False) -> Iterator['Ba tx = self._get_tx(hash_bytes, tx_data) assert tx is not None - if not include_partial: - assert tx._metadata is not None - if not tx._metadata.validation.is_fully_connected(): - continue yield tx def is_empty(self) -> bool: diff --git a/hathor/transaction/storage/transaction_storage.py 
b/hathor/transaction/storage/transaction_storage.py index 476557102..14ae9b2a5 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -15,6 +15,7 @@ import hashlib from abc import ABC, abstractmethod, abstractproperty from collections import defaultdict, deque +from contextlib import AbstractContextManager from threading import Lock from typing import Any, Dict, Iterator, List, NamedTuple, Optional, Set, Tuple, Type, cast from weakref import WeakValueDictionary @@ -28,8 +29,13 @@ from hathor.pubsub import PubSubManager from hathor.transaction.base_transaction import BaseTransaction from hathor.transaction.block import Block -from hathor.transaction.storage.exceptions import TransactionDoesNotExist, TransactionIsNotABlock +from hathor.transaction.storage.exceptions import ( + TransactionDoesNotExist, + TransactionIsNotABlock, + TransactionNotInAllowedScopeError, +) from hathor.transaction.storage.migrations import BaseMigration, MigrationState, add_min_height_metadata +from hathor.transaction.storage.tx_allow_scope import TxAllowScope, tx_allow_context from hathor.transaction.transaction import Transaction from hathor.transaction.transaction_metadata import TransactionMetadata from hathor.util import not_none @@ -94,6 +100,9 @@ def __init__(self) -> None: # This is a global lock used to prevent concurrent access when getting the tx lock in the dict above self._weakref_lock: Lock = Lock() + # Flag to allow/disallow partially validated vertices. + self.allow_scope: TxAllowScope = TxAllowScope.VALID + # Cache for the best block tips # This cache is updated in the consensus algorithm. 
self._best_block_tips_cache: Optional[List[bytes]] = None @@ -330,6 +339,43 @@ def get_transaction_from_weakref(self, hash_bytes: bytes) -> Optional[BaseTransa return None return self._tx_weakref.get(hash_bytes, None) + # TODO: check if the method below is currently needed + def allow_only_valid_context(self) -> AbstractContextManager[None]: + """This method is used to temporarily reset the storage back to only allow valid transactions. + + The implementation will OVERRIDE the current scope to allowing only valid transactions on the observed + storage. + """ + return tx_allow_context(self, allow_scope=TxAllowScope.VALID) + + def allow_partially_validated_context(self) -> AbstractContextManager[None]: + """This method is used to temporarily make the storage allow partially validated transactions. + + The implementation will INCLUDE allowing partially valid transactions to the current allow scope. + """ + new_allow_scope = self.allow_scope | TxAllowScope.PARTIAL + return tx_allow_context(self, allow_scope=new_allow_scope) + + def allow_invalid_context(self) -> AbstractContextManager[None]: + """This method is used to temporarily make the storage allow invalid transactions. + + The implementation will INCLUDE allowing invalid transactions to the current allow scope. 
+ """ + new_allow_scope = self.allow_scope | TxAllowScope.INVALID + return tx_allow_context(self, allow_scope=new_allow_scope) + + def is_only_valid_allowed(self) -> bool: + """Whether only valid transactions are allowed to be returned/accepted by the storage, the default state.""" + return self.allow_scope is TxAllowScope.VALID + + def is_partially_validated_allowed(self) -> bool: + """Whether partially validated transactions are allowed to be returned/accepted by the storage.""" + return TxAllowScope.PARTIAL in self.allow_scope + + def is_invalid_allowed(self) -> bool: + """Whether invalid transactions are allowed to be returned/accepted by the storage.""" + return TxAllowScope.INVALID in self.allow_scope + def _enable_weakref(self) -> None: """ Weakref should never be disabled unless you know exactly what you are doing. """ @@ -354,7 +400,7 @@ def save_transaction(self: 'TransactionStorage', tx: BaseTransaction, *, only_me self.pre_save_validation(tx, meta) def pre_save_validation(self, tx: BaseTransaction, tx_meta: TransactionMetadata) -> None: - """ Must be run before every save, only raises AssertionError. + """ Must be run before every save, will raise AssertionError or TransactionNotInAllowedScopeError A failure means there is a bug in the code that allowed the condition to reach the "save" code. This is a last second measure to prevent persisting a bad transaction/metadata. @@ -365,6 +411,20 @@ def pre_save_validation(self, tx: BaseTransaction, tx_meta: TransactionMetadata) assert tx.hash is not None assert tx_meta.hash is not None assert tx.hash == tx_meta.hash, f'{tx.hash.hex()} != {tx_meta.hash.hex()}' + self._validate_partial_marker_consistency(tx_meta) + self._validate_transaction_in_scope(tx) + + def post_get_validation(self, tx: BaseTransaction) -> None: + """ Must be run before every save, will raise AssertionError or TransactionNotInAllowedScopeError + + A failure means there is a bug in the code that allowed the condition to reach the "get" code. 
This is a last + second measure to prevent getting a transaction while using the wrong scope. + """ + tx_meta = tx.get_metadata() + self._validate_partial_marker_consistency(tx_meta) + self._validate_transaction_in_scope(tx) + + def _validate_partial_marker_consistency(self, tx_meta: TransactionMetadata) -> None: voided_by = tx_meta.get_frozen_voided_by() # XXX: PARTIALLY_VALIDATED_ID must be included if the tx is fully connected and must not be included otherwise has_partially_validated_marker = settings.PARTIALLY_VALIDATED_ID in voided_by @@ -372,6 +432,11 @@ def pre_save_validation(self, tx: BaseTransaction, tx_meta: TransactionMetadata) assert (not has_partially_validated_marker) == validation_is_fully_connected, \ 'Inconsistent ValidationState and voided_by' + def _validate_transaction_in_scope(self, tx: BaseTransaction) -> None: + if not self.allow_scope.is_allowed(tx): + tx_meta = tx.get_metadata() + raise TransactionNotInAllowedScopeError(tx.hash_hex, self.allow_scope.name, tx_meta.validation.name) + @abstractmethod def remove_transaction(self, tx: BaseTransaction) -> None: """Remove the tx. @@ -483,6 +548,7 @@ def get_transaction(self, hash_bytes: bytes) -> BaseTransaction: tx = self._get_transaction(hash_bytes) else: tx = self._get_transaction(hash_bytes) + self.post_get_validation(tx) return tx def get_metadata(self, hash_bytes: bytes) -> Optional[TransactionMetadata]: @@ -497,10 +563,16 @@ def get_metadata(self, hash_bytes: bytes) -> Optional[TransactionMetadata]: except TransactionDoesNotExist: return None + def get_all_transactions(self) -> Iterator[BaseTransaction]: + """Return all vertices (transactions and blocks) within the allowed scope. + """ + for tx in self._get_all_transactions(): + if self.allow_scope.is_allowed(tx): + yield tx + @abstractmethod - def get_all_transactions(self, *, include_partial: bool = False) -> Iterator[BaseTransaction]: - # TODO: verify the following claim: - """Return all transactions that are not blocks. 
+ def _get_all_transactions(self) -> Iterator[BaseTransaction]: + """Internal implementation that iterates over all transactions/blocks. """ raise NotImplementedError @@ -950,14 +1022,14 @@ def iter_mempool_from_best_index(self) -> Iterator[Transaction]: else: yield from self.iter_mempool_from_tx_tips() - def get_transactions_that_became_invalid(self) -> List[BaseTransaction]: + def compute_transactions_that_became_invalid(self) -> List[BaseTransaction]: """ This method will look for transactions in the mempool that have become invalid due to the reward lock. """ from hathor.transaction.transaction_metadata import ValidationState to_remove: List[BaseTransaction] = [] for tx in self.iter_mempool_from_best_index(): if tx.is_spent_reward_locked(): - tx.get_metadata().validation = ValidationState.INVALID + tx.set_validation(ValidationState.INVALID) to_remove.append(tx) return to_remove @@ -1001,6 +1073,8 @@ def reset_indexes(self) -> None: """Reset all indexes. This function should not be called unless you know what you are doing.""" assert self.indexes is not None, 'Cannot reset indexes because they have not been enabled.' self.indexes.force_clear_all() + self.update_best_block_tips_cache(None) + self._all_tips_cache = None def remove_cache(self) -> None: """Remove all caches in case we don't need it.""" diff --git a/hathor/transaction/storage/tx_allow_scope.py b/hathor/transaction/storage/tx_allow_scope.py new file mode 100644 index 000000000..f490abd0d --- /dev/null +++ b/hathor/transaction/storage/tx_allow_scope.py @@ -0,0 +1,65 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from contextlib import contextmanager +from enum import Flag, auto +from typing import TYPE_CHECKING, Generator + +from hathor.conf import HathorSettings +from hathor.transaction.base_transaction import BaseTransaction + +if TYPE_CHECKING: + from hathor.transaction.storage import TransactionStorage # noqa: F401 + + +settings = HathorSettings() + + +class TxAllowScope(Flag): + """ This enum is used internally to mark which "type" of transactions to allow the database to read/write + + In this context "type" it means validation level, the supported "types" are enumerated in this class and for the + purpose of filtering it can be any combination of the supported types. + """ + VALID = auto() + PARTIAL = auto() + INVALID = auto() + + def is_allowed(self, tx: BaseTransaction) -> bool: + """True means it is allowed to be used in the storage (as argument or as return), False means not allowed.""" + tx_meta = tx.get_metadata() + # XXX: partial/invalid/fully_connected never overlap and cover all possible validation states + # see hathor.transaction.transaction_metadata.ValidationState for more details + validation = tx_meta.validation + if validation.is_partial() and TxAllowScope.PARTIAL not in self: + return False + if validation.is_invalid() and TxAllowScope.INVALID not in self: + return False + # XXX: not allowing valid transactions is really specific, should we allow it? 
+ if validation.is_fully_connected() and TxAllowScope.VALID not in self: + return False + return True + + +@contextmanager +def tx_allow_context(tx_storage: 'TransactionStorage', *, allow_scope: TxAllowScope) -> Generator[None, None, None]: + """This is used to wrap the storage with a temporary allow-scope that is reverted when the context exits""" + from hathor.transaction.storage import TransactionStorage + assert isinstance(tx_storage, TransactionStorage) + previous_allow_scope = tx_storage.allow_scope + try: + tx_storage.allow_scope = allow_scope + yield + finally: + tx_storage.allow_scope = previous_allow_scope diff --git a/hathor/transaction/transaction_metadata.py b/hathor/transaction/transaction_metadata.py index effb32a45..f2cec5ad6 100644 --- a/hathor/transaction/transaction_metadata.py +++ b/hathor/transaction/transaction_metadata.py @@ -69,7 +69,7 @@ def is_at_least_basic(self) -> bool: def is_valid(self) -> bool: """Short-hand property.""" - return self in {ValidationState.FULL, ValidationState.CHECKPOINT} + return self in {ValidationState.FULL, ValidationState.CHECKPOINT, ValidationState.CHECKPOINT_FULL} def is_checkpoint(self) -> bool: """Short-hand property.""" @@ -79,6 +79,10 @@ def is_fully_connected(self) -> bool: """Short-hand property.""" return self in {ValidationState.FULL, ValidationState.CHECKPOINT_FULL} + def is_partial(self) -> bool: + """Short-hand property.""" + return self in {ValidationState.INITIAL, ValidationState.BASIC, ValidationState.CHECKPOINT} + def is_invalid(self) -> bool: """Short-hand property.""" return self is ValidationState.INVALID diff --git a/tests/others/test_init_manager.py b/tests/others/test_init_manager.py index 2fe481543..76d1d2f2e 100644 --- a/tests/others/test_init_manager.py +++ b/tests/others/test_init_manager.py @@ -25,12 +25,12 @@ def __init__(self, *args, **kwargs): def set_first_tx(self, tx: BaseTransaction) -> None: self._first_tx = tx - def get_all_transactions(self, *, include_partial: bool = False) 
-> Iterator[BaseTransaction]: + def _get_all_transactions(self) -> Iterator[BaseTransaction]: skip_hash = None if self._first_tx: yield self._first_tx skip_hash = self._first_tx.hash - for tx in super().get_all_transactions(include_partial=include_partial): + for tx in super()._get_all_transactions(): if tx.hash != skip_hash: yield tx diff --git a/tests/tx/test_tx_storage.py b/tests/tx/test_tx_storage.py index 97ef9d9dc..e5a749959 100644 --- a/tests/tx/test_tx_storage.py +++ b/tests/tx/test_tx_storage.py @@ -226,17 +226,165 @@ def test_save_tx(self): def test_pre_save_validation_invalid_tx_1(self): self.tx.get_metadata().validation = ValidationState.BASIC with self.assertRaises(AssertionError): - self.validate_save(self.tx) + # XXX: avoid using validate_save because an exception could be raised for other reasons + self.tx_storage.save_transaction(self.tx) def test_pre_save_validation_invalid_tx_2(self): self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) with self.assertRaises(AssertionError): - self.validate_save(self.tx) + with self.tx_storage.allow_partially_validated_context(): + # XXX: avoid using validate_save because an exception could be raised for other reasons + self.tx_storage.save_transaction(self.tx) def test_pre_save_validation_success(self): self.tx.get_metadata().validation = ValidationState.BASIC self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + with self.tx_storage.allow_partially_validated_context(): + # XXX: it's good to use validate_save now since we don't expect any exceptions to be raised + self.validate_save(self.tx) + + def test_allow_scope_get_all_transactions(self): + self.tx.get_metadata().validation = ValidationState.BASIC + self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + with self.tx_storage.allow_partially_validated_context(): + self.tx_storage.save_transaction(self.tx) + only_valid_txs = list(self.tx_storage.get_all_transactions()) + self.assertNotIn(self.tx, 
only_valid_txs) + with self.tx_storage.allow_partially_validated_context(): + txs_that_may_be_partial = list(self.tx_storage.get_all_transactions()) + self.assertIn(self.tx, txs_that_may_be_partial) + + def test_allow_scope_topological_sort_dfs(self): + self.tx.get_metadata().validation = ValidationState.BASIC + self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + with self.tx_storage.allow_partially_validated_context(): + self.tx_storage.save_transaction(self.tx) + only_valid_txs = list(self.tx_storage._topological_sort_dfs()) + self.assertNotIn(self.tx, only_valid_txs) + with self.tx_storage.allow_partially_validated_context(): + txs_that_may_be_partial = list(self.tx_storage._topological_sort_dfs()) + self.assertIn(self.tx, txs_that_may_be_partial) + + def test_allow_partially_validated_context(self): + from hathor.transaction.storage.exceptions import TransactionNotInAllowedScopeError + self.tx.get_metadata().validation = ValidationState.BASIC + self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + # should fail because it is out of the allowed scope + with self.assertRaises(TransactionNotInAllowedScopeError): + # XXX: avoid using validate_save because an exception could be raised for other reasons + self.tx_storage.save_transaction(self.tx) + # should succeed because a custom scope is being used + with self.tx_storage.allow_partially_validated_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + self.validate_save(self.tx) + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + 
self.assertFalse(self.tx_storage.is_invalid_allowed()) + # should fail because it is out of the allowed scope + with self.assertRaises(TransactionNotInAllowedScopeError): + self.tx_storage.get_transaction(self.tx.hash) + # should return None since TransactionNotInAllowedScopeError inherits TransactionDoesNotExist + self.assertIsNone(self.tx_storage.get_metadata(self.tx.hash)) + # should succeed because a custom scope is being used + with self.tx_storage.allow_partially_validated_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + self.tx_storage.get_transaction(self.tx.hash) + self.assertIsNotNone(self.tx_storage.get_metadata(self.tx.hash)) + + def test_allow_invalid_context(self): + from hathor.transaction.storage.exceptions import TransactionNotInAllowedScopeError self.validate_save(self.tx) + self.tx.get_metadata().validation = ValidationState.INVALID + # XXX: should this apply to invalid too? 
note that we never save invalid transactions so using the + # PARTIALLY_VALIDATED_ID marker is artificial just for testing + self.tx.get_metadata().add_voided_by(settings.PARTIALLY_VALIDATED_ID) + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + # should fail because it is out of the allowed scope + with self.assertRaises(TransactionNotInAllowedScopeError): + # XXX: avoid using validate_save because an exception could be raised for other reasons + self.tx_storage.save_transaction(self.tx) + # should succeed because a custom scope is being used + with self.tx_storage.allow_invalid_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertTrue(self.tx_storage.is_invalid_allowed()) + self.validate_save(self.tx) + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + # should fail because it is out of the allowed scope + with self.assertRaises(TransactionNotInAllowedScopeError): + self.tx_storage.get_transaction(self.tx.hash) + # should return None since TransactionNotInAllowedScopeError inherits TransactionDoesNotExist + self.assertIsNone(self.tx_storage.get_metadata(self.tx.hash)) + # should succeed because a custom scope is being used + with self.tx_storage.allow_invalid_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertTrue(self.tx_storage.is_invalid_allowed()) + self.tx_storage.get_transaction(self.tx.hash) + self.assertIsNotNone(self.tx_storage.get_metadata(self.tx.hash)) + + def test_allow_scope_context_composing(self): + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + 
self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + with self.tx_storage.allow_invalid_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertTrue(self.tx_storage.is_invalid_allowed()) + with self.tx_storage.allow_partially_validated_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertTrue(self.tx_storage.is_invalid_allowed()) + with self.tx_storage.allow_only_valid_context(): + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertTrue(self.tx_storage.is_invalid_allowed()) + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertTrue(self.tx_storage.is_invalid_allowed()) + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + + def test_allow_scope_context_stacking(self): + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + with self.tx_storage.allow_partially_validated_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + with self.tx_storage.allow_partially_validated_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + 
self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + with self.tx_storage.allow_partially_validated_context(): + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + self.assertFalse(self.tx_storage.is_only_valid_allowed()) + self.assertTrue(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) + self.assertTrue(self.tx_storage.is_only_valid_allowed()) + self.assertFalse(self.tx_storage.is_partially_validated_allowed()) + self.assertFalse(self.tx_storage.is_invalid_allowed()) def test_save_token_creation_tx(self): tx = create_tokens(self.manager, propagate=False) diff --git a/tests/tx/test_validation_states.py b/tests/tx/test_validation_states.py new file mode 100644 index 000000000..b844690f4 --- /dev/null +++ b/tests/tx/test_validation_states.py @@ -0,0 +1,102 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from hathor.transaction.transaction_metadata import ValidationState + + +def test_validation_states_list_unchanged(): + # XXX: if these change there are some code that make certain assumptions that should be reviewd, in particular: + # - hathor.transaction.storage.transaction_storage.tx_allow_scope.TxAllowScope.is_allowed + assert list(ValidationState), [ + ValidationState.INITIAL, + ValidationState.BASIC, + ValidationState.CHECKPOINT, + ValidationState.FULL, + ValidationState.CHECKPOINT_FULL, + ValidationState.INVALID, + ] + + +def test_validation_states_properties(): + # ValidationState.INITIAL + assert ValidationState.INITIAL.is_initial() is True + assert ValidationState.INITIAL.is_at_least_basic() is False + assert ValidationState.INITIAL.is_valid() is False + assert ValidationState.INITIAL.is_checkpoint() is False + assert ValidationState.INITIAL.is_fully_connected() is False + assert ValidationState.INITIAL.is_partial() is True + assert ValidationState.INITIAL.is_invalid() is False + assert ValidationState.INITIAL.is_final() is False + # ValidationState.BASIC + assert ValidationState.BASIC.is_initial() is False + assert ValidationState.BASIC.is_at_least_basic() is True + assert ValidationState.BASIC.is_valid() is False + assert ValidationState.BASIC.is_checkpoint() is False + assert ValidationState.BASIC.is_fully_connected() is False + assert ValidationState.BASIC.is_partial() is True + assert ValidationState.BASIC.is_invalid() is False + assert ValidationState.BASIC.is_final() is False + # ValidationState.CHECKPOINT + assert ValidationState.CHECKPOINT.is_initial() is False + assert ValidationState.CHECKPOINT.is_at_least_basic() is True + assert ValidationState.CHECKPOINT.is_valid() is True + assert ValidationState.CHECKPOINT.is_checkpoint() is True + assert ValidationState.CHECKPOINT.is_fully_connected() is False + assert ValidationState.CHECKPOINT.is_partial() is True + assert ValidationState.CHECKPOINT.is_invalid() is False + assert 
ValidationState.CHECKPOINT.is_final() is False + # ValidationState.FULL + assert ValidationState.FULL.is_initial() is False + assert ValidationState.FULL.is_at_least_basic() is True + assert ValidationState.FULL.is_valid() is True + assert ValidationState.FULL.is_checkpoint() is False + assert ValidationState.FULL.is_fully_connected() is True + assert ValidationState.FULL.is_partial() is False + assert ValidationState.FULL.is_invalid() is False + assert ValidationState.FULL.is_final() is True + # ValidationState.CHECKPOINT_FULL + assert ValidationState.CHECKPOINT_FULL.is_initial() is False + assert ValidationState.CHECKPOINT_FULL.is_at_least_basic() is True + assert ValidationState.CHECKPOINT_FULL.is_valid() is True + assert ValidationState.CHECKPOINT_FULL.is_checkpoint() is True + assert ValidationState.CHECKPOINT_FULL.is_fully_connected() is True + assert ValidationState.CHECKPOINT_FULL.is_partial() is False + assert ValidationState.CHECKPOINT_FULL.is_invalid() is False + assert ValidationState.CHECKPOINT_FULL.is_final() is True + # ValidationState.INVALID + assert ValidationState.INVALID.is_initial() is False + assert ValidationState.INVALID.is_at_least_basic() is False + assert ValidationState.INVALID.is_valid() is False + assert ValidationState.INVALID.is_checkpoint() is False + assert ValidationState.INVALID.is_fully_connected() is False + assert ValidationState.INVALID.is_partial() is False + assert ValidationState.INVALID.is_invalid() is True + assert ValidationState.INVALID.is_final() is True + + +def test_validation_states_partition_properties(): + # these set of properties must not overlap and must cover all states: + # - is_partial + # - is_fully_connected + # - is_invalid + # this means that: + # - for each state at most one of these properties must be true + # - for each state at least one of these properties must be true + properties = [ + ValidationState.is_partial, + ValidationState.is_fully_connected, + ValidationState.is_invalid, + ] + for state 
in ValidationState: + assert sum(int(prop(state)) for prop in properties) == 1 From 7f35116b777e54a320f10ca4ce27f9adb44e133e Mon Sep 17 00:00:00 2001 From: Marcelo Salhab Brogliato Date: Tue, 23 May 2023 10:02:57 -0500 Subject: [PATCH 22/24] feat(ws): Change default configuration of WS_MAX_SUBS_ADDRS_CONN and WS_MAX_SUBS_ADDRS_EMPTY to infinite --- hathor/builder/sysctl_builder.py | 7 ++- hathor/conf/settings.py | 4 +- hathor/sysctl/__init__.py | 2 + hathor/sysctl/p2p/manager.py | 10 ----- hathor/sysctl/websocket/__init__.py | 13 ++++++ hathor/sysctl/websocket/manager.py | 70 +++++++++++++++++++++++++++++ hathor/websocket/factory.py | 13 ++++-- tests/sysctl/test_websocket.py | 44 ++++++++++++++++++ 8 files changed, 146 insertions(+), 17 deletions(-) create mode 100644 hathor/sysctl/websocket/__init__.py create mode 100644 hathor/sysctl/websocket/manager.py create mode 100644 tests/sysctl/test_websocket.py diff --git a/hathor/builder/sysctl_builder.py b/hathor/builder/sysctl_builder.py index a28c02fd7..60b2cb0ed 100644 --- a/hathor/builder/sysctl_builder.py +++ b/hathor/builder/sysctl_builder.py @@ -13,7 +13,7 @@ # limitations under the License. 
from hathor.builder import BuildArtifacts -from hathor.sysctl import ConnectionsManagerSysctl, Sysctl +from hathor.sysctl import ConnectionsManagerSysctl, Sysctl, WebsocketManagerSysctl class SysctlBuilder: @@ -25,4 +25,9 @@ def build(self) -> Sysctl: """Build the sysctl tree.""" root = Sysctl() root.put_child('p2p', ConnectionsManagerSysctl(self.artifacts.p2p_manager)) + + ws_factory = self.artifacts.manager.metrics.websocket_factory + if ws_factory is not None: + root.put_child('ws', WebsocketManagerSysctl(ws_factory)) + return root diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index d1cc73494..7aaa14322 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -230,10 +230,10 @@ def MAXIMUM_NUMBER_OF_HALVINGS(self) -> int: PUSHTX_MAX_OUTPUT_SCRIPT_SIZE: int = 256 # Maximum number of subscribed addresses per websocket connection - WS_MAX_SUBS_ADDRS_CONN: int = 200000 + WS_MAX_SUBS_ADDRS_CONN: Optional[int] = None # Maximum number of subscribed addresses that do not have any outputs (also per websocket connection) - WS_MAX_SUBS_ADDRS_EMPTY: int = 100 + WS_MAX_SUBS_ADDRS_EMPTY: Optional[int] = None # Whether miners are assumed to mine txs by default STRATUM_MINE_TXS_DEFAULT: bool = True diff --git a/hathor/sysctl/__init__.py b/hathor/sysctl/__init__.py index 34525e51c..dc2ef2a67 100644 --- a/hathor/sysctl/__init__.py +++ b/hathor/sysctl/__init__.py @@ -14,8 +14,10 @@ from hathor.sysctl.p2p.manager import ConnectionsManagerSysctl from hathor.sysctl.sysctl import Sysctl +from hathor.sysctl.websocket.manager import WebsocketManagerSysctl __all__ = [ 'Sysctl', 'ConnectionsManagerSysctl', + 'WebsocketManagerSysctl', ] diff --git a/hathor/sysctl/p2p/manager.py b/hathor/sysctl/p2p/manager.py index 3fc4779c4..880f201ca 100644 --- a/hathor/sysctl/p2p/manager.py +++ b/hathor/sysctl/p2p/manager.py @@ -69,14 +69,10 @@ def __init__(self, connections: ConnectionsManager) -> None: self.set_always_enable_sync_readtxt, ) - ############# - def 
set_force_sync_rotate(self) -> None: """Force a sync rotate.""" self.connections._sync_rotate_if_needed(force=True) - ############# - def get_global_send_tips_rate_limit(self) -> Tuple[int, float]: """Return the global rate limiter for SEND_TIPS.""" limit = self.connections.rate_limiter.get_limit(self.connections.GlobalRateLimiter.SEND_TIPS) @@ -97,8 +93,6 @@ def set_global_send_tips_rate_limit(self, max_hits: int, window_seconds: float) raise SysctlException('window_seconds must be >= 0') self.connections.enable_rate_limiter(max_hits, window_seconds) - ############# - def get_lc_sync_update_interval(self) -> float: """Return the interval to rotate sync (in seconds).""" return self.connections.lc_sync_update_interval @@ -112,8 +106,6 @@ def set_lc_sync_update_interval(self, value: float) -> None: self.connections.lc_sync_update.stop() self.connections.lc_sync_update.start(self.connections.lc_sync_update_interval, now=False) - ############# - def get_always_enable_sync(self) -> List[str]: """Return the list of sync-always-enabled peers.""" return list(self.connections.always_enable_sync) @@ -131,8 +123,6 @@ def set_always_enable_sync_readtxt(self, file_path: str) -> None: values = parse_text(fp.read()) self.connections.set_always_enable_sync(values) - ############# - def get_max_enabled_sync(self) -> int: """Return the maximum number of peers running sync simultaneously.""" return self.connections.MAX_ENABLED_SYNC diff --git a/hathor/sysctl/websocket/__init__.py b/hathor/sysctl/websocket/__init__.py new file mode 100644 index 000000000..caba4a1be --- /dev/null +++ b/hathor/sysctl/websocket/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hathor/sysctl/websocket/manager.py b/hathor/sysctl/websocket/manager.py new file mode 100644 index 000000000..c371cf2f9 --- /dev/null +++ b/hathor/sysctl/websocket/manager.py @@ -0,0 +1,70 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from hathor.sysctl.exception import SysctlException +from hathor.sysctl.sysctl import Sysctl +from hathor.websocket.factory import HathorAdminWebsocketFactory + + +class WebsocketManagerSysctl(Sysctl): + def __init__(self, factory: HathorAdminWebsocketFactory) -> None: + super().__init__() + self.factory = factory + + self.register( + 'max_subs_addrs_conn', + self.get_max_subs_addrs_conn, + self.set_max_subs_addrs_conn, + ) + self.register( + 'max_subs_addrs_empty', + self.get_max_subs_addrs_empty, + self.set_max_subs_addrs_empty, + ) + + def get_max_subs_addrs_conn(self) -> int: + """Return the maximum number of subscribed addresses per websocket connection. 
+ Note: -1 means unlimited""" + value = self.factory.max_subs_addrs_conn + if value is None: + return -1 + return value + + def set_max_subs_addrs_conn(self, value: int) -> None: + """Change the maximum number of subscribed addresses per websocket connection. + Use -1 for unlimited""" + if value == -1: + self.factory.max_subs_addrs_conn = None + return + if value < 0: + raise SysctlException('value must be >= 0 or -1') + self.factory.max_subs_addrs_conn = value + + def get_max_subs_addrs_empty(self) -> int: + """Return the maximum number of subscribed addresses that do not have any outputs per websocket connection. + Note: -1 means unlimited""" + value = self.factory.max_subs_addrs_empty + if value is None: + return -1 + return value + + def set_max_subs_addrs_empty(self, value: int) -> None: + """Change the maximum number of subscribed addresses that do not have any outputs per websocket connection. + Use -1 for unlimited""" + if value == -1: + self.factory.max_subs_addrs_empty = None + return + if value < 0: + raise SysctlException('value must be >= 0 or -1') + self.factory.max_subs_addrs_empty = value diff --git a/hathor/websocket/factory.py b/hathor/websocket/factory.py index 7c4b636e9..4e672d7c1 100644 --- a/hathor/websocket/factory.py +++ b/hathor/websocket/factory.py @@ -78,6 +78,9 @@ class HathorAdminWebsocketFactory(WebSocketServerFactory): """ protocol = HathorAdminWebsocketProtocol + max_subs_addrs_conn: Optional[int] = settings.WS_MAX_SUBS_ADDRS_CONN + max_subs_addrs_empty: Optional[int] = settings.WS_MAX_SUBS_ADDRS_EMPTY + def buildProtocol(self, addr): return self.protocol(self) @@ -318,13 +321,15 @@ def _handle_subscribe_address(self, connection: HathorAdminWebsocketProtocol, me """ Handler for subscription to an address, consideirs subscription limits.""" addr: str = message['address'] subs: Set[str] = connection.subscribed_to - if len(subs) >= settings.WS_MAX_SUBS_ADDRS_CONN: + if self.max_subs_addrs_conn is not None and len(subs) >= 
self.max_subs_addrs_conn: payload = json_dumpb({'message': 'Reached maximum number of subscribed ' - f'addresses ({settings.WS_MAX_SUBS_ADDRS_CONN}).', + f'addresses ({self.max_subs_addrs_conn}).', 'type': 'subscribe_address', 'success': False}) - elif self.address_index and _count_empty(subs, self.address_index) >= settings.WS_MAX_SUBS_ADDRS_EMPTY: + elif self.max_subs_addrs_empty is not None and ( + self.address_index and _count_empty(subs, self.address_index) >= self.max_subs_addrs_empty + ): payload = json_dumpb({'message': 'Reached maximum number of subscribed ' - f'addresses without output ({settings.WS_MAX_SUBS_ADDRS_EMPTY}).', + f'addresses without output ({self.max_subs_addrs_empty}).', 'type': 'subscribe_address', 'success': False}) else: self.address_connections[addr].add(connection) diff --git a/tests/sysctl/test_websocket.py b/tests/sysctl/test_websocket.py new file mode 100644 index 000000000..f8dc44f08 --- /dev/null +++ b/tests/sysctl/test_websocket.py @@ -0,0 +1,44 @@ +from hathor.sysctl import WebsocketManagerSysctl +from hathor.sysctl.exception import SysctlException +from hathor.websocket.factory import HathorAdminWebsocketFactory +from tests import unittest + + +class WebsocketSysctlTestCase(unittest.TestCase): + def test_max_subs_addrs_conn(self): + ws_factory = HathorAdminWebsocketFactory() + sysctl = WebsocketManagerSysctl(ws_factory) + + sysctl.set('max_subs_addrs_conn', 10) + self.assertEqual(ws_factory.max_subs_addrs_conn, 10) + self.assertEqual(sysctl.get('max_subs_addrs_conn'), 10) + + sysctl.set('max_subs_addrs_conn', 0) + self.assertEqual(ws_factory.max_subs_addrs_conn, 0) + self.assertEqual(sysctl.get('max_subs_addrs_conn'), 0) + + sysctl.set('max_subs_addrs_conn', -1) + self.assertIsNone(ws_factory.max_subs_addrs_conn) + self.assertEqual(sysctl.get('max_subs_addrs_conn'), -1) + + with self.assertRaises(SysctlException): + sysctl.set('max_subs_addrs_conn', -2) + + def test_max_subs_addrs_empty(self): + ws_factory = 
HathorAdminWebsocketFactory() + sysctl = WebsocketManagerSysctl(ws_factory) + + sysctl.set('max_subs_addrs_empty', 10) + self.assertEqual(ws_factory.max_subs_addrs_empty, 10) + self.assertEqual(sysctl.get('max_subs_addrs_empty'), 10) + + sysctl.set('max_subs_addrs_empty', 0) + self.assertEqual(ws_factory.max_subs_addrs_empty, 0) + self.assertEqual(sysctl.get('max_subs_addrs_empty'), 0) + + sysctl.set('max_subs_addrs_empty', -1) + self.assertIsNone(ws_factory.max_subs_addrs_empty) + self.assertEqual(sysctl.get('max_subs_addrs_empty'), -1) + + with self.assertRaises(SysctlException): + sysctl.set('max_subs_addrs_empty', -2) From 94e99cc46cb90b01ebf11dcdd2f810cd24cabc9d Mon Sep 17 00:00:00 2001 From: Jan Segre Date: Tue, 23 May 2023 20:33:34 +0200 Subject: [PATCH 23/24] refactor(settings): make HathorSettings always return the same instance This wasn't _exactly_ the case before this commit. On one side when using the deprecated module settings the same module would always be loaded, and the same `SETTINGS` property would be accessed, but reloading the module (or at least leaving that up to importlib) unnecessary. On the new YAML system the file is reloaded everytime, this is also unnecessary. This commit will make it the last HathorSettings instantiated will be returned, as long as the source is the same, otherwise an exception is raised. There is a little small caveat that is fixed in this commit is that in the YAML file could in theory be altered between different calls to HathorSettings, which would lead to different content being loaded silently. What happens now is that the first time it is loaded is what is used for every call, any change in the file won't have any effect. 
--- hathor/builder/cli_builder.py | 5 +- hathor/conf/get_settings.py | 88 +++++++++++++++++++---------------- 2 files changed, 51 insertions(+), 42 deletions(-) diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index 7d3a9e040..420917a0e 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -55,7 +55,7 @@ def check_or_raise(self, condition: bool, message: str) -> None: def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorManager: import hathor from hathor.conf import HathorSettings - from hathor.conf.get_settings import get_settings_filepath, get_settings_module + from hathor.conf.get_settings import get_settings_source from hathor.daa import TestMode, _set_test_mode from hathor.event.storage import EventMemoryStorage, EventRocksDBStorage, EventStorage from hathor.event.websocket.factory import EventWebsocketFactory @@ -74,8 +74,7 @@ def create_manager(self, reactor: PosixReactorBase, args: Namespace) -> HathorMa settings = HathorSettings() # only used for logging its location - settings_module = get_settings_module() - settings_source = settings_module.__file__ if settings_module is not None else get_settings_filepath() + settings_source = get_settings_source() self.log = logger.new() self.reactor = reactor diff --git a/hathor/conf/get_settings.py b/hathor/conf/get_settings.py index d555204f1..2cf8b46d7 100644 --- a/hathor/conf/get_settings.py +++ b/hathor/conf/get_settings.py @@ -14,8 +14,7 @@ import importlib import os -from types import ModuleType -from typing import Optional +from typing import NamedTuple, Optional from structlog import get_logger @@ -24,8 +23,14 @@ logger = get_logger() -_settings_filepath: Optional[str] = None -_config_file: Optional[str] = None + +class _SettingsMetadata(NamedTuple): + source: str + is_yaml: bool + settings: Settings + + +_settings_singleton: Optional[_SettingsMetadata] = None def HathorSettings() -> Settings: @@ -37,52 +42,57 @@ def 
HathorSettings() -> Settings: If neither is set, or if the module import fails, the mainnet configuration is returned. """ - settings_module = get_settings_module() - if settings_module is not None: - log = logger.new() - log.warn( - "Setting a config module via the 'HATHOR_CONFIG_FILE' env var will be deprecated soon. " - "Use the '--config-yaml' CLI option or the 'HATHOR_CONFIG_YAML' env var to set a yaml filepath instead." - ) - settings = getattr(settings_module, 'SETTINGS') - assert isinstance(settings, Settings) - return settings + settings_module_filepath = os.environ.get('HATHOR_CONFIG_FILE') + if settings_module_filepath is not None: + return _load_settings_singleton(settings_module_filepath, is_yaml=False) - settings_filepath = get_settings_filepath() + settings_yaml_filepath = os.environ.get('HATHOR_CONFIG_YAML', conf.MAINNET_SETTINGS_FILEPATH) + return _load_settings_singleton(settings_yaml_filepath, is_yaml=True) - return Settings.from_yaml(filepath=settings_filepath) +def get_settings_source() -> str: + """ Returns the path of the settings module or YAML file that was loaded. + + XXX: Will raise an assertion error if HathorSettings() wasn't used before. 
+ """ + global _settings_singleton + assert _settings_singleton is not None, 'HathorSettings() not called before' + return _settings_singleton.source -def get_settings_module() -> Optional[ModuleType]: - global _config_file - # Import config file for network - config_file = os.environ.get('HATHOR_CONFIG_FILE') - if _config_file is None: - _config_file = config_file - elif _config_file != config_file: - raise Exception('loading config twice with a different file') - if not config_file: - return None +def _load_settings_singleton(source: str, *, is_yaml: bool) -> Settings: + global _settings_singleton - try: - module = importlib.import_module(config_file) - except ModuleNotFoundError: - default_file = 'hathor.conf.mainnet' - module = importlib.import_module(default_file) + if _settings_singleton is not None: + if _settings_singleton.is_yaml != is_yaml: + raise Exception('loading config twice with a different file type') + if _settings_singleton.source != source: + raise Exception('loading config twice with a different file') - return module + return _settings_singleton.settings + settings_loader = _load_yaml_settings if is_yaml else _load_module_settings + _settings_singleton = _SettingsMetadata( + source=source, + is_yaml=is_yaml, + settings=settings_loader(source) + ) -def get_settings_filepath() -> str: - global _settings_filepath + return _settings_singleton.settings - new_settings_filepath = os.environ.get('HATHOR_CONFIG_YAML', conf.MAINNET_SETTINGS_FILEPATH) - if _settings_filepath is not None and _settings_filepath != new_settings_filepath: - raise Exception('loading config twice with a different file') +def _load_module_settings(module_path: str) -> Settings: + log = logger.new() + log.warn( + "Setting a config module via the 'HATHOR_CONFIG_FILE' env var will be deprecated soon. " + "Use the '--config-yaml' CLI option or the 'HATHOR_CONFIG_YAML' env var to set a yaml filepath instead." 
+ ) + settings_module = importlib.import_module(module_path) + settings = getattr(settings_module, 'SETTINGS') + assert isinstance(settings, Settings) + return settings - _settings_filepath = new_settings_filepath - return new_settings_filepath +def _load_yaml_settings(filepath: str) -> Settings: + return Settings.from_yaml(filepath=filepath) From 91d264c540c2139a76c73c8972fcace52661bbb1 Mon Sep 17 00:00:00 2001 From: Luis Helder Date: Tue, 25 Apr 2023 14:48:57 -0300 Subject: [PATCH 24/24] chore(bump): v0.54.0 --- hathor/cli/openapi_files/openapi_base.json | 2 +- hathor/version.py | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hathor/cli/openapi_files/openapi_base.json b/hathor/cli/openapi_files/openapi_base.json index 8c367fdbf..8a45e68ac 100644 --- a/hathor/cli/openapi_files/openapi_base.json +++ b/hathor/cli/openapi_files/openapi_base.json @@ -7,7 +7,7 @@ ], "info": { "title": "Hathor API", - "version": "0.53.0" + "version": "0.54.0" }, "consumes": [ "application/json" diff --git a/hathor/version.py b/hathor/version.py index e7e15ffaa..dee76ceaf 100644 --- a/hathor/version.py +++ b/hathor/version.py @@ -19,7 +19,7 @@ from structlog import get_logger -BASE_VERSION = '0.53.0' +BASE_VERSION = '0.54.0' DEFAULT_VERSION_SUFFIX = "local" BUILD_VERSION_FILE_PATH = "./BUILD_VERSION" diff --git a/pyproject.toml b/pyproject.toml index 9d3bb09cc..6c1170a29 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ [tool.poetry] name = "hathor" -version = "0.53.0" +version = "0.54.0" description = "Hathor Network full-node" authors = ["Hathor Team "] license = "Apache-2.0"