diff --git a/hathor/builder/builder.py b/hathor/builder/builder.py index c53c10f2f..3c5cccf5f 100644 --- a/hathor/builder/builder.py +++ b/hathor/builder/builder.py @@ -182,8 +182,6 @@ def __init__(self) -> None: self._enable_stratum_server: Optional[bool] = None - self._full_verification: Optional[bool] = None - self._soft_voided_tx_ids: Optional[set[bytes]] = None self._execution_manager: ExecutionManager | None = None @@ -239,9 +237,6 @@ def build(self) -> BuildArtifacts: kwargs: dict[str, Any] = {} - if self._full_verification is not None: - kwargs['full_verification'] = self._full_verification - if self._enable_event_queue is not None: kwargs['enable_event_queue'] = self._enable_event_queue @@ -778,21 +773,6 @@ def disable_sync_v2(self) -> 'Builder': self._sync_v2_support = SyncSupportLevel.DISABLED return self - def set_full_verification(self, full_verification: bool) -> 'Builder': - self.check_if_can_modify() - self._full_verification = full_verification - return self - - def enable_full_verification(self) -> 'Builder': - self.check_if_can_modify() - self._full_verification = True - return self - - def disable_full_verification(self) -> 'Builder': - self.check_if_can_modify() - self._full_verification = False - return self - def enable_ipv6(self) -> 'Builder': self.check_if_can_modify() self._enable_ipv6 = True diff --git a/hathor/builder/cli_builder.py b/hathor/builder/cli_builder.py index f51b01ff2..2b18b8830 100644 --- a/hathor/builder/cli_builder.py +++ b/hathor/builder/cli_builder.py @@ -234,8 +234,6 @@ def create_manager(self, reactor: Reactor) -> HathorManager: self.log.debug('enable utxo index') tx_storage.indexes.enable_utxo_index() - self.check_or_raise(not self._args.x_full_verification, '--x-full-verification is deprecated') - soft_voided_tx_ids = set(settings.SOFT_VOIDED_TX_IDS) consensus_algorithm = ConsensusAlgorithm( soft_voided_tx_ids, @@ -331,7 +329,6 @@ def create_manager(self, reactor: Reactor) -> HathorManager: wallet=self.wallet, 
checkpoints=settings.CHECKPOINTS, environment_info=get_environment_info(args=str(self._args), peer_id=str(peer.id)), - full_verification=False, enable_event_queue=self._args.x_enable_event_queue or self._args.enable_event_queue, bit_signaling_service=bit_signaling_service, verification_service=verification_service, diff --git a/hathor/cli/events_simulator/events_simulator.py b/hathor/cli/events_simulator/events_simulator.py index 23fe64418..6319a280c 100644 --- a/hathor/cli/events_simulator/events_simulator.py +++ b/hathor/cli/events_simulator/events_simulator.py @@ -61,7 +61,6 @@ def execute(args: Namespace, reactor: 'ReactorProtocol') -> None: simulator = Simulator(args.seed) simulator.start() builder = simulator.get_default_builder() \ - .disable_full_verification() \ .enable_event_queue() manager = simulator.create_peer(builder) diff --git a/hathor/cli/run_node.py b/hathor/cli/run_node.py index bccadb0f3..e1ef12520 100644 --- a/hathor/cli/run_node.py +++ b/hathor/cli/run_node.py @@ -124,7 +124,6 @@ def create_parser(cls) -> ArgumentParser: parser.add_argument('--cache-interval', type=int, help='Cache flush interval') parser.add_argument('--recursion-limit', type=int, help='Set python recursion limit') parser.add_argument('--allow-mining-without-peers', action='store_true', help='Allow mining without peers') - parser.add_argument('--x-full-verification', action='store_true', help=SUPPRESS) # deprecated parser.add_argument('--procname-prefix', help='Add a prefix to the process name', default='') parser.add_argument('--allow-non-standard-script', action='store_true', help='Accept non-standard scripts on ' '/push-tx API') diff --git a/hathor/cli/run_node_args.py b/hathor/cli/run_node_args.py index 6f076253f..884d03da4 100644 --- a/hathor/cli/run_node_args.py +++ b/hathor/cli/run_node_args.py @@ -59,7 +59,6 @@ class RunNodeArgs(BaseModel, extra=Extra.allow): cache_interval: Optional[int] recursion_limit: Optional[int] allow_mining_without_peers: bool - 
x_full_verification: bool procname_prefix: str allow_non_standard_script: bool max_output_script_size: Optional[int] diff --git a/hathor/conf/settings.py b/hathor/conf/settings.py index 654ca398f..8ca0013fa 100644 --- a/hathor/conf/settings.py +++ b/hathor/conf/settings.py @@ -359,10 +359,6 @@ def GENESIS_TX2_TIMESTAMP(self) -> int: # Amount in which tx min weight reaches the middle point between the minimum and maximum weight MIN_TX_WEIGHT_K: int = 100 - # When the node is being initialized (with a full verification) we don't verify - # the difficulty of all blocks, we execute the validation every N blocks only - VERIFY_WEIGHT_EVERY_N_BLOCKS: int = 1000 - # Capabilities CAPABILITY_WHITELIST: str = 'whitelist' CAPABILITY_SYNC_VERSION: str = 'sync-version' diff --git a/hathor/manager.py b/hathor/manager.py index 4ac9326f4..b44407eb0 100644 --- a/hathor/manager.py +++ b/hathor/manager.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import datetime import sys import time from cProfile import Profile @@ -53,7 +52,6 @@ from hathor.reward_lock import is_spent_reward_locked from hathor.stratum import StratumFactory from hathor.transaction import BaseTransaction, Block, MergeMinedBlock, Transaction, TxVersion -from hathor.transaction.exceptions import TxValidationError from hathor.transaction.storage.exceptions import TransactionDoesNotExist from hathor.transaction.storage.transaction_storage import TransactionStorage from hathor.transaction.storage.tx_allow_scope import TxAllowScope @@ -115,7 +113,6 @@ def __init__( checkpoints: Optional[list[Checkpoint]] = None, rng: Optional[Random] = None, environment_info: Optional[EnvironmentInfo] = None, - full_verification: bool = False, enable_event_queue: bool = False, poa_block_producer: PoaBlockProducer | None = None, # Websocket factory @@ -223,10 +220,6 @@ def __init__( # Thread pool used to resolve pow when sending tokens self.pow_thread_pool = ThreadPool(minthreads=0, maxthreads=settings.MAX_POW_THREADS, name='Pow thread pool') - # Full verification execute all validations for transactions and blocks when initializing the node - # Can be activated on the command line with --full-verification - self._full_verification = full_verification - # List of whitelisted peers self.peers_whitelist: list[PeerId] = [] @@ -272,33 +265,16 @@ def start(self) -> None: ) sys.exit(-1) - # If it's a full verification, we save on the storage that we are starting it - # this is required because if we stop the initilization in the middle, the metadata - # saved on the storage is not reliable anymore, only if we finish it - if self._full_verification: - self.tx_storage.start_full_verification() - else: - # If it's a fast initialization and the last time a full initialization stopped in the middle - # we can't allow the full node to continue, so we need to remove the storage and do a full sync - # or execute an initialization with full verification - if 
self.tx_storage.is_running_full_verification(): - self.log.error( - 'Error initializing node. The last time you started your node you did a full verification ' - 'that was stopped in the middle. The storage is not reliable anymore and, because of that, ' - 'you must initialize with a full verification again or remove your storage and do a full sync.' - ) - sys.exit(-1) - - # If self.tx_storage.is_running_manager() is True, the last time the node was running it had a sudden crash - because of that, we must run a full verification because some storage data might be wrong. - The metadata is the only piece of the storage that may be wrong, not the blocks and transactions. - if self.tx_storage.is_running_manager(): - self.log.error( - 'Error initializing node. The last time you executed your full node it wasn\'t stopped correctly. ' - 'The storage is not reliable anymore and, because of that, so you must run a full verification ' - 'or remove your storage and do a full sync.' - ) - sys.exit(-1) + # If self.tx_storage.is_running_manager() is True, the last time the node was running it had a sudden crash + because of that, we must run a sync from scratch or from a snapshot. + The metadata is the only piece of the storage that may be wrong, not the blocks and transactions. + if self.tx_storage.is_running_manager(): + self.log.error( + 'Error initializing node. The last time you executed your full node it wasn\'t stopped correctly. ' + 'The storage is not reliable anymore and, because of that, you must remove your storage and do a ' + 'sync from scratch or from a snapshot.' + ) + sys.exit(-1) if self._enable_event_queue: self._event_manager.start(str(self.my_peer.id)) @@ -312,16 +288,7 @@ def start(self) -> None: self.tx_storage.disable_lock() # Open scope for initialization. self.tx_storage.set_allow_scope(TxAllowScope.VALID | TxAllowScope.PARTIAL | TxAllowScope.INVALID) - # Initialize manager's components.
- if self._full_verification: - self.tx_storage.reset_indexes() - self._initialize_components_full_verification() - # Before calling self._initialize_components_full_verification() I start 'full verification' mode and - # after that I need to finish it. It's just to know if the full node has stopped a full initialization - # in the middle. - self.tx_storage.finish_full_verification() - else: - self._initialize_components_new() + self._initialize_components() self.tx_storage.set_allow_scope(TxAllowScope.VALID) self.tx_storage.enable_lock() @@ -414,159 +381,7 @@ def stop_profiler(self, save_to: Optional[str] = None) -> None: if save_to: self.profiler.dump_stats(save_to) - def _initialize_components_full_verification(self) -> None: - """You are not supposed to run this method manually. You should run `doStart()` to initialize the - manager. - - This method runs through all transactions, verifying them and updating our wallet. - """ - assert not self._enable_event_queue, 'this method cannot be used if the events feature is enabled.' - assert self._full_verification - - self.log.info('initialize') - if self.wallet: - self.wallet._manually_initialize() - t0 = time.time() - t1 = t0 - cnt = 0 - cnt2 = 0 - t2 = t0 - h = 0 - - block_count = 0 - tx_count = 0 - - self.tx_storage.pre_init() - assert self.tx_storage.indexes is not None - - self._verify_soft_voided_txs() - - # Checkpoints as {height: hash} - checkpoint_heights = {} - for cp in self.checkpoints: - checkpoint_heights[cp.height] = cp.hash - - # self.start_profiler() - self.log.debug('reset all metadata') - for tx in self.tx_storage.get_all_transactions(): - tx.reset_metadata() - - self.log.debug('load blocks and transactions') - for tx in self.tx_storage._topological_sort_dfs(): - tx_meta = tx.get_metadata() - - t2 = time.time() - dt = LogDuration(t2 - t1) - dcnt = cnt - cnt2 - tx_rate = '?' 
if dt == 0 else dcnt / dt - h = max(h, (tx.static_metadata.height if isinstance(tx, Block) else 0)) - if dt > 30: - ts_date = datetime.datetime.fromtimestamp(self.tx_storage.latest_timestamp) - if h == 0: - self.log.debug('start loading transactions...') - else: - self.log.info('load transactions...', tx_rate=tx_rate, tx_new=dcnt, dt=dt, - total=cnt, latest_ts=ts_date, height=h) - t1 = t2 - cnt2 = cnt - cnt += 1 - - # It's safe to skip block weight verification during initialization because - # we trust the difficulty stored in metadata - skip_block_weight_verification = True - if block_count % self._settings.VERIFY_WEIGHT_EVERY_N_BLOCKS == 0: - skip_block_weight_verification = False - - try: - # TODO: deal with invalid tx - tx._update_parents_children_metadata() - - if self.tx_storage.can_validate_full(tx): - tx.update_initial_metadata() - if tx.is_genesis: - assert tx.validate_checkpoint(self.checkpoints) - assert self.verification_service.validate_full( - tx, - skip_block_weight_verification=skip_block_weight_verification - ) - self.tx_storage.add_to_indexes(tx) - with self.tx_storage.allow_only_valid_context(): - self.consensus_algorithm.unsafe_update(tx) - self.tx_storage.indexes.update(tx) - if self.tx_storage.indexes.mempool_tips is not None: - self.tx_storage.indexes.mempool_tips.update(tx) # XXX: move to indexes.update - self.tx_storage.save_transaction(tx, only_metadata=True) - else: - assert self.verification_service.validate_basic( - tx, - skip_block_weight_verification=skip_block_weight_verification - ) - self.tx_storage.save_transaction(tx, only_metadata=True) - except (InvalidNewTransaction, TxValidationError): - self.log.error('unexpected error when initializing', tx=tx, exc_info=True) - raise - - if tx.is_block: - block_count += 1 - - # this works because blocks on the best chain are iterated from lower to higher height - assert tx_meta.validation.is_at_least_basic() - assert isinstance(tx, Block) - blk_height = tx.get_height() - if not 
tx_meta.voided_by and tx_meta.validation.is_fully_connected(): - # XXX: this might not be needed when making a full init because the consensus should already have - self.tx_storage.indexes.height.add_reorg(blk_height, tx.hash, tx.timestamp) - - # Check if it's a checkpoint block - if blk_height in checkpoint_heights: - if tx.hash == checkpoint_heights[blk_height]: - del checkpoint_heights[blk_height] - else: - # If the hash is different from checkpoint hash, we stop the node - self.log.error('Error initializing the node. Checkpoint validation error.') - sys.exit() - else: - tx_count += 1 - - if time.time() - t2 > 1: - dt = LogDuration(time.time() - t2) - self.log.warn('tx took too long to load', tx=tx.hash_hex, dt=dt) - - # we have to have a best_block by now - # assert best_block is not None - - self.tx_storage.indexes._manually_initialize(self.tx_storage) - - self.log.debug('done loading transactions') - - # Check if all checkpoints in database are ok - my_best_height = self.tx_storage.get_height_best_block() - if checkpoint_heights: - # If I have checkpoints that were not validated I must check if they are all in a height I still don't have - first = min(list(checkpoint_heights.keys())) - if first <= my_best_height: - # If the height of the first checkpoint not validated is lower than the height of the best block - # Then it's missing this block - self.log.error('Error initializing the node. Checkpoint validation error.') - sys.exit() - - best_height = self.tx_storage.get_height_best_block() - if best_height != h: - self.log.warn('best height doesn\'t match', best_height=best_height, max_height=h) - - # self.stop_profiler(save_to='profiles/initializing.prof') - self.state = self.NodeState.READY - - total_load_time = LogDuration(t2 - t0) - tx_rate = '?' 
if total_load_time == 0 else cnt / total_load_time - - environment_info = self.environment_info.as_dict() if self.environment_info else {} - - # Changing the field names in this log could impact log collectors that parse them - self.log.info('ready', vertex_count=cnt, tx_rate=tx_rate, total_load_time=total_load_time, height=h, - blocks=block_count, txs=tx_count, **environment_info) - - def _initialize_components_new(self) -> None: + def _initialize_components(self) -> None: """You are not supposed to run this method manually. You should run `doStart()` to initialize the manager. @@ -593,10 +408,6 @@ def _initialize_components_new(self) -> None: started_at=started_at, last_started_at=last_started_at) self._verify_soft_voided_txs() - - # TODO: move support for full-verification here, currently we rely on the original _initialize_components - # method for full-verification to work, if we implement it here we'll reduce a lot of duplicate and - # complex code self.tx_storage.indexes._manually_initialize(self.tx_storage) # Verify if all checkpoints that exist in the database are correct diff --git a/hathor/simulator/simulator.py b/hathor/simulator/simulator.py index c776b7da7..ce5730520 100644 --- a/hathor/simulator/simulator.py +++ b/hathor/simulator/simulator.py @@ -81,7 +81,6 @@ def get_default_builder(self) -> Builder: return Builder() \ .set_peer(PrivatePeer.auto_generated()) \ .set_soft_voided_tx_ids(set()) \ - .enable_full_verification() \ .enable_sync_v2() \ .use_memory() \ .set_settings(self.settings) diff --git a/hathor/transaction/storage/transaction_storage.py b/hathor/transaction/storage/transaction_storage.py index 989d94e9d..cb98e0ccc 100644 --- a/hathor/transaction/storage/transaction_storage.py +++ b/hathor/transaction/storage/transaction_storage.py @@ -83,9 +83,6 @@ class TransactionStorage(ABC): # Key storage attribute to save if the network stored is the expected network _network_attribute: str = 'network' - # Key storage attribute to save if the full 
node is running a full verification - _running_full_verification_attribute: str = 'running_full_verification' - # Key storage attribute to save if the manager is running _manager_running_attribute: str = 'manager_running' @@ -909,22 +906,6 @@ def set_network(self, network: str) -> None: """ return self.add_value(self._network_attribute, network) - def start_full_verification(self) -> None: - """ Save full verification on storage - """ - self.add_value(self._running_full_verification_attribute, '1') - - def finish_full_verification(self) -> None: - """ Remove from storage that the full node is initializing with a full verification - """ - self.remove_value(self._running_full_verification_attribute) - - def is_running_full_verification(self) -> bool: - """ Return if the full node is initializing with a full verification - or was running a full verification and was stopped in the middle - """ - return self.get_value(self._running_full_verification_attribute) == '1' - def start_running_manager(self, execution_manager: ExecutionManager) -> None: """ Save on storage that manager is running """ diff --git a/tests/event/event_simulation_tester.py b/tests/event/event_simulation_tester.py index e04f05466..4df16abeb 100644 --- a/tests/event/event_simulation_tester.py +++ b/tests/event/event_simulation_tester.py @@ -34,7 +34,6 @@ class BaseEventSimulationTester(SimulatorTestCase): def _create_artifacts(self) -> None: peer = PrivatePeer.auto_generated() builder = self.builder.set_peer(peer) \ - .disable_full_verification() \ .enable_event_queue() artifacts = self.simulator.create_artifacts(builder) diff --git a/tests/event/test_event_manager.py b/tests/event/test_event_manager.py index cebd50ae3..6c79ac44f 100644 --- a/tests/event/test_event_manager.py +++ b/tests/event/test_event_manager.py @@ -13,7 +13,6 @@ def setUp(self) -> None: self.manager = self.create_peer( self.network, enable_event_queue=True, - full_verification=False, event_storage=self.event_storage ) diff --git 
a/tests/event/test_event_reorg.py b/tests/event/test_event_reorg.py index 873aeea88..249291be5 100644 --- a/tests/event/test_event_reorg.py +++ b/tests/event/test_event_reorg.py @@ -13,7 +13,6 @@ def setUp(self) -> None: self.manager = self.create_peer( self.network, enable_event_queue=True, - full_verification=False, event_storage=self.event_storage ) diff --git a/tests/feature_activation/test_feature_simulation.py b/tests/feature_activation/test_feature_simulation.py index cfa97f822..88883742c 100644 --- a/tests/feature_activation/test_feature_simulation.py +++ b/tests/feature_activation/test_feature_simulation.py @@ -681,8 +681,7 @@ def get_rocksdb_directory(self) -> str: def get_simulator_builder_from_dir(self, rocksdb_directory: str) -> Builder: return self.simulator.get_default_builder() \ - .use_rocksdb(path=rocksdb_directory) \ - .disable_full_verification() + .use_rocksdb(path=rocksdb_directory) def get_simulator_builder(self) -> Builder: rocksdb_directory = self.get_rocksdb_directory() diff --git a/tests/others/test_init_manager.py b/tests/others/test_init_manager.py index 7d60ed8fb..df6145104 100644 --- a/tests/others/test_init_manager.py +++ b/tests/others/test_init_manager.py @@ -1,18 +1,25 @@ from typing import Iterator +from hathor.conf.settings import HathorSettings from hathor.pubsub import PubSubManager from hathor.simulator.utils import add_new_block, add_new_blocks +from hathor.storage import RocksDBStorage from hathor.transaction import BaseTransaction -from hathor.transaction.storage import TransactionMemoryStorage +from hathor.transaction.storage import TransactionRocksDBStorage +from hathor.transaction.vertex_parser import VertexParser from tests import unittest from tests.unittest import TestBuilder from tests.utils import add_blocks_unlock_reward, add_new_double_spending, add_new_transactions -class ModifiedTransactionMemoryStorage(TransactionMemoryStorage): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - 
self._first_tx = None +class ModifiedTransactionRocksDBStorage(TransactionRocksDBStorage): + def __init__(self, path: str, settings: HathorSettings): + super().__init__( + rocksdb_storage=RocksDBStorage(path=path), + settings=settings, + vertex_parser=VertexParser(settings=settings), + ) + self._first_tx: BaseTransaction | None = None def set_first_tx(self, tx: BaseTransaction) -> None: self._first_tx = tx @@ -30,7 +37,8 @@ def _get_all_transactions(self) -> Iterator[BaseTransaction]: class SimpleManagerInitializationTestCase(unittest.TestCase): def setUp(self): super().setUp() - self.tx_storage = ModifiedTransactionMemoryStorage(settings=self._settings) + self.path = self.mkdtemp() + self.tx_storage = ModifiedTransactionRocksDBStorage(path=self.path, settings=self._settings) self.pubsub = PubSubManager(self.clock) def test_invalid_arguments(self): @@ -89,7 +97,8 @@ def test_wrong_stop(self): class ManagerInitializationTestCase(unittest.TestCase): def setUp(self): super().setUp() - self.tx_storage = ModifiedTransactionMemoryStorage(settings=self._settings) + self.path = self.mkdtemp() + self.tx_storage = ModifiedTransactionRocksDBStorage(path=self.path, settings=self._settings) self.network = 'testnet' self.manager = self.create_peer(self.network, tx_storage=self.tx_storage) @@ -128,8 +137,14 @@ def test_init_good_order(self): self.assertEqual(seen, self.all_hashes) # a new manager must be successfully initialized - self.tx_storage.reset_indexes() - self.create_peer('testnet', tx_storage=self.tx_storage) + self.manager.stop() + self.tx_storage._rocksdb_storage.close() + new_storage = ModifiedTransactionRocksDBStorage(path=self.path, settings=self._settings) + artifacts = self.get_builder().set_tx_storage(new_storage).build() + artifacts.manager.start() + self.clock.run() + self.clock.advance(5) + assert set(tx.hash for tx in artifacts.manager.tx_storage.get_all_transactions()) == self.all_hashes def test_init_unfavorable_order(self): """We force the first element 
of `get_all_transactions` to be a transaction @@ -147,23 +162,34 @@ def test_init_unfavorable_order(self): self.assertEqual(seen, self.all_hashes) # a new manager must be successfully initialized - self.tx_storage.reset_indexes() - self.create_peer('testnet', tx_storage=self.tx_storage) + self.manager.stop() + self.tx_storage._rocksdb_storage.close() + new_storage = ModifiedTransactionRocksDBStorage(path=self.path, settings=self._settings) + artifacts = self.get_builder().set_tx_storage(new_storage).build() + artifacts.manager.start() + self.clock.run() + self.clock.advance(5) + assert set(tx.hash for tx in artifacts.manager.tx_storage.get_all_transactions()) == self.all_hashes def test_init_not_voided_tips(self): # add a bunch of blocks and transactions for i in range(30): - add_new_block(self.manager, advance_clock=15) - add_new_transactions(self.manager, 5, advance_clock=15) + blk = add_new_block(self.manager, advance_clock=15) + txs = add_new_transactions(self.manager, 5, advance_clock=15) + self.all_hashes.add(blk.hash) + self.all_hashes.update(x.hash for x in txs) # add a bunch of conflicting transactions, these will all become voided for i in range(50): - add_new_double_spending(self.manager) + tx = add_new_double_spending(self.manager) + self.all_hashes.add(tx.hash) # finish up with another bunch of blocks and transactions for i in range(30): - add_new_block(self.manager, advance_clock=15) - add_new_transactions(self.manager, 5, advance_clock=15) + blk = add_new_block(self.manager, advance_clock=15) + txs = add_new_transactions(self.manager, 5, advance_clock=15) + self.all_hashes.add(blk.hash) + self.all_hashes.update(x.hash for x in txs) # not the point of this test, but just a sanity check self.assertConsensusValid(self.manager) @@ -172,9 +198,15 @@ def test_init_not_voided_tips(self): self.assertEqual(50, sum(bool(tx.get_metadata().voided_by) for tx in self.tx_storage.get_all_transactions())) # create a new manager (which will initialize in the 
self.create_peer call) - self.tx_storage.reset_indexes() self.manager.stop() - manager = self.create_peer(self.network, tx_storage=self.tx_storage, full_verification=False) + self.tx_storage._rocksdb_storage.close() + new_storage = ModifiedTransactionRocksDBStorage(path=self.path, settings=self._settings) + artifacts = self.get_builder().set_tx_storage(new_storage).build() + manager = artifacts.manager + manager.start() + self.clock.run() + self.clock.advance(5) + assert set(tx.hash for tx in manager.tx_storage.get_all_transactions()) == self.all_hashes # make sure none of its tx tips are voided all_tips = manager.generate_parent_txs(None).get_all_tips() diff --git a/tests/p2p/test_sync_v2.py b/tests/p2p/test_sync_v2.py index 579175ae2..ea279af8a 100644 --- a/tests/p2p/test_sync_v2.py +++ b/tests/p2p/test_sync_v2.py @@ -41,7 +41,7 @@ def _get_partial_blocks(self, tx_storage: TransactionStorage) -> set[VertexId]: partial_blocks.add(tx.hash) return partial_blocks - def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bool) -> None: + def _run_restart_test(self, *, use_tx_storage_cache: bool) -> None: manager1 = self.create_peer() manager1.allow_mining_without_peers() @@ -106,11 +106,6 @@ def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bo .set_peer(peer) \ .use_rocksdb(path) - if full_verification: - builder3.enable_full_verification() - else: - builder3.disable_full_verification() - if use_tx_storage_cache: builder3.use_tx_storage_cache() @@ -146,17 +141,11 @@ def _run_restart_test(self, *, full_verification: bool, use_tx_storage_cache: bo self.assertEqual(manager1.tx_storage.get_vertices_count(), manager3.tx_storage.get_vertices_count()) self.assertConsensusEqualSyncV2(manager1, manager3) - def test_restart_fullnode_full_verification(self) -> None: - self._run_restart_test(full_verification=True, use_tx_storage_cache=False) - def test_restart_fullnode_quick(self) -> None: - 
self._run_restart_test(full_verification=False, use_tx_storage_cache=False) + self._run_restart_test(use_tx_storage_cache=False) def test_restart_fullnode_quick_with_cache(self) -> None: - self._run_restart_test(full_verification=False, use_tx_storage_cache=True) - - def test_restart_fullnode_full_verification_with_cache(self) -> None: - self._run_restart_test(full_verification=True, use_tx_storage_cache=True) + self._run_restart_test(use_tx_storage_cache=True) def test_exceeds_streaming_and_mempool_limits(self) -> None: manager1 = self.create_peer() diff --git a/tests/poa/test_poa_simulation.py b/tests/poa/test_poa_simulation.py index 60893457a..32946d4d1 100644 --- a/tests/poa/test_poa_simulation.py +++ b/tests/poa/test_poa_simulation.py @@ -75,7 +75,7 @@ def _assert_height_weight_signer_id( class PoaSimulationTest(SimulatorTestCase): def _get_manager(self, signer: PoaSigner | None = None) -> HathorManager: - builder = self.simulator.get_default_builder().disable_full_verification() + builder = self.simulator.get_default_builder() if signer: builder.set_poa_signer(signer) artifacts = self.simulator.create_artifacts(builder) @@ -419,8 +419,7 @@ def test_new_signer_added(self) -> None: builder_1b = self.simulator.get_default_builder() \ .set_tx_storage(storage_1a) \ - .set_poa_signer(signer1) \ - .disable_full_verification() + .set_poa_signer(signer1) artifacts_1b = self.simulator.create_artifacts(builder_1b) manager_1b = artifacts_1b.manager manager_1b.allow_mining_without_peers() diff --git a/tests/tx/test_tx_storage.py b/tests/tx/test_tx_storage.py index ee45c4c5e..87ce51bbb 100644 --- a/tests/tx/test_tx_storage.py +++ b/tests/tx/test_tx_storage.py @@ -568,13 +568,6 @@ def handle_error(err): yield gatherResults(deferreds) self.tx_storage._disable_weakref() - def test_full_verification_attribute(self): - self.assertFalse(self.tx_storage.is_running_full_verification()) - self.tx_storage.start_full_verification() - 
self.assertTrue(self.tx_storage.is_running_full_verification()) - self.tx_storage.finish_full_verification() - self.assertFalse(self.tx_storage.is_running_full_verification()) - def test_key_value_attribute(self): attr = 'test' val = 'a' diff --git a/tests/unittest.py b/tests/unittest.py index 0992b636b..1b3d6af5b 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -194,7 +194,6 @@ def create_peer( # type: ignore[no-untyped-def] unlock_wallet: bool = True, wallet_index: bool = False, capabilities: list[str] | None = None, - full_verification: bool = True, checkpoints: list[Checkpoint] | None = None, utxo_index: bool = False, event_manager: EventManager | None = None, @@ -210,7 +209,6 @@ def create_peer( # type: ignore[no-untyped-def] settings = self._settings._replace(NETWORK_NAME=network) builder = self.get_builder() \ - .set_full_verification(full_verification) \ .set_settings(settings) if checkpoints is not None: