From cb8cc56cbe09ac08cb2fd8f29f60bc3ae4fbd906 Mon Sep 17 00:00:00 2001 From: Gabriel Levcovitz Date: Fri, 13 Jun 2025 10:47:13 -0300 Subject: [PATCH] feat(nano): add indexes and more --- hathor/dag_builder/artifacts.py | 5 +- hathor/indexes/blueprint_history_index.py | 79 +++++++++++ hathor/indexes/blueprint_timestamp_index.py | 39 ++++++ hathor/indexes/nc_creation_index.py | 39 ++++++ hathor/indexes/nc_history_index.py | 90 ++++++++++++ hathor/indexes/rocksdb_address_index.py | 2 +- .../rocksdb_blueprint_history_index.py | 42 ++++++ hathor/indexes/rocksdb_nc_history_index.py | 53 +++++++ hathor/indexes/rocksdb_tx_group_index.py | 118 ++++++++++++---- .../indexes/rocksdb_vertex_timestamp_index.py | 131 ++++++++++++++++++ hathor/indexes/tx_group_index.py | 28 +++- hathor/indexes/vertex_timestamp_index.py | 96 +++++++++++++ hathor/nanocontracts/nc_types/__init__.py | 10 +- .../nanocontracts/nc_types/address_nc_type.py | 9 +- .../nc_types/fixed_size_bytes_nc_type.py | 84 +++++++++++ .../nc_types/token_uid_nc_type.py | 88 ++++++++++++ hathor/nanocontracts/storage/block_storage.py | 35 ++++- hathor/transaction/headers/nano_header.py | 9 ++ hathor/utils/api.py | 18 ++- hathor/wallet/keypair.py | 14 ++ 20 files changed, 943 insertions(+), 46 deletions(-) create mode 100644 hathor/indexes/blueprint_history_index.py create mode 100644 hathor/indexes/blueprint_timestamp_index.py create mode 100644 hathor/indexes/nc_creation_index.py create mode 100644 hathor/indexes/nc_history_index.py create mode 100644 hathor/indexes/rocksdb_blueprint_history_index.py create mode 100644 hathor/indexes/rocksdb_nc_history_index.py create mode 100644 hathor/indexes/rocksdb_vertex_timestamp_index.py create mode 100644 hathor/indexes/vertex_timestamp_index.py create mode 100644 hathor/nanocontracts/nc_types/fixed_size_bytes_nc_type.py create mode 100644 hathor/nanocontracts/nc_types/token_uid_nc_type.py diff --git a/hathor/dag_builder/artifacts.py b/hathor/dag_builder/artifacts.py index 
29fb1d367..b0a4ae0fe 100644 --- a/hathor/dag_builder/artifacts.py +++ b/hathor/dag_builder/artifacts.py @@ -63,7 +63,10 @@ def propagate_with(self, manager: HathorManager, *, up_to: str | None = None) -> for node, vertex in self.list: if found_begin: - assert manager.on_new_tx(vertex) + try: + assert manager.on_new_tx(vertex) + except Exception as e: + raise Exception(f'failed on_new_tx({node.name})') from e self._last_propagated = node.name if node.name == self._last_propagated: diff --git a/hathor/indexes/blueprint_history_index.py b/hathor/indexes/blueprint_history_index.py new file mode 100644 index 000000000..961b0555c --- /dev/null +++ b/hathor/indexes/blueprint_history_index.py @@ -0,0 +1,79 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from abc import abstractmethod +from typing import Iterator + +from hathor.indexes.scope import Scope +from hathor.indexes.tx_group_index import TxGroupIndex +from hathor.transaction import BaseTransaction, Transaction + +SCOPE = Scope( + include_blocks=False, + include_txs=True, + include_voided=True, +) + + +class BlueprintHistoryIndex(TxGroupIndex[bytes]): + """Index of all Nano Contracts of a Blueprint.""" + + def get_scope(self) -> Scope: + return SCOPE + + def init_loop_step(self, tx: BaseTransaction) -> None: + self.add_tx(tx) + + @abstractmethod + def add_tx(self, tx: BaseTransaction) -> None: + """Add tx to this index. 
+ """ + raise NotImplementedError + + @abstractmethod + def remove_tx(self, tx: BaseTransaction) -> None: + """Remove tx from this index. + """ + raise NotImplementedError + + def _extract_keys(self, tx: BaseTransaction) -> Iterator[bytes]: + if not tx.is_nano_contract(): + return + assert isinstance(tx, Transaction) + nano_header = tx.get_nano_header() + if not nano_header.is_creating_a_new_contract(): + return + yield nano_header.nc_id + + def get_newest(self, blueprint_id: bytes) -> Iterator[bytes]: + """Get a list of nano_contract_ids sorted by timestamp for a given blueprint_id starting from the newest.""" + return self._get_sorted_from_key(blueprint_id, reverse=True) + + def get_oldest(self, blueprint_id: bytes) -> Iterator[bytes]: + """Get a list of nano_contract_ids sorted by timestamp for a given blueprint_id starting from the oldest.""" + return self._get_sorted_from_key(blueprint_id) + + def get_older(self, blueprint_id: bytes, tx_start: BaseTransaction) -> Iterator[bytes]: + """ + Get a list of nano_contract_ids sorted by timestamp for a given blueprint_id that are older than tx_start. + """ + return self._get_sorted_from_key(blueprint_id, tx_start=tx_start, reverse=True) + + def get_newer(self, blueprint_id: bytes, tx_start: BaseTransaction) -> Iterator[bytes]: + """ + Get a list of nano_contract_ids sorted by timestamp for a given blueprint_id that are newer than tx_start. + """ + return self._get_sorted_from_key(blueprint_id, tx_start=tx_start) diff --git a/hathor/indexes/blueprint_timestamp_index.py b/hathor/indexes/blueprint_timestamp_index.py new file mode 100644 index 000000000..1cc8a4291 --- /dev/null +++ b/hathor/indexes/blueprint_timestamp_index.py @@ -0,0 +1,39 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import final + +from hathor.indexes.rocksdb_vertex_timestamp_index import RocksDBVertexTimestampIndex +from hathor.indexes.scope import Scope +from hathor.transaction import BaseTransaction + +SCOPE = Scope( + include_blocks=False, + include_txs=True, + include_voided=True, +) + + +class BlueprintTimestampIndex(RocksDBVertexTimestampIndex): + """Index of on-chain Blueprints sorted by their timestamps.""" + cf_name = b'blueprint-index' + db_name = 'on-chain-blueprints' + + def get_scope(self) -> Scope: + return SCOPE + + @final + def _should_add(self, tx: BaseTransaction) -> bool: + from hathor.nanocontracts import OnChainBlueprint + return isinstance(tx, OnChainBlueprint) diff --git a/hathor/indexes/nc_creation_index.py b/hathor/indexes/nc_creation_index.py new file mode 100644 index 000000000..d60cd166a --- /dev/null +++ b/hathor/indexes/nc_creation_index.py @@ -0,0 +1,39 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from hathor.indexes.rocksdb_vertex_timestamp_index import RocksDBVertexTimestampIndex +from hathor.indexes.scope import Scope +from hathor.transaction import BaseTransaction, Transaction + +SCOPE = Scope( + include_blocks=False, + include_txs=True, + include_voided=True, +) + + +class NCCreationIndex(RocksDBVertexTimestampIndex): + """Index of Nano Contract creation txs sorted by their timestamps.""" + cf_name = b'nc-creation-index' + db_name = 'nc-creation' + + def get_scope(self) -> Scope: + return SCOPE + + def _should_add(self, tx: BaseTransaction) -> bool: + if not tx.is_nano_contract(): + return False + assert isinstance(tx, Transaction) + nano_header = tx.get_nano_header() + return nano_header.is_creating_a_new_contract() diff --git a/hathor/indexes/nc_history_index.py b/hathor/indexes/nc_history_index.py new file mode 100644 index 000000000..6099ccaa1 --- /dev/null +++ b/hathor/indexes/nc_history_index.py @@ -0,0 +1,90 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import abstractmethod +from typing import Iterable, Optional + +from structlog import get_logger +from typing_extensions import override + +from hathor.indexes.scope import Scope +from hathor.indexes.tx_group_index import TxGroupIndex +from hathor.transaction import BaseTransaction, Transaction + +logger = get_logger() + +SCOPE = Scope( + include_blocks=False, + include_txs=True, + include_voided=True, +) + + +class NCHistoryIndex(TxGroupIndex[bytes]): + """Index of all transactions of a Nano Contract.""" + + def get_scope(self) -> Scope: + return SCOPE + + def init_loop_step(self, tx: BaseTransaction) -> None: + self.add_tx(tx) + + @abstractmethod + def add_tx(self, tx: BaseTransaction) -> None: + """Add tx to this index. + """ + raise NotImplementedError + + @abstractmethod + def remove_tx(self, tx: BaseTransaction) -> None: + """Remove tx from this index. + """ + raise NotImplementedError + + @override + def _extract_keys(self, tx: BaseTransaction) -> Iterable[bytes]: + if not tx.is_nano_contract(): + return + assert isinstance(tx, Transaction) + nano_header = tx.get_nano_header() + yield nano_header.get_contract_id() + + def get_sorted_from_contract_id(self, contract_id: bytes) -> Iterable[bytes]: + """Get a list of tx_ids sorted by timestamp for a given contract_id. + """ + return self._get_sorted_from_key(contract_id) + + def get_newest(self, contract_id: bytes) -> Iterable[bytes]: + """Get a list of tx_ids sorted by timestamp for a given contract_id starting from the newest. + """ + return self._get_sorted_from_key(contract_id, reverse=True) + + def get_older(self, contract_id: bytes, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + """Get a list of tx_ids sorted by timestamp for a given contract_id that are older than tx_start. 
+ """ + return self._get_sorted_from_key(contract_id, tx_start=tx_start, reverse=True) + + def get_newer(self, contract_id: bytes, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + """Get a list of tx_ids sorted by timestamp for a given contract_id that are newer than tx_start. + """ + return self._get_sorted_from_key(contract_id, tx_start=tx_start) + + @abstractmethod + def get_transaction_count(self, contract_id: bytes) -> int: + """Get the count of transactions for the given contract_id.""" + raise NotImplementedError + + def get_last_tx_timestamp(self, contract_id: bytes) -> int | None: + """Get the timestamp of the last tx in the given contract_id, or None if it doesn't exist.""" + return self.get_latest_tx_timestamp(contract_id) diff --git a/hathor/indexes/rocksdb_address_index.py b/hathor/indexes/rocksdb_address_index.py index 0cc829abc..6288a956c 100644 --- a/hathor/indexes/rocksdb_address_index.py +++ b/hathor/indexes/rocksdb_address_index.py @@ -66,7 +66,7 @@ def add_tx(self, tx: BaseTransaction) -> None: self._publish_tx(tx) def get_from_address(self, address: str) -> list[bytes]: - return list(self._get_from_key(address)) + return list(self._get_sorted_from_key(address)) def get_sorted_from_address(self, address: str, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: return self._get_sorted_from_key(address, tx_start) diff --git a/hathor/indexes/rocksdb_blueprint_history_index.py b/hathor/indexes/rocksdb_blueprint_history_index.py new file mode 100644 index 000000000..e833eb12f --- /dev/null +++ b/hathor/indexes/rocksdb_blueprint_history_index.py @@ -0,0 +1,42 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import rocksdb +from typing_extensions import override + +from hathor.indexes.blueprint_history_index import BlueprintHistoryIndex +from hathor.indexes.rocksdb_tx_group_index import RocksDBTxGroupIndex +from hathor.indexes.rocksdb_utils import RocksDBIndexUtils + +_CF_NAME_BLUEPRINT_HISTORY_INDEX = b'blueprint-history-index' +_DB_NAME: str = 'blueprint-history' + + +class RocksDBBlueprintHistoryIndex(RocksDBTxGroupIndex[bytes], BlueprintHistoryIndex, RocksDBIndexUtils): + _KEY_SIZE = 32 + + def __init__(self, db: rocksdb.DB) -> None: + RocksDBTxGroupIndex.__init__(self, db, _CF_NAME_BLUEPRINT_HISTORY_INDEX) + + @override + def _serialize_key(self, key: bytes) -> bytes: + return key + + @override + def _deserialize_key(self, key_bytes: bytes) -> bytes: + return key_bytes + + @override + def get_db_name(self) -> str | None: + return _DB_NAME diff --git a/hathor/indexes/rocksdb_nc_history_index.py b/hathor/indexes/rocksdb_nc_history_index.py new file mode 100644 index 000000000..eb968a8ea --- /dev/null +++ b/hathor/indexes/rocksdb_nc_history_index.py @@ -0,0 +1,53 @@ +# Copyright 2023 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Optional + +from structlog import get_logger + +from hathor.indexes.nc_history_index import NCHistoryIndex +from hathor.indexes.rocksdb_tx_group_index import RocksDBTxGroupIndex +from hathor.indexes.rocksdb_utils import RocksDBIndexUtils + +if TYPE_CHECKING: # pragma: no cover + import rocksdb + +logger = get_logger() + +_CF_NAME_NC_HISTORY_INDEX = b'nc-history-index' +_CF_NAME_NC_HISTORY_INDEX_STATS = b'nc-history-index-stats' +_DB_NAME: str = 'nc-history' + + +class RocksDBNCHistoryIndex(RocksDBTxGroupIndex[bytes], NCHistoryIndex, RocksDBIndexUtils): + """RocksDB-persistent index of all transactions of a Nano Contract.""" + + _KEY_SIZE = 32 + + def __init__(self, db: 'rocksdb.DB', *, cf_name: Optional[bytes] = None) -> None: + RocksDBTxGroupIndex.__init__(self, db, cf_name or _CF_NAME_NC_HISTORY_INDEX, _CF_NAME_NC_HISTORY_INDEX_STATS) + + def _serialize_key(self, key: bytes) -> bytes: + return key + + def _deserialize_key(self, key_bytes: bytes) -> bytes: + return key_bytes + + def get_db_name(self) -> Optional[str]: + # XXX: we don't need it to be parametrizable, so this is fine + return _DB_NAME + + def get_transaction_count(self, contract_id: bytes) -> int: + assert self._stats is not None + return self._stats.get_group_count(contract_id) diff --git a/hathor/indexes/rocksdb_tx_group_index.py b/hathor/indexes/rocksdb_tx_group_index.py index f640fbafa..611e8c75e 100644 --- a/hathor/indexes/rocksdb_tx_group_index.py +++ b/hathor/indexes/rocksdb_tx_group_index.py @@ -13,21 +13,56 @@ # limitations under the License. 
from abc import abstractmethod -from typing import TYPE_CHECKING, Iterable, Optional, Sized, TypeVar +from typing import Callable, Iterator, Optional, Sized, TypeVar +import rocksdb from structlog import get_logger +from typing_extensions import override -from hathor.indexes.rocksdb_utils import RocksDBIndexUtils +from hathor.indexes.rocksdb_utils import RocksDBIndexUtils, incr_key from hathor.indexes.tx_group_index import TxGroupIndex from hathor.transaction import BaseTransaction - -if TYPE_CHECKING: # pragma: no cover - import rocksdb +from hathor.transaction.util import bytes_to_int, int_to_bytes logger = get_logger() KT = TypeVar('KT', bound=Sized) +GROUP_COUNT_VALUE_SIZE = 4 # in bytes + + +class _RocksDBTxGroupStatsIndex(RocksDBIndexUtils): + def __init__( + self, + db: rocksdb.DB, + cf_name: bytes, + serialize_key: Callable[[KT], bytes], + ) -> None: + self.log = logger.new() + super().__init__(db, cf_name) + self._serialize_key = serialize_key + + def increase_group_count(self, key: KT) -> None: + """Increase the group count for the provided key.""" + self._increment_group_count(key, amount=1) + + def decrease_group_count(self, key: KT) -> None: + """Decrease the group count for the provided key.""" + self._increment_group_count(key, amount=-1) + + def _increment_group_count(self, key: KT, *, amount: int) -> None: + """Increment the group count for the provided key with the provided amount.""" + count_key = self._serialize_key(key) + count = self.get_group_count(key) + new_count_bytes = int_to_bytes(number=count + amount, size=GROUP_COUNT_VALUE_SIZE) + self._db.put((self._cf, count_key), new_count_bytes) + + def get_group_count(self, key: KT) -> int: + """Return the group count for the provided key.""" + count_key = self._serialize_key(key) + count_bytes = self._db.get((self._cf, count_key)) or b'' + return bytes_to_int(count_bytes) + class RocksDBTxGroupIndex(TxGroupIndex[KT], RocksDBIndexUtils): """RocksDB implementation of the TxGroupIndex. 
This class is abstract and cannot be used directly. @@ -46,13 +81,15 @@ class RocksDBTxGroupIndex(TxGroupIndex[KT], RocksDBIndexUtils): """ _KEY_SIZE: int - _CF_NAME: bytes - def __init__(self, db: 'rocksdb.DB', cf_name: bytes) -> None: + def __init__(self, db: rocksdb.DB, cf_name: bytes, stats_cf_name: bytes | None = None) -> None: self.log = logger.new() RocksDBIndexUtils.__init__(self, db, cf_name) + self._stats = _RocksDBTxGroupStatsIndex(db, stats_cf_name, self._serialize_key) if stats_cf_name else None def force_clear(self) -> None: + if self._stats: + self._stats.clear() self.clear() @abstractmethod @@ -65,11 +102,6 @@ def _deserialize_key(self, _bytes: bytes) -> KT: """Deserialize RocksDB's key.""" raise NotImplementedError - @abstractmethod - def _extract_keys(self, tx: BaseTransaction) -> Iterable[KT]: - """Extract the keys related to a given tx. The transaction will be added to all extracted keys.""" - raise NotImplementedError - def _to_rocksdb_key(self, key: KT, tx: Optional[BaseTransaction] = None) -> bytes: import struct rocksdb_key = self._serialize_key(key) @@ -94,24 +126,44 @@ def _from_rocksdb_key(self, rocksdb_key: bytes) -> tuple[KT, int, bytes]: def add_tx(self, tx: BaseTransaction) -> None: for key in self._extract_keys(tx): - self.log.debug('put key', key=key) - self._db.put((self._cf, self._to_rocksdb_key(key, tx)), b'') + self.add_single_key(key, tx) + + def add_single_key(self, key: KT, tx: BaseTransaction) -> None: + self.log.debug('put key', key=key) + internal_key = self._to_rocksdb_key(key, tx) + if self._db.get((self._cf, internal_key)) is not None: + return + self._db.put((self._cf, internal_key), b'') + if self._stats: + self._stats.increase_group_count(key) def remove_tx(self, tx: BaseTransaction) -> None: for key in self._extract_keys(tx): - self.log.debug('delete key', key=key) - self._db.delete((self._cf, self._to_rocksdb_key(key, tx))) - - def _get_from_key(self, key: KT) -> Iterable[bytes]: - return 
self._util_get_from_key(key) - - def _get_sorted_from_key(self, key: KT, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: - return self._util_get_from_key(key, tx_start) - - def _util_get_from_key(self, key: KT, tx: Optional[BaseTransaction] = None) -> Iterable[bytes]: + self.remove_single_key(key, tx) + + def remove_single_key(self, key: KT, tx: BaseTransaction) -> None: + self.log.debug('delete key', key=key) + internal_key = self._to_rocksdb_key(key, tx) + if self._db.get((self._cf, internal_key)) is None: + return + self._db.delete((self._cf, internal_key)) + if self._stats: + self._stats.decrease_group_count(key) + + def _get_sorted_from_key( + self, + key: KT, + tx_start: Optional[BaseTransaction] = None, + reverse: bool = False + ) -> Iterator[bytes]: self.log.debug('seek to', key=key) it = self._db.iterkeys(self._cf) - it.seek(self._to_rocksdb_key(key, tx)) + if reverse: + it = reversed(it) + # when reversed we increment the key by 1, which effectively goes to the end of a prefix + it.seek_for_prev(incr_key(self._to_rocksdb_key(key, tx_start))) + else: + it.seek(self._to_rocksdb_key(key, tx_start)) for _cf, rocksdb_key in it: key2, _, tx_hash = self._from_rocksdb_key(rocksdb_key) if key2 != key: @@ -136,3 +188,19 @@ def _is_key_empty(self, key: KT) -> bool: is_empty = key2 != key self.log.debug('seek empty', is_empty=is_empty) return is_empty + + @override + def get_latest_tx_timestamp(self, key: KT) -> int | None: + it = self._db.iterkeys(self._cf) + it = reversed(it) + # when reversed we increment the key by 1, which effectively goes to the end of a prefix + it.seek_for_prev(incr_key(self._to_rocksdb_key(key))) + try: + _cf, rocksdb_key = next(it) + except StopIteration: + return None + key2, tx_timestamp, _ = self._from_rocksdb_key(rocksdb_key) + if key2 != key: + return None + assert key2 == key + return tx_timestamp diff --git a/hathor/indexes/rocksdb_vertex_timestamp_index.py b/hathor/indexes/rocksdb_vertex_timestamp_index.py new file 
mode 100644 index 000000000..6fae6bf9b --- /dev/null +++ b/hathor/indexes/rocksdb_vertex_timestamp_index.py @@ -0,0 +1,131 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import struct +from abc import ABC +from typing import Iterator, final + +import rocksdb +from structlog import get_logger +from typing_extensions import override + +from hathor.indexes.rocksdb_utils import RocksDBIndexUtils, incr_key +from hathor.indexes.vertex_timestamp_index import VertexTimestampIndex +from hathor.transaction import BaseTransaction, Vertex + +logger = get_logger() + + +class RocksDBVertexTimestampIndex(VertexTimestampIndex, RocksDBIndexUtils, ABC): + cf_name: bytes + db_name: str + + """ + This index uses the following key format: + + key = [tx.timestamp][tx.hash] + |--4 bytes---||--32b--| + + It works nicely because rocksdb uses a tree sorted by key under the hood. 
+ """ + + def __init__(self, db: rocksdb.DB) -> None: + self.log = logger.new() + RocksDBIndexUtils.__init__(self, db, self.cf_name) + + @final + @override + def get_db_name(self) -> str | None: + return self.db_name + + @final + @override + def force_clear(self) -> None: + self.clear() + + @staticmethod + @final + def _to_key(vertex: Vertex) -> bytes: + """Make a key for a vertex.""" + key = bytearray() + key.extend(struct.pack('>I', vertex.timestamp)) + assert len(vertex.hash) == 32 + key.extend(vertex.hash) + assert len(key) == 4 + 32 + return bytes(key) + + @staticmethod + @final + def _from_key(key: bytes) -> tuple[int, bytes]: + """Parse a key on the column-family.""" + assert len(key) == 4 + 32 + timestamp: int + (timestamp,) = struct.unpack('>I', key[:4]) + tx_hash = key[4:] + assert len(tx_hash) == 32 + return timestamp, tx_hash + + @final + @override + def _add_tx(self, tx: BaseTransaction) -> None: + key = self._to_key(tx) + self.log.debug('put key', key=key) + self._db.put((self._cf, key), b'') + + @final + @override + def del_tx(self, tx: BaseTransaction) -> None: + key = self._to_key(tx) + self.log.debug('delete key', key=key) + self._db.delete((self._cf, key)) + + @final + @override + def _iter_sorted( + self, + *, + tx_start: BaseTransaction | None, + reverse: bool, + inclusive: bool = False, + ) -> Iterator[bytes]: + it = self._db.iterkeys(self._cf) + if reverse: + it = reversed(it) + if tx_start is None: + self.log.debug('seek to last') + it.seek_to_last() + else: + # when reversed we increment the key by 1, which effectively goes to the end of a prefix + self.log.debug('seek to', tx=tx_start) + it.seek_for_prev(incr_key(self._to_key(tx_start))) + else: + if tx_start is None: + self.log.debug('seek to first') + it.seek_to_first() + else: + self.log.debug('seek to', tx=tx_start) + it.seek(self._to_key(tx_start)) + + it = (self._from_key(key) for _cf, key in it) + try: + _timestamp, first_tx_hash = next(it) + except StopIteration: + return + if 
inclusive or not tx_start or tx_start.hash != first_tx_hash: + yield first_tx_hash + + for _timestamp, tx_hash in it: + self.log.debug('seek found', tx=tx_hash.hex()) + yield tx_hash + self.log.debug('seek end') diff --git a/hathor/indexes/tx_group_index.py b/hathor/indexes/tx_group_index.py index 139245fe9..810cafdf7 100644 --- a/hathor/indexes/tx_group_index.py +++ b/hathor/indexes/tx_group_index.py @@ -13,7 +13,7 @@ # limitations under the License. from abc import abstractmethod -from typing import Generic, Iterable, Optional, Sized, TypeVar +from typing import Generic, Iterable, Iterator, Optional, Sized, TypeVar from structlog import get_logger @@ -44,16 +44,21 @@ def remove_tx(self, tx: BaseTransaction) -> None: raise NotImplementedError @abstractmethod - def _get_from_key(self, key: KT) -> Iterable[bytes]: - """Get all transactions that have a given key.""" + def _extract_keys(self, tx: BaseTransaction) -> Iterable[KT]: + """Extract the keys related to a given tx. The transaction will be added to all extracted keys.""" raise NotImplementedError @abstractmethod - def _get_sorted_from_key(self, key: KT, tx_start: Optional[BaseTransaction] = None) -> Iterable[bytes]: + def _get_sorted_from_key(self, + key: KT, + tx_start: Optional[BaseTransaction] = None, + reverse: bool = False) -> Iterator[bytes]: """Get all transactions that have a given key, sorted by timestamp. `tx_start` serves as a pagination marker, indicating the starting position for the iteration. When tx_start is None, the iteration begins from the initial element. 
+ + `reverse` is used to get the list in the reverse order """ raise NotImplementedError @@ -61,3 +66,18 @@ def _get_sorted_from_key(self, key: KT, tx_start: Optional[BaseTransaction] = No def _is_key_empty(self, key: KT) -> bool: """Check whether a key is empty.""" raise NotImplementedError + + @abstractmethod + def get_latest_tx_timestamp(self, key: KT) -> int | None: + """Get the timestamp of the latest tx in the given key, or None if the key is not found.""" + raise NotImplementedError + + @abstractmethod + def add_single_key(self, key: KT, tx: BaseTransaction) -> None: + """Add a single key to the index.""" + raise NotImplementedError + + @abstractmethod + def remove_single_key(self, key: KT, tx: BaseTransaction) -> None: + """Remove a single key from the index.""" + raise NotImplementedError diff --git a/hathor/indexes/vertex_timestamp_index.py b/hathor/indexes/vertex_timestamp_index.py new file mode 100644 index 000000000..0c2c845c0 --- /dev/null +++ b/hathor/indexes/vertex_timestamp_index.py @@ -0,0 +1,96 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import ABC, abstractmethod +from typing import Iterator, final + +from typing_extensions import override + +from hathor.indexes.base_index import BaseIndex +from hathor.transaction import BaseTransaction + + +class VertexTimestampIndex(BaseIndex, ABC): + """This is an abstract index to easily sort a certain type of vertex by its timestamp.""" + # TODO: Update the TimestampIndex to use this abstraction. Maybe the TxGroupIndex could be adapted too. + + @final + @override + def init_loop_step(self, tx: BaseTransaction) -> None: + self.add_tx(tx) + + @abstractmethod + def _should_add(self, tx: BaseTransaction) -> bool: + """Return whether a tx should be added to this index.""" + raise NotImplementedError + + @final + def add_tx(self, tx: BaseTransaction) -> None: + """Add a tx to this index.""" + if self._should_add(tx): + self._add_tx(tx) + + @final + def manually_add_tx(self, tx: BaseTransaction) -> None: + self._add_tx(tx) + + @abstractmethod + def _add_tx(self, tx: BaseTransaction) -> None: + """Internal method to actually add a tx to this index.""" + raise NotImplementedError + + @abstractmethod + def del_tx(self, tx: BaseTransaction) -> None: + """Delete a tx from this index.""" + raise NotImplementedError + + @final + def get_newest(self) -> Iterator[bytes]: + """Get tx ids from newest to oldest.""" + return self._iter_sorted(tx_start=None, reverse=True) + + @final + def get_oldest(self) -> Iterator[bytes]: + """Get tx ids from oldest to newest.""" + return self._iter_sorted(tx_start=None, reverse=False) + + @final + def get_older(self, *, tx_start: BaseTransaction, inclusive: bool = False) -> Iterator[bytes]: + """ + Get tx ids sorted by timestamp that are older than `tx_start`. + The `inclusive` param sets whether `tx_start` should be included. 
+ """ + return self._iter_sorted(tx_start=tx_start, reverse=True, inclusive=inclusive) + + @final + def get_newer(self, *, tx_start: BaseTransaction, inclusive: bool = False) -> Iterator[bytes]: + """ + Get tx ids sorted by timestamp that are newer than `tx_start`. + The `inclusive` param sets whether `tx_start` should be included. + """ + return self._iter_sorted(tx_start=tx_start, reverse=False, inclusive=inclusive) + + @abstractmethod + def _iter_sorted( + self, + *, + tx_start: BaseTransaction | None, + reverse: bool, + inclusive: bool = False, + ) -> Iterator[bytes]: + """ + Internal method to get all txs sorted by timestamp starting from an optional `tx_start`. + The `inclusive` param sets whether `tx_start` should be included. + """ + raise NotImplementedError diff --git a/hathor/nanocontracts/nc_types/__init__.py b/hathor/nanocontracts/nc_types/__init__.py index e15c4dbf3..d48e2af2a 100644 --- a/hathor/nanocontracts/nc_types/__init__.py +++ b/hathor/nanocontracts/nc_types/__init__.py @@ -21,6 +21,7 @@ from hathor.nanocontracts.nc_types.bytes_nc_type import BytesLikeNCType, BytesNCType from hathor.nanocontracts.nc_types.collection_nc_type import DequeNCType, FrozenSetNCType, ListNCType, SetNCType from hathor.nanocontracts.nc_types.dataclass_nc_type import DataclassNCType +from hathor.nanocontracts.nc_types.fixed_size_bytes_nc_type import Bytes32NCType from hathor.nanocontracts.nc_types.map_nc_type import DictNCType from hathor.nanocontracts.nc_types.namedtuple_nc_type import NamedTupleNCType from hathor.nanocontracts.nc_types.nc_type import NCType @@ -28,6 +29,7 @@ from hathor.nanocontracts.nc_types.optional_nc_type import OptionalNCType from hathor.nanocontracts.nc_types.sized_int_nc_type import Int32NCType from hathor.nanocontracts.nc_types.str_nc_type import StrNCType +from hathor.nanocontracts.nc_types.token_uid_nc_type import TokenUidNCType from hathor.nanocontracts.nc_types.tuple_nc_type import TupleNCType from hathor.nanocontracts.nc_types.utils 
import TypeAliasMap, TypeToNCTypeMap from hathor.nanocontracts.nc_types.varint_nc_type import VarInt32NCType, VarUint32NCType @@ -110,12 +112,12 @@ # hathor types: Address: AddressNCType, Amount: VarUint32NCType, - BlueprintId: BytesLikeNCType[BlueprintId], - ContractId: BytesLikeNCType[ContractId], + BlueprintId: Bytes32NCType, + ContractId: Bytes32NCType, Timestamp: Int32NCType, - TokenUid: BytesLikeNCType[TokenUid], + TokenUid: TokenUidNCType, TxOutputScript: BytesLikeNCType[TxOutputScript], - VertexId: BytesLikeNCType[VertexId], + VertexId: Bytes32NCType, } # This mapping includes all supported NCType classes, should only be used for parsing function calls diff --git a/hathor/nanocontracts/nc_types/address_nc_type.py b/hathor/nanocontracts/nc_types/address_nc_type.py index 16aa515ea..e3f2e93c9 100644 --- a/hathor/nanocontracts/nc_types/address_nc_type.py +++ b/hathor/nanocontracts/nc_types/address_nc_type.py @@ -20,8 +20,6 @@ from hathor.nanocontracts.nc_types.nc_type import NCType from hathor.nanocontracts.types import Address from hathor.serialization import Deserializer, Serializer -from hathor.serialization.consts import DEFAULT_BYTES_MAX_LENGTH -from hathor.serialization.encoding.bytes import decode_bytes, encode_bytes from hathor.transaction.headers.nano_header import ADDRESS_LEN_BYTES from hathor.utils.typing import is_subclass @@ -47,11 +45,14 @@ def _check_value(self, value: Address, /, *, deep: bool) -> None: @override def _serialize(self, serializer: Serializer, value: Address, /) -> None: - encode_bytes(serializer.with_max_bytes(DEFAULT_BYTES_MAX_LENGTH), value) + data = bytes(value) + assert len(data) == ADDRESS_LEN_BYTES # XXX: double check + serializer.write_bytes(data) @override def _deserialize(self, deserializer: Deserializer, /) -> Address: - return Address(decode_bytes(deserializer.with_max_bytes(DEFAULT_BYTES_MAX_LENGTH))) + data = bytes(deserializer.read_bytes(ADDRESS_LEN_BYTES)) + return Address(data) @override def _json_to_value(self, 
json_value: NCType.Json, /) -> Address: diff --git a/hathor/nanocontracts/nc_types/fixed_size_bytes_nc_type.py b/hathor/nanocontracts/nc_types/fixed_size_bytes_nc_type.py new file mode 100644 index 000000000..63ffc7949 --- /dev/null +++ b/hathor/nanocontracts/nc_types/fixed_size_bytes_nc_type.py @@ -0,0 +1,84 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import ClassVar, TypeVar + +from typing_extensions import Self, override + +from hathor.nanocontracts.nc_types.nc_type import NCType +from hathor.serialization import Deserializer, Serializer +from hathor.utils.typing import is_subclass + +B = TypeVar('B', bound=bytes) + + +class _FixedSizeBytesNCType(NCType[B]): + _is_hashable = True + _size: ClassVar[int] + _actual_type: type[B] + + def __init__(self, actual_type: type[B]) -> None: + self._actual_type = actual_type + + @override + @classmethod + def _from_type(cls, type_: type[B], /, *, type_map: NCType.TypeMap) -> Self: + if not is_subclass(type_, bytes): + raise TypeError('expected bytes-like type') + return cls(type_) + + def _filter_in(self, value: B, /) -> bytes: + """Mechanism to convert B into bytes before serializing.""" + return bytes(value) + + def _filter_out(self, data: bytes, /) -> B: + """Mechanism to convert bytes into B after deserializing.""" + return self._actual_type(data) + + @override + def _check_value(self, value: B, /, *, deep: bool) -> None: + if not 
isinstance(value, bytes): + raise TypeError(f'expected bytes type, not {type(value)}') + data = self._filter_in(value) + if len(data) != self._size: + raise TypeError( + f'value has {len(value)} bytes, expected ' + f'{self._actual_type.__name__} to always have {self._size} bytes' + ) + + @override + def _serialize(self, serializer: Serializer, value: B, /) -> None: + data = bytes(value) + assert len(data) == self._size # XXX: double check + serializer.write_bytes(data) + + @override + def _deserialize(self, deserializer: Deserializer, /) -> B: + return self._filter_out(bytes(deserializer.read_bytes(self._size))) + + @override + def _json_to_value(self, json_value: NCType.Json, /) -> B: + if not isinstance(json_value, str): + raise ValueError('expected str') + return self._filter_out(bytes.fromhex(json_value)) + + @override + def _value_to_json(self, value: bytes, /) -> NCType.Json: + return value.hex() + + +class Bytes32NCType(_FixedSizeBytesNCType[B]): + _size = 32 diff --git a/hathor/nanocontracts/nc_types/token_uid_nc_type.py b/hathor/nanocontracts/nc_types/token_uid_nc_type.py new file mode 100644 index 000000000..e7b3185ea --- /dev/null +++ b/hathor/nanocontracts/nc_types/token_uid_nc_type.py @@ -0,0 +1,88 @@ +# Copyright 2025 Hathor Labs +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from __future__ import annotations

from typing_extensions import Self, override

from hathor.conf.settings import HATHOR_TOKEN_UID
from hathor.nanocontracts.nc_types.fixed_size_bytes_nc_type import Bytes32NCType
from hathor.nanocontracts.nc_types.nc_type import NCType
from hathor.nanocontracts.types import TokenUid
from hathor.serialization import Deserializer, Serializer
from hathor.serialization.compound_encoding.optional import decode_optional, encode_optional
from hathor.utils.typing import is_subclass

TOKEN_SIZE = 32
HATHOR_TOKEN_HEX = HATHOR_TOKEN_UID.hex()


class TokenUidNCType(NCType[TokenUid]):
    """NCType for TokenUid values.

    Wire format is an optional 32-byte id: None stands for the native (HTR)
    token uid, every other token uid is written as its full 32 bytes.
    """

    _is_hashable = True

    def __init__(self) -> None:
        self._bytes32_nc_type = Bytes32NCType(bytes)

    @override
    @classmethod
    def _from_type(cls, type_: type[TokenUid], /, *, type_map: NCType.TypeMap) -> Self:
        # XXX: TokenUid is a NewType it cannot be used to make this check, when we have a custom class it will be
        # possible to use it here instead of bytes
        if not is_subclass(type_, bytes):
            raise TypeError('expected bytes type')
        return cls()

    @override
    def _check_value(self, value: TokenUid, /, *, deep: bool) -> None:
        if not isinstance(value, bytes):
            raise TypeError('expected bytes instance')
        as_bytes = bytes(value)
        # the native token uid is the only valid value that is not 32 bytes long
        if as_bytes != HATHOR_TOKEN_UID and len(as_bytes) != TOKEN_SIZE:
            raise TypeError(
                f'value has {len(value)} bytes, expected '
                f'TokenUid to always have {TOKEN_SIZE} bytes'
            )

    @override
    def _serialize(self, serializer: Serializer, value: TokenUid, /) -> None:
        # TokenUid is mapped to bytes | None, None represents the native token
        if value == HATHOR_TOKEN_UID:
            raw: bytes | None = None
        else:
            raw = value
        encode_optional(serializer, raw, self._bytes32_nc_type.serialize)

    @override
    def _deserialize(self, deserializer: Deserializer, /) -> TokenUid:
        # bytes | None is mapped back to TokenUid, None represents the native token
        raw = decode_optional(deserializer, self._bytes32_nc_type.deserialize)
        if raw is None:
            return TokenUid(HATHOR_TOKEN_UID)
        return TokenUid(raw)

    @override
    def _json_to_value(self, json_value: NCType.Json, /) -> TokenUid:
        if not isinstance(json_value, str):
            raise ValueError('expected str')
        if json_value == HATHOR_TOKEN_HEX:
            return TokenUid(HATHOR_TOKEN_UID)
        decoded = bytes.fromhex(json_value)
        if len(decoded) != TOKEN_SIZE:
            raise ValueError('TokenUid must either be a null byte or have 32 bytes')
        return TokenUid(decoded)

    @override
    def _value_to_json(self, value: TokenUid, /) -> NCType.Json:
        return HATHOR_TOKEN_HEX if value == HATHOR_TOKEN_UID else value.hex()
@@ -128,3 +138,26 @@ def create_token(self, token_id: TokenUid, token_name: str, token_symbol: str) - token_description = TokenDescription(token_id=token_id, token_name=token_name, token_symbol=token_symbol) token_description_bytes = self._TOKEN_DESCRIPTION_NC_TYPE.to_bytes(token_description) self._block_trie.update(bytes(key), token_description_bytes) + + def get_address_seqnum(self, address: Address) -> int: + """Get the latest seqnum for an address. + + For clarity, new transactions must have a GREATER seqnum to be able to be executed.""" + key = AddressKey(address) + try: + seqnum_bytes = self._block_trie.get(bytes(key)) + except KeyError: + return -1 + else: + seqnum, buf = leb128.decode_unsigned(seqnum_bytes, max_bytes=ADDRESS_SEQNUM_SIZE) + assert len(buf) == 0 + return seqnum + + def set_address_seqnum(self, address: Address, seqnum: int) -> None: + """Update seqnum for an adress.""" + assert seqnum >= 0 + old_seqnum = self.get_address_seqnum(address) + assert seqnum > old_seqnum + key = AddressKey(address) + seqnum_bytes = leb128.encode_unsigned(seqnum, max_bytes=ADDRESS_SEQNUM_SIZE) + self._block_trie.update(bytes(key), seqnum_bytes) diff --git a/hathor/transaction/headers/nano_header.py b/hathor/transaction/headers/nano_header.py index e98269e6b..cb6c4c66b 100644 --- a/hathor/transaction/headers/nano_header.py +++ b/hathor/transaction/headers/nano_header.py @@ -34,6 +34,7 @@ from hathor.transaction.block import Block ADDRESS_LEN_BYTES: int = 25 +ADDRESS_SEQNUM_SIZE: int = 8 # bytes _NC_SCRIPT_LEN_MAX_BYTES: int = 2 @@ -93,6 +94,9 @@ def _validate_authorities(self, token_uid: TokenUid) -> None: class NanoHeader(VertexBaseHeader): tx: Transaction + # Sequence number for the caller. + nc_seqnum: int + # nc_id equals to the blueprint_id when a Nano Contract is being created. # nc_id equals to the contract_id when a method is being called. 
nc_id: VertexId @@ -133,6 +137,9 @@ def deserialize( nc_id, buf = unpack_len(32, buf) if verbose: verbose('nc_id', nc_id) + nc_seqnum, buf = leb128.decode_unsigned(buf, max_bytes=ADDRESS_SEQNUM_SIZE) + if verbose: + verbose('nc_seqnum', nc_seqnum) (nc_method_len,), buf = unpack('!B', buf) if verbose: verbose('nc_method_len', nc_method_len) @@ -168,6 +175,7 @@ def deserialize( return cls( tx=tx, + nc_seqnum=nc_seqnum, nc_id=nc_id, nc_method=decoded_nc_method, nc_args_bytes=nc_args_bytes, @@ -185,6 +193,7 @@ def _serialize_without_header_id(self, *, skip_signature: bool) -> deque[bytes]: ret: deque[bytes] = deque() ret.append(self.nc_id) + ret.append(leb128.encode_unsigned(self.nc_seqnum, max_bytes=ADDRESS_SEQNUM_SIZE)) ret.append(int_to_bytes(len(encoded_method), 1)) ret.append(encoded_method) ret.append(int_to_bytes(len(self.nc_args_bytes), 2)) diff --git a/hathor/utils/api.py b/hathor/utils/api.py index 52728c67a..a074f4b58 100644 --- a/hathor/utils/api.py +++ b/hathor/utils/api.py @@ -15,7 +15,7 @@ import cgi from typing import Type, TypeVar, Union -from pydantic import Field, ValidationError, validator +from pydantic import Field, ValidationError from twisted.web.http import Request from hathor.api_util import get_args @@ -31,7 +31,6 @@ class QueryParams(BaseModel): Subclass this class defining your query parameters as attributes and their respective types, then call the from_request() class method to instantiate your class from the provided request. 
""" - _list_to_single_item_validator = validator('*', pre=True, allow_reuse=True)(single_or_none) @classmethod def from_request(cls: Type[T], request: Request) -> Union[T, 'ErrorResponse']: @@ -43,10 +42,17 @@ def from_request(cls: Type[T], request: Request) -> Union[T, 'ErrorResponse']: encoding = options.get('charset', encoding) raw_args = get_args(request).items() - args = { - key.decode(encoding): [value.decode(encoding) for value in values] - for key, values in raw_args - } + args: dict[str, str | None | list[str]] = {} + for key, values in raw_args: + decoded_key = key.decode(encoding) + decoded_values: list[str] = [value.decode(encoding) for value in values] + if not decoded_key.endswith('[]'): + try: + args[decoded_key] = single_or_none(decoded_values) + except Exception as error: + return ErrorResponse(error=str(error)) + else: + args[decoded_key] = decoded_values try: return cls.parse_obj(args) diff --git a/hathor/wallet/keypair.py b/hathor/wallet/keypair.py index d526e1c48..82d1a211d 100644 --- a/hathor/wallet/keypair.py +++ b/hathor/wallet/keypair.py @@ -91,6 +91,20 @@ def to_json(self) -> dict[str, Any]: 'used': self.used, } + def p2pkh_create_input_data(self, password: bytes, data: bytes) -> bytes: + """Return a script input to solve the p2pkh script generated by this key pair.""" + from cryptography.hazmat.primitives import hashes + + from hathor.crypto.util import get_public_key_bytes_compressed + from hathor.transaction.scripts import P2PKH + + private_key = self.get_private_key(password) + public_key = private_key.public_key() + public_key_bytes = get_public_key_bytes_compressed(public_key) + signature = private_key.sign(data, ec.ECDSA(hashes.SHA256())) + script_input = P2PKH.create_input_data(public_key_bytes, signature) + return script_input + @classmethod def from_json(cls, json_data: dict[str, Any]) -> 'KeyPair': priv_key_bytes = base64.b64decode(json_data['privKey'])