diff --git a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp index dda25f56e491..14c67abb4f0e 100644 --- a/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp +++ b/barretenberg/cpp/src/barretenberg/world_state/world_state.cpp @@ -587,16 +587,6 @@ WorldStateStatusFull WorldState::sync_block(const StateReference& block_state_re const std::vector& public_writes) { validate_trees_are_equally_synched(); - WorldStateStatusFull status; - if (is_same_state_reference(WorldStateRevision::uncommitted(), block_state_ref) && - is_archive_tip(WorldStateRevision::uncommitted(), block_header_hash)) { - std::pair result = commit(status); - if (!result.first) { - throw std::runtime_error(result.second); - } - populate_status_summary(status); - return status; - } rollback(); Fork::SharedPtr fork = retrieve_fork(CANONICAL_FORK_ID); @@ -658,22 +648,32 @@ WorldStateStatusFull WorldState::sync_block(const StateReference& block_state_re signal.wait_for_level(); - if (!success) { - throw std::runtime_error("Failed to sync block: " + err_message); - } + // Check resulting state and commit if successful + WorldStateStatusFull status; + try { + if (!success) { + throw std::runtime_error("Failed to sync block: " + err_message); + } - if (!is_archive_tip(WorldStateRevision::uncommitted(), block_header_hash)) { - throw std::runtime_error("Can't synch block: block header hash is not the tip of the archive tree"); - } + if (!is_archive_tip(WorldStateRevision::uncommitted(), block_header_hash)) { + throw std::runtime_error("Can't synch block: block header hash is not the tip of the archive tree"); + } - if (!is_same_state_reference(WorldStateRevision::uncommitted(), block_state_ref)) { - throw std::runtime_error("Can't synch block: block state does not match world state"); - } + if (!is_same_state_reference(WorldStateRevision::uncommitted(), block_state_ref)) { + throw std::runtime_error("Can't synch block: 
block state does not match world state"); + } - std::pair result = commit(status); - if (!result.first) { - throw std::runtime_error(result.second); + std::pair result = commit(status); + if (!result.first) { + throw std::runtime_error(result.second); + } + } catch (const std::exception& e) { + // We failed, rollback any uncommitted state before leaving + rollback(); + throw; } + + // Success return the status populate_status_summary(status); return status; } @@ -726,6 +726,9 @@ WorldStateStatusSummary WorldState::set_finalized_blocks(const block_number_t& t } WorldStateStatusFull WorldState::unwind_blocks(const block_number_t& toBlockNumber) { + // Ensure no uncommitted state + rollback(); + WorldStateRevision revision{ .forkId = CANONICAL_FORK_ID, .blockNumber = 0, .includeUncommitted = false }; std::array responses; get_all_tree_info(revision, responses); diff --git a/yarn-project/aztec-node/src/aztec-node/config.ts b/yarn-project/aztec-node/src/aztec-node/config.ts index 4f724369710f..a9484bff95ef 100644 --- a/yarn-project/aztec-node/src/aztec-node/config.ts +++ b/yarn-project/aztec-node/src/aztec-node/config.ts @@ -94,7 +94,7 @@ export function getConfigEnvVars(): AztecNodeConfig { type ConfigRequiredToBuildKeyStore = TxSenderConfig & SequencerClientConfig & SharedNodeConfig & ValidatorClientConfig; -function createKeyStoreFromWeb3Signer(config: ConfigRequiredToBuildKeyStore) { +function createKeyStoreFromWeb3Signer(config: ConfigRequiredToBuildKeyStore): KeyStore | undefined { const validatorKeyStores: ValidatorKeyStore[] = []; if ( @@ -124,7 +124,7 @@ function createKeyStoreFromWeb3Signer(config: ConfigRequiredToBuildKeyStore) { return keyStore; } -function createKeyStoreFromPrivateKeys(config: ConfigRequiredToBuildKeyStore) { +function createKeyStoreFromPrivateKeys(config: ConfigRequiredToBuildKeyStore): KeyStore | undefined { const validatorKeyStores: ValidatorKeyStore[] = []; const ethPrivateKeys = config.validatorPrivateKeys ? 
config.validatorPrivateKeys.getValue().map(x => ethPrivateKeySchema.parse(x)) @@ -158,7 +158,9 @@ function createKeyStoreFromPrivateKeys(config: ConfigRequiredToBuildKeyStore) { return keyStore; } -export function createKeyStoreForValidator(config: TxSenderConfig & SequencerClientConfig & SharedNodeConfig) { +export function createKeyStoreForValidator( + config: TxSenderConfig & SequencerClientConfig & SharedNodeConfig, +): KeyStore | undefined { if (config.web3SignerUrl !== undefined && config.web3SignerUrl.length > 0) { return createKeyStoreFromWeb3Signer(config); } diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index fdb9b1e6e646..92b0bafbbaa0 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -108,7 +108,12 @@ import { getTelemetryClient, trackSpan, } from '@aztec/telemetry-client'; -import { NodeKeystoreAdapter, ValidatorClient, createValidatorClient } from '@aztec/validator-client'; +import { + NodeKeystoreAdapter, + ValidatorClient, + createBlockProposalHandler, + createValidatorClient, +} from '@aztec/validator-client'; import { createWorldStateSynchronizer } from '@aztec/world-state'; import { createPublicClient, fallback, http } from 'viem'; @@ -212,6 +217,8 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { } } + await keyStoreManager?.validateSigners(); + // If we are a validator, verify our configuration before doing too much more. if (!config.disableValidator) { if (keyStoreManager === undefined) { @@ -300,12 +307,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { deps.p2pClientDeps, ); - // Start world state and wait for it to sync to the archiver. - await worldStateSynchronizer.start(); - - // Start p2p. Note that it depends on world state to be running. 
- await p2pClient.start(); - + // We should really not be modifying the config object config.txPublicSetupAllowList = config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions()); const blockBuilder = new BlockBuilder( @@ -316,8 +318,52 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { telemetry, ); + // We'll accumulate sentinel watchers here const watchers: Watcher[] = []; + // Create validator client if required + const validatorClient = createValidatorClient(config, { + p2pClient, + telemetry, + dateProvider, + epochCache, + blockBuilder, + blockSource: archiver, + l1ToL2MessageSource: archiver, + keyStoreManager, + }); + + // If we have a validator client, register it as a source of offenses for the slasher, + // and have it register callbacks on the p2p client *before* we start it, otherwise messages + // like attestations or auths will fail. + if (validatorClient) { + watchers.push(validatorClient); + if (!options.dontStartSequencer) { + await validatorClient.registerHandlers(); + } + } + + // If there's no validator client but alwaysReexecuteBlockProposals is enabled, + // create a BlockProposalHandler to reexecute block proposals for monitoring + if (!validatorClient && config.alwaysReexecuteBlockProposals) { + log.info('Setting up block proposal reexecution for monitoring'); + createBlockProposalHandler(config, { + blockBuilder, + epochCache, + blockSource: archiver, + l1ToL2MessageSource: archiver, + p2pClient, + dateProvider, + telemetry, + }).registerForReexecution(p2pClient); + } + + // Start world state and wait for it to sync to the archiver. + await worldStateSynchronizer.start(); + + // Start p2p. Note that it depends on world state to be running. + await p2pClient.start(); + const validatorsSentinel = await createSentinel(epochCache, archiver, p2pClient, config); if (validatorsSentinel) { // we can run a sentinel without trying to slash. 
@@ -349,21 +395,6 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable { watchers.push(attestationsBlockWatcher); } - const validatorClient = createValidatorClient(config, { - p2pClient, - telemetry, - dateProvider, - epochCache, - blockBuilder, - blockSource: archiver, - l1ToL2MessageSource: archiver, - keyStoreManager, - }); - - if (validatorClient) { - watchers.push(validatorClient); - } - log.verbose(`All Aztec Node subsystems synced`); // Validator enabled, create/start relevant service diff --git a/yarn-project/aztec/src/bin/index.ts b/yarn-project/aztec/src/bin/index.ts index 987df9d69b01..21fd878e0b0d 100644 --- a/yarn-project/aztec/src/bin/index.ts +++ b/yarn-project/aztec/src/bin/index.ts @@ -2,7 +2,7 @@ // import { injectCommands as injectBuilderCommands } from '@aztec/builder'; import { injectCommands as injectWalletCommands } from '@aztec/cli-wallet'; -import { enrichEnvironmentWithChainConfig } from '@aztec/cli/config'; +import { enrichEnvironmentWithChainConfig, enrichEnvironmentWithNetworkConfig } from '@aztec/cli/config'; import { injectCommands as injectContractCommands } from '@aztec/cli/contracts'; import { injectCommands as injectDevnetCommands } from '@aztec/cli/devnet'; import { injectCommands as injectInfrastructureCommands } from '@aztec/cli/infrastructure'; @@ -39,7 +39,9 @@ async function main() { networkValue = args[networkIndex].split('=')[1] || args[networkIndex + 1]; } - await enrichEnvironmentWithChainConfig(getActiveNetworkName(networkValue)); + const networkName = getActiveNetworkName(networkValue); + await enrichEnvironmentWithChainConfig(networkName); + await enrichEnvironmentWithNetworkConfig(networkName); const cliVersion = getCliVersion(); let program = new Command('aztec'); diff --git a/yarn-project/aztec/src/cli/cmds/start_prover_agent.ts b/yarn-project/aztec/src/cli/cmds/start_prover_agent.ts index c00b4c34114a..cc57529f9a88 100644 --- a/yarn-project/aztec/src/cli/cmds/start_prover_agent.ts +++ 
b/yarn-project/aztec/src/cli/cmds/start_prover_agent.ts @@ -45,7 +45,12 @@ export async function startProverAgent( await preloadCrsDataForServerSideProving(config, userLog); - const fetch = makeTracedFetch([1, 2, 3], false, makeUndiciFetch(new Agent({ connections: 10 }))); + const fetch = makeTracedFetch( + // retry connections every 3s, up to 30s before giving up + [1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], + false, + makeUndiciFetch(new Agent({ connections: 10 })), + ); const broker = createProvingJobBrokerClient(config.proverBrokerUrl, getVersions(), fetch); const telemetry = initTelemetryClient(extractRelevantOptions(options, telemetryClientConfigMappings, 'tel')); diff --git a/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts b/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts index 3d46c83072e5..71253d5a4f42 100644 --- a/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts +++ b/yarn-project/aztec/src/cli/cmds/start_prover_broker.ts @@ -1,4 +1,5 @@ import { getL1Config } from '@aztec/cli/config'; +import { getPublicClient } from '@aztec/ethereum'; import type { NamespacedApiHandlers } from '@aztec/foundation/json-rpc/server'; import type { LogFn } from '@aztec/foundation/log'; import { @@ -11,7 +12,7 @@ import { getProverNodeBrokerConfigFromEnv } from '@aztec/prover-node'; import type { ProvingJobBroker } from '@aztec/stdlib/interfaces/server'; import { getConfigEnvVars as getTelemetryClientConfig, initTelemetryClient } from '@aztec/telemetry-client'; -import { extractRelevantOptions } from '../util.js'; +import { extractRelevantOptions, setupUpdateMonitor } from '../util.js'; export async function startProverBroker( options: any, @@ -33,6 +34,7 @@ export async function startProverBroker( throw new Error('L1 registry address is required to start Aztec Node without --deploy-aztec-contracts option'); } + const followsCanonicalRollup = typeof config.rollupVersion !== 'number'; const { addresses, config: rollupConfig } = await getL1Config( 
config.l1Contracts.registryAddress, config.l1RpcUrls, @@ -45,6 +47,18 @@ export async function startProverBroker( const client = initTelemetryClient(getTelemetryClientConfig()); const broker = await createAndStartProvingBroker(config, client); + + if (options.autoUpdate !== 'disabled' && options.autoUpdateUrl) { + await setupUpdateMonitor( + options.autoUpdate, + new URL(options.autoUpdateUrl), + followsCanonicalRollup, + getPublicClient(config), + config.l1Contracts.registryAddress, + signalHandlers, + ); + } + services.proverBroker = [broker, ProvingJobBrokerSchema]; signalHandlers.push(() => broker.stop()); diff --git a/yarn-project/aztec/src/cli/cmds/start_prover_node.ts b/yarn-project/aztec/src/cli/cmds/start_prover_node.ts index 50727496a2a8..d185a4c5f553 100644 --- a/yarn-project/aztec/src/cli/cmds/start_prover_node.ts +++ b/yarn-project/aztec/src/cli/cmds/start_prover_node.ts @@ -73,7 +73,12 @@ export async function startProverNode( if (proverConfig.proverBrokerUrl) { // at 1TPS we'd enqueue ~1k tube proofs and ~1k AVM proofs immediately // set a lower connection limit such that we don't overload the server - const fetch = makeTracedFetch([1, 2, 3], false, makeUndiciFetch(new Agent({ connections: 100 }))); + // Keep retrying up to 30s + const fetch = makeTracedFetch( + [1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], + false, + makeUndiciFetch(new Agent({ connections: 100 })), + ); broker = createProvingJobBrokerClient(proverConfig.proverBrokerUrl, getVersions(proverConfig), fetch); } else if (options.proverBroker) { ({ broker } = await startProverBroker(options, signalHandlers, services, userLog)); diff --git a/yarn-project/cli/src/config/cached_fetch.ts b/yarn-project/cli/src/config/cached_fetch.ts new file mode 100644 index 000000000000..26dde5c735cc --- /dev/null +++ b/yarn-project/cli/src/config/cached_fetch.ts @@ -0,0 +1,67 @@ +import { createLogger } from '@aztec/aztec.js'; + +import { mkdir, readFile, stat, writeFile } from 'fs/promises'; +import { dirname } from 
'path'; + +export interface CachedFetchOptions { + /** Cache duration in milliseconds */ + cacheDurationMs: number; + /** The cache file */ + cacheFile?: string; +} + +/** + * Fetches data from a URL with file-based caching support. + * This utility can be used by both remote config and bootnodes fetching. + * + * @param url - The URL to fetch from + * @param networkName - Network name for cache directory structure + * @param options - Caching and error handling options + * @param cacheDir - Optional cache directory (defaults to no caching) + * @returns The fetched and parsed JSON data, or undefined if fetch fails and throwOnError is false + */ +export async function cachedFetch( + url: string, + options: CachedFetchOptions, + fetch = globalThis.fetch, + log = createLogger('cached_fetch'), +): Promise { + const { cacheDurationMs, cacheFile } = options; + + // Try to read from cache first + try { + if (cacheFile) { + const info = await stat(cacheFile); + if (info.mtimeMs + cacheDurationMs > Date.now()) { + const cachedData = JSON.parse(await readFile(cacheFile, 'utf-8')); + return cachedData; + } + } + } catch { + log.trace('Failed to read data from cache'); + } + + try { + const response = await fetch(url); + if (!response.ok) { + log.warn(`Failed to fetch from ${url}: ${response.status} ${response.statusText}`); + return undefined; + } + + const data = await response.json(); + + try { + if (cacheFile) { + await mkdir(dirname(cacheFile), { recursive: true }); + await writeFile(cacheFile, JSON.stringify(data), 'utf-8'); + } + } catch (err) { + log.warn('Failed to cache data on disk: ' + cacheFile, { cacheFile, err }); + } + + return data; + } catch (err) { + log.warn(`Failed to fetch from ${url}`, { err }); + return undefined; + } +} diff --git a/yarn-project/cli/src/config/chain_l2_config.ts b/yarn-project/cli/src/config/chain_l2_config.ts index de9571d54846..36c329b35241 100644 --- a/yarn-project/cli/src/config/chain_l2_config.ts +++ 
b/yarn-project/cli/src/config/chain_l2_config.ts @@ -5,10 +5,11 @@ import type { SharedNodeConfig } from '@aztec/node-lib/config'; import type { P2PConfig } from '@aztec/p2p/config'; import type { SlasherConfig } from '@aztec/stdlib/interfaces/server'; -import { mkdir, readFile, stat, writeFile } from 'fs/promises'; -import path, { dirname, join } from 'path'; +import path, { join } from 'path'; import publicIncludeMetrics from '../../public_include_metric_prefixes.json' with { type: 'json' }; +import { cachedFetch } from './cached_fetch.js'; +import { enrichEthAddressVar, enrichVar } from './enrich_env.js'; export type L2ChainConfig = L1ContractsConfig & Pick & @@ -63,11 +64,12 @@ const DefaultSlashConfig = { slashProposeInvalidAttestationsPenalty: DefaultL1ContractsConfig.slashAmountLarge, slashAttestDescendantOfInvalidPenalty: DefaultL1ContractsConfig.slashAmountLarge, slashUnknownPenalty: DefaultL1ContractsConfig.slashAmountSmall, - slashBroadcastedInvalidBlockPenalty: DefaultL1ContractsConfig.slashAmountMedium, + slashBroadcastedInvalidBlockPenalty: 0n, // DefaultL1ContractsConfig.slashAmountSmall // Disabled until further testing slashMaxPayloadSize: 50, slashGracePeriodL2Slots: 32 * 2, // Two epochs from genesis slashOffenseExpirationRounds: 8, sentinelEnabled: true, + slashExecuteRoundsLookBack: 4, } satisfies Partial; export const stagingIgnitionL2ChainConfig: L2ChainConfig = { @@ -140,11 +142,12 @@ export const stagingIgnitionL2ChainConfig: L2ChainConfig = { slashProposeInvalidAttestationsPenalty: 50_000n * 10n ** 18n, slashAttestDescendantOfInvalidPenalty: 50_000n * 10n ** 18n, slashUnknownPenalty: 2_000n * 10n ** 18n, - slashBroadcastedInvalidBlockPenalty: 10_000n * 10n ** 18n, + slashBroadcastedInvalidBlockPenalty: 0n, // 10_000n * 10n ** 18n, Disabled for now until further testing slashMaxPayloadSize: 50, slashGracePeriodL2Slots: 32 * 4, // One round from genesis slashOffenseExpirationRounds: 8, sentinelEnabled: true, + slashExecuteRoundsLookBack: 4, 
}; export const stagingPublicL2ChainConfig: L2ChainConfig = { @@ -253,37 +256,13 @@ export const testnetL2ChainConfig: L2ChainConfig = { const BOOTNODE_CACHE_DURATION_MS = 60 * 60 * 1000; // 1 hour; export async function getBootnodes(networkName: NetworkNames, cacheDir?: string) { - const cacheFile = cacheDir ? join(cacheDir, networkName, 'bootnodes.json') : undefined; - try { - if (cacheFile) { - const info = await stat(cacheFile); - if (info.mtimeMs + BOOTNODE_CACHE_DURATION_MS > Date.now()) { - return JSON.parse(await readFile(cacheFile, 'utf-8'))['bootnodes']; - } - } - } catch { - // no-op. Get the remote-file - } - const url = `http://static.aztec.network/${networkName}/bootnodes.json`; - const response = await fetch(url); - if (!response.ok) { - throw new Error( - `Failed to fetch basic contract addresses from ${url}. Check you are using a correct network name.`, - ); - } - const json = await response.json(); - - try { - if (cacheFile) { - await mkdir(dirname(cacheFile), { recursive: true }); - await writeFile(cacheFile, JSON.stringify(json), 'utf-8'); - } - } catch { - // no-op - } + const data = await cachedFetch(url, { + cacheDurationMs: BOOTNODE_CACHE_DURATION_MS, + cacheFile: cacheDir ? 
join(cacheDir, networkName, 'bootnodes.json') : undefined, + }); - return json['bootnodes']; + return data?.bootnodes; } export async function getL2ChainConfig( @@ -309,23 +288,6 @@ export async function getL2ChainConfig( return config; } -function enrichVar(envVar: EnvVar, value: string | undefined) { - // Don't override - if (process.env[envVar] || value === undefined) { - return; - } - process.env[envVar] = value; -} - -function enrichEthAddressVar(envVar: EnvVar, value: string) { - // EthAddress doesn't like being given empty strings - if (value === '') { - enrichVar(envVar, EthAddress.ZERO.toString()); - return; - } - enrichVar(envVar, value); -} - function getDefaultDataDir(networkName: NetworkNames): string { return path.join(process.env.HOME || '~', '.aztec', networkName, 'data'); } diff --git a/yarn-project/cli/src/config/enrich_env.ts b/yarn-project/cli/src/config/enrich_env.ts new file mode 100644 index 000000000000..4712157859ba --- /dev/null +++ b/yarn-project/cli/src/config/enrich_env.ts @@ -0,0 +1,15 @@ +import { EthAddress } from '@aztec/aztec.js'; +import type { EnvVar } from '@aztec/foundation/config'; + +export function enrichVar(envVar: EnvVar, value: string | undefined) { + // Don't override + if (process.env[envVar] || value === undefined) { + return; + } + process.env[envVar] = value; +} + +export function enrichEthAddressVar(envVar: EnvVar, value: string) { + // EthAddress doesn't like being given empty strings + enrichVar(envVar, value || EthAddress.ZERO.toString()); +} diff --git a/yarn-project/cli/src/config/index.ts b/yarn-project/cli/src/config/index.ts index 5e6e849aa628..f421f72041e3 100644 --- a/yarn-project/cli/src/config/index.ts +++ b/yarn-project/cli/src/config/index.ts @@ -1,2 +1,4 @@ +export * from './cached_fetch.js'; export * from './chain_l2_config.js'; export * from './get_l1_config.js'; +export * from './network_config.js'; diff --git a/yarn-project/cli/src/config/network_config.ts 
b/yarn-project/cli/src/config/network_config.ts new file mode 100644 index 000000000000..f92edb51a394 --- /dev/null +++ b/yarn-project/cli/src/config/network_config.ts @@ -0,0 +1,102 @@ +import { type NetworkConfig, NetworkConfigMapSchema, type NetworkNames } from '@aztec/foundation/config'; + +import { readFile } from 'fs/promises'; +import { join } from 'path'; + +import { cachedFetch } from './cached_fetch.js'; +import { enrichEthAddressVar, enrichVar } from './enrich_env.js'; + +const DEFAULT_CONFIG_URL = + 'https://raw.githubusercontent.com/AztecProtocol/networks/refs/heads/main/network_config.json'; +const NETWORK_CONFIG_CACHE_DURATION_MS = 60 * 60 * 1000; // 1 hour + +/** + * Fetches remote network configuration from GitHub with caching support. + * Uses the reusable cachedFetch utility. + * + * @param networkName - The network name to fetch config for + * @param cacheDir - Optional cache directory for storing fetched config + * @returns Remote configuration for the specified network, or undefined if not found/error + */ +export async function getNetworkConfig( + networkName: NetworkNames, + cacheDir?: string, +): Promise { + let url: URL | undefined; + const configLocation = process.env.NETWORK_CONFIG_LOCATION || DEFAULT_CONFIG_URL; + + if (!configLocation) { + return undefined; + } + + try { + if (configLocation.includes('://')) { + url = new URL(configLocation); + } else { + url = new URL(`file://${configLocation}`); + } + } catch { + /* no-op */ + } + + if (!url) { + return undefined; + } + + try { + let rawConfig: any; + + if (url.protocol === 'http:' || url.protocol === 'https:') { + rawConfig = await cachedFetch(url.href, { + cacheDurationMs: NETWORK_CONFIG_CACHE_DURATION_MS, + cacheFile: cacheDir ? 
join(cacheDir, networkName, 'network_config.json') : undefined, + }); + } else if (url.protocol === 'file:') { + rawConfig = JSON.parse(await readFile(url.pathname, 'utf-8')); + } else { + throw new Error('Unsupported Aztec network config protocol: ' + url.href); + } + + if (!rawConfig) { + return undefined; + } + + const networkConfigMap = NetworkConfigMapSchema.parse(rawConfig); + if (networkName in networkConfigMap) { + return networkConfigMap[networkName]; + } else { + return undefined; + } + } catch { + return undefined; + } +} + +/** + * Enriches environment variables with remote network configuration. + * This function is called before node config initialization to set env vars + * from the remote config, following the same pattern as enrichEnvironmentWithChainConfig(). + * + * @param networkName - The network name to fetch remote config for + */ +export async function enrichEnvironmentWithNetworkConfig(networkName: NetworkNames) { + if (networkName === 'local') { + return; // No remote config for local development + } + + const cacheDir = process.env.DATA_DIRECTORY ? 
join(process.env.DATA_DIRECTORY, 'cache') : undefined; + const networkConfig = await getNetworkConfig(networkName, cacheDir); + + if (!networkConfig) { + return; + } + + enrichVar('BOOTSTRAP_NODES', networkConfig.bootnodes.join(',')); + enrichVar('L1_CHAIN_ID', String(networkConfig.l1ChainId)); + enrichVar('SYNC_SNAPSHOTS_URL', networkConfig.snapshots.join(',')); + + enrichEthAddressVar('REGISTRY_CONTRACT_ADDRESS', networkConfig.registryAddress.toString()); + if (networkConfig.feeAssetHandlerAddress) { + enrichEthAddressVar('FEE_ASSET_HANDLER_CONTRACT_ADDRESS', networkConfig.feeAssetHandlerAddress.toString()); + } +} diff --git a/yarn-project/end-to-end/bootstrap.sh b/yarn-project/end-to-end/bootstrap.sh index 2161ea62c933..c5ee508e8b52 100755 --- a/yarn-project/end-to-end/bootstrap.sh +++ b/yarn-project/end-to-end/bootstrap.sh @@ -48,7 +48,17 @@ function test_cmds { echo "$hash:ONLY_TERM_PARENT=1 $run_test_script compose $test" done - echo "$hash:ONLY_TERM_PARENT=1 $run_test_script web3signer src/composed/web3signer/integration_remote_signer.test.ts" + tests=( + src/composed/web3signer/*.test.ts + ) + for test in "${tests[@]}"; do + # We must set ONLY_TERM_PARENT=1 to allow the script to fully control cleanup process. 
+ echo "$hash:ONLY_TERM_PARENT=1 $run_test_script web3signer $test" + done + + #echo "$hash:ONLY_TERM_PARENT=1 $run_test_script simple src/e2e_multi_validator/e2e_multi_validator_node.test.ts" + # echo "$hash:ONLY_TERM_PARENT=1 $run_test_script web3signer src/composed/web3signer/integration_remote_signer.test.ts" + #echo "$hash:ONLY_TERM_PARENT=1 $run_test_script web3signer src/e2e_multi_validator/e2e_multi_validator_node_key_store.test.ts" # TODO(AD): figure out workaround for mainframe subnet exhaustion if [ "$CI" -eq 1 ]; then diff --git a/yarn-project/end-to-end/scripts/web3signer/docker-compose.yml b/yarn-project/end-to-end/scripts/web3signer/docker-compose.yml index d64d298b0c24..e6f83ad373d3 100644 --- a/yarn-project/end-to-end/scripts/web3signer/docker-compose.yml +++ b/yarn-project/end-to-end/scripts/web3signer/docker-compose.yml @@ -1,18 +1,6 @@ -configs: - test_private_key: - content: | - type: file-raw - keyType: SECP256K1 - privateKey: 0x1111111111111111111111111111111111111111111111111111111111111111 - services: web3signer: image: consensys/web3signer:25.6.0 - ports: - - "9000:9000" - configs: - - source: test_private_key - target: /keys/test_private_key.yaml command: - --http-listen-port=9000 - --http-host-allowlist=* @@ -20,6 +8,8 @@ services: - --logging=ALL - eth1 - --chain-id=31337 + volumes: + - web3signer_keys:/keys end-to-end: image: aztecprotocol/build:3.0 @@ -29,6 +19,7 @@ services: volumes: - ../../../../:/root/aztec-packages - ${HOME}/.bb-crs:/root/.bb-crs + - web3signer_keys:/keys tmpfs: - /tmp:rw,size=1g - /tmp-jest:rw,size=512m @@ -38,6 +29,7 @@ services: LOG_LEVEL: ${LOG_LEVEL:-verbose} L1_CHAIN_ID: 31337 WEB3_SIGNER_URL: http://web3signer:9000 + WEB3_SIGNER_TEST_KEYSTORE_DIR: /keys FORCE_COLOR: ${FORCE_COLOR:-1} # Allow git usage despite different ownership. Relevant for script tests. 
GIT_CONFIG_GLOBAL: /root/aztec-packages/build-images/src/home/.gitconfig @@ -60,8 +52,6 @@ services: # There's a lot of doubling of $'s to escape dockers string interpolation. entrypoint: > bash -c ' - export TEST_PRIVATE_KEY=$(yq .privateKey /keys/test_private_key.yaml) - while ! nc -z web3signer 9000; do sleep 1; done; setsid ./scripts/test_simple.sh ${TEST:-./src/e2e_deploy_contract.test.ts} & pid=$$! @@ -72,7 +62,7 @@ services: ' depends_on: - web3signer - configs: - # mount in the test as well in order to compare remote against local signer - - source: test_private_key - target: /keys/test_private_key.yaml + +volumes: + # a shared volume so that tests can load up arbitrary private keys into web3signer + web3signer_keys: {} diff --git a/yarn-project/end-to-end/src/bench/bench_build_block.test.ts b/yarn-project/end-to-end/src/bench/bench_build_block.test.ts index 24fa0a2b5ee7..e07f11eed7f9 100644 --- a/yarn-project/end-to-end/src/bench/bench_build_block.test.ts +++ b/yarn-project/end-to-end/src/bench/bench_build_block.test.ts @@ -16,7 +16,6 @@ describe('benchmarks/build_block', () => { enforceTimeTable: false, // Let the sequencer take as much time as it needs metrics: [ Metrics.SEQUENCER_BLOCK_BUILD_DURATION, - Metrics.SEQUENCER_BLOCK_BUILD_INSERTION_TIME, { // Invert mana-per-second since benchmark action requires that all metrics // conform to either "bigger-is-better" or "smaller-is-better". 
diff --git a/yarn-project/end-to-end/src/e2e_multi_validator/e2e_multi_validator_node_key_store.test.ts b/yarn-project/end-to-end/src/composed/web3signer/e2e_multi_validator_node_key_store.test.ts similarity index 90% rename from yarn-project/end-to-end/src/e2e_multi_validator/e2e_multi_validator_node_key_store.test.ts rename to yarn-project/end-to-end/src/composed/web3signer/e2e_multi_validator_node_key_store.test.ts index f26326325972..833dd7b2c14d 100644 --- a/yarn-project/end-to-end/src/e2e_multi_validator/e2e_multi_validator_node_key_store.test.ts +++ b/yarn-project/end-to-end/src/composed/web3signer/e2e_multi_validator_node_key_store.test.ts @@ -26,31 +26,33 @@ import { NodeKeystoreAdapter, ValidatorClient } from '@aztec/validator-client'; import { jest } from '@jest/globals'; import { mkdtemp, rmdir } from 'fs/promises'; -import { createServer } from 'http'; import { tmpdir } from 'os'; import { join } from 'path'; import { privateKeyToAccount } from 'viem/accounts'; -import { MNEMONIC } from '../fixtures/fixtures.js'; -import { getPrivateKeyFromIndex, setup } from '../fixtures/utils.js'; import { addressForPrivateKey, - createJSONRPCSigner, createKeyFile1, createKeyFile2, createKeyFile3, createKeyFile4, createKeyFile5, createKeyFile6, -} from './utils.js'; +} from '../../e2e_multi_validator/utils.js'; +import { MNEMONIC } from '../../fixtures/fixtures.js'; +import { getPrivateKeyFromIndex, setup } from '../../fixtures/utils.js'; +import { + createWeb3SignerKeystore, + getWeb3SignerTestKeystoreDir, + getWeb3SignerUrl, + refreshWeb3Signer, +} from '../../fixtures/web3signer.js'; const VALIDATOR_COUNT = 7; const COMMITTEE_SIZE = VALIDATOR_COUNT; const PUBLISHER_COUNT = 7; const VALIDATOR_KEY_START_INDEX = 0; const PUBLISHER_KEY_START_INDEX = VALIDATOR_COUNT + VALIDATOR_KEY_START_INDEX; -const SIGNER_URL_PORT = 15000; -const SIGNER_URL = `http://localhost:${SIGNER_URL_PORT}`; const PROVER_PUBLISHER_INDEX = PUBLISHER_KEY_START_INDEX + PUBLISHER_COUNT; const 
BLOCK_COUNT = 20; @@ -66,6 +68,8 @@ const validators = Array.from( async function createKeyFiles() { const directory = await mkdtemp(join(tmpdir(), 'foo-')); + const web3signerDir = getWeb3SignerTestKeystoreDir(); + const web3signerUrl = getWeb3SignerUrl(); const file1 = join(directory, 'keyfile1.json'); const file2 = join(directory, 'keyfile2.json'); const file3 = join(directory, 'keyfile3.json'); @@ -106,9 +110,11 @@ async function createKeyFiles() { publishers[2].key, publishers[3].key, coinbaseAddresses[1], - SIGNER_URL, + getWeb3SignerUrl(), feeRecipientAddresses[2], ); + await createWeb3SignerKeystore(web3signerDir, validators[2]); + await createKeyFile4( file4, addressForPrivateKey(validators[3]), @@ -119,12 +125,19 @@ async function createKeyFiles() { publishers[6].key, coinbaseAddresses[3], coinbaseAddresses[4], - SIGNER_URL, + web3signerUrl, feeRecipientAddresses[3], feeRecipientAddresses[4], ); - await createKeyFile5(file5, addressForPrivateKey(proverPrivateKey), SIGNER_URL); + await createWeb3SignerKeystore(web3signerDir, validators[3], validators[4]); + + await createKeyFile5(file5, addressForPrivateKey(proverPrivateKey), web3signerUrl); + await createWeb3SignerKeystore(web3signerDir, proverPrivateKey); + await createKeyFile6(file6, MNEMONIC, 5, coinbaseAddresses[5], feeRecipientAddresses[5]); + + await refreshWeb3Signer(web3signerUrl); + return directory; } @@ -157,11 +170,8 @@ describe('e2e_multi_validator_node', () => { let sequencerClient: SequencerClient | undefined; let publisherFactory: SequencerPublisherFactory; let validatorClient: ValidatorClient; - let jsonRpcServer: ReturnType | null = null; const artifact = StatefulTestContractArtifact; const addressToPrivateKey = new Map(); - const remoteSignerStats = new Map(); - const expectedRemoteSigners = new Set(); const expectedCoinbaseAddresses = new Map(); const expectedFeeRecipientAddresses = new Map(); const expectedPublishers = new Map(); @@ -185,11 +195,6 @@ 
describe('e2e_multi_validator_node', () => { }; }); - // These validators have remote signing configured - expectedRemoteSigners.add(validatorAddresses[2].toLowerCase()); - expectedRemoteSigners.add(validatorAddresses[3].toLowerCase()); - expectedRemoteSigners.add(validatorAddresses[4].toLowerCase()); - // Setup expected coinbase and fee recipient values per validator validatorAddresses.forEach((validatorAddress, i) => { const coinbase = EthAddress.fromNumber(i + 1) @@ -268,13 +273,6 @@ describe('e2e_multi_validator_node', () => { addressToPrivateKey.set(account.toLowerCase(), pk); } - // Create JSON RPC server for signing transactions - jsonRpcServer = createJSONRPCSigner(addressToPrivateKey, remoteSignerStats); - // Start server on the SIGNER_URL port - await new Promise(resolve => { - jsonRpcServer!.listen(SIGNER_URL_PORT, resolve); - }); - const { aztecSlotDuration: _aztecSlotDuration } = getL1ContractsConfigEnvVars(); ({ @@ -322,13 +320,6 @@ describe('e2e_multi_validator_node', () => { afterEach(async () => { await teardown(); await rmdir(keyStoreDirectory, { recursive: true }); - - // Close JSON RPC server - if (jsonRpcServer) { - await new Promise(resolve => { - jsonRpcServer!.close(() => resolve()); - }); - } }); const sendTx = async (sender: AztecAddress, contractAddressSalt: Fr) => { @@ -411,14 +402,6 @@ describe('e2e_multi_validator_node', () => { }), ); - const currentBlockNumber = await aztecNode.getBlockNumber(); - - for (const expectedRemoteSigner of expectedRemoteSigners) { - const remoteSigner = remoteSignerStats.get(expectedRemoteSigner); - expect(remoteSigner).toBeDefined(); - expect(remoteSigner).toBeGreaterThanOrEqual(currentBlockNumber); - } - for (const [proposer, coinbase] of requestedCoinbaseAddresses) { const expectedCoinbase = expectedCoinbaseAddresses.get(proposer); expect(expectedCoinbase).toBeDefined(); diff --git a/yarn-project/end-to-end/src/composed/web3signer/integration_remote_signer.test.ts 
b/yarn-project/end-to-end/src/composed/web3signer/integration_remote_signer.test.ts index afd5016d612a..4e7088ba3522 100644 --- a/yarn-project/end-to-end/src/composed/web3signer/integration_remote_signer.test.ts +++ b/yarn-project/end-to-end/src/composed/web3signer/integration_remote_signer.test.ts @@ -4,19 +4,22 @@ import { LocalSigner, RemoteSigner } from '@aztec/node-keystore'; import { jest } from '@jest/globals'; import type { TransactionSerializable, TypedDataDefinition } from 'viem'; -import { privateKeyToAddress } from 'viem/accounts'; +import { generatePrivateKey, privateKeyToAddress } from 'viem/accounts'; -const { - WEB3_SIGNER_URL = 'http://localhost:9000', - L1_CHAIN_ID = '31337', - TEST_PRIVATE_KEY = '0x1111111111111111111111111111111111111111111111111111111111111111', -} = process.env; +import { + createWeb3SignerKeystore, + getWeb3SignerTestKeystoreDir, + getWeb3SignerUrl, + refreshWeb3Signer, +} from '../../fixtures/web3signer.js'; + +const { L1_CHAIN_ID = '31337' } = process.env; describe('RemoteSigner integration: Web3Signer (compose)', () => { jest.setTimeout(180_000); - let chainId: number; let web3SignerUrl: string; + let chainId: number; let privateKey: Buffer32; let address: EthAddress; @@ -24,20 +27,16 @@ describe('RemoteSigner integration: Web3Signer (compose)', () => { let remoteSigner: RemoteSigner; let localSigner: LocalSigner; - beforeAll(() => { - if (!WEB3_SIGNER_URL) { - throw new Error('Need to set WEB3_SIGNER_URL'); - } + beforeAll(async () => { + web3SignerUrl = getWeb3SignerUrl(); - if (!TEST_PRIVATE_KEY) { - throw new Error('Need to set WEB3_SIGNER_URL'); - } - - privateKey = Buffer32.fromString(TEST_PRIVATE_KEY); + privateKey = Buffer32.fromString(generatePrivateKey()); address = EthAddress.fromString(privateKeyToAddress(privateKey.toString())); chainId = parseInt(L1_CHAIN_ID, 10); - web3SignerUrl = WEB3_SIGNER_URL; + + await createWeb3SignerKeystore(getWeb3SignerTestKeystoreDir(), privateKey.toString()); + await 
refreshWeb3Signer(web3SignerUrl); }); beforeEach(() => { @@ -138,4 +137,21 @@ describe('RemoteSigner integration: Web3Signer (compose)', () => { expect(remoteSig.s.toString()).toBe(localSig.s.toString()); expect([0, 1, 27, 28]).toContain(remoteSig.v); }); + + it('validates web3signer accessibility and address availability', async () => { + // Should succeed with the correct address + await expect(RemoteSigner.validateAccess(web3SignerUrl, [address.toString()])).resolves.not.toThrow(); + + // Should fail with a non-existent address + const nonExistentAddress = EthAddress.random().toString(); + await expect(RemoteSigner.validateAccess(web3SignerUrl, [nonExistentAddress])).rejects.toThrow( + `The following addresses are not available in the web3signer: ${nonExistentAddress.toLowerCase()}`, + ); + + // Should succeed when checking multiple addresses where one exists + await expect(RemoteSigner.validateAccess(web3SignerUrl, [address.toString()])).resolves.not.toThrow(); + + // Should fail with an invalid URL + await expect(RemoteSigner.validateAccess('http://invalid-url:9999', [address.toString()])).rejects.toThrow(); + }); }); diff --git a/yarn-project/end-to-end/src/e2e_event_logs.test.ts b/yarn-project/end-to-end/src/e2e_event_logs.test.ts index 94f6b7d3e372..9209ddf88c32 100644 --- a/yarn-project/end-to-end/src/e2e_event_logs.test.ts +++ b/yarn-project/end-to-end/src/e2e_event_logs.test.ts @@ -1,4 +1,4 @@ -import { type AccountWalletWithSecretKey, AztecAddress, Fr } from '@aztec/aztec.js'; +import { AccountWalletWithSecretKey, AztecAddress, Fr, type Logger } from '@aztec/aztec.js'; import { makeTuple } from '@aztec/foundation/array'; import { timesParallel } from '@aztec/foundation/collection'; import type { Tuple } from '@aztec/foundation/serialize'; @@ -20,6 +20,7 @@ describe('Logs', () => { let account1Address: AztecAddress; let account2Address: AztecAddress; + let log: Logger; let teardown: () => Promise; beforeAll(async () => { @@ -27,10 +28,13 @@ 
describe('Logs', () => { teardown, wallets: [wallet1, wallet2], accounts: [account1Address, account2Address], + logger: log, } = await setup(2)); + log.warn(`Setup complete, checking account contracts published`); await ensureAccountContractsPublished(wallet1, [wallet1, wallet2]); + log.warn(`Deploying test contract`); testLogContract = await TestLogContract.deploy(wallet1).send({ from: account1Address }).deployed(); }); diff --git a/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts b/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts index 69a095037cbd..792b166516f5 100644 --- a/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/e2e_l1_publisher/e2e_l1_publisher.test.ts @@ -240,6 +240,7 @@ describe('L1Publisher integration', () => { slashFactoryContract, dateProvider, metrics: sequencerPublisherMetrics, + lastActions: {}, }, ); diff --git a/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts index d18219116bff..4ae39aeee343 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts @@ -1,4 +1,6 @@ import type { AztecNodeService } from '@aztec/aztec-node'; +import { times } from '@aztec/foundation/collection'; +import { OffenseType } from '@aztec/slasher'; import { jest } from '@jest/globals'; import fs from 'fs'; @@ -15,6 +17,11 @@ jest.setTimeout(1000000); // Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds const NUM_VALIDATORS = 4; const BOOT_NODE_UDP_PORT = 4500; +const COMMITTEE_SIZE = NUM_VALIDATORS; + +// This test needs longer slot window to ensure that the client has enough time to submit their txs, +// and have the nodes get recreated, prior to the reorg. +const AZTEC_SLOT_DURATION = process.env.AZTEC_SLOT_DURATION ? 
parseInt(process.env.AZTEC_SLOT_DURATION) : 32; const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'data-withholding-slash-')); @@ -40,9 +47,6 @@ describe('e2e_p2p_data_withholding_slash', () => { const slashingUnit = BigInt(20e18); const slashingQuorum = 3; const slashingRoundSize = 4; - // This test needs longer slot window to ensure that the client has enough time to submit their txs, - // and have the nodes get recreated, prior to the reorg. - const aztecSlotDuration = 32; beforeEach(async () => { t = await P2PNetworkTest.create({ @@ -55,8 +59,9 @@ describe('e2e_p2p_data_withholding_slash', () => { listenAddress: '127.0.0.1', aztecEpochDuration: 2, ethereumSlotDuration: 4, - aztecSlotDuration, + aztecSlotDuration: AZTEC_SLOT_DURATION, aztecProofSubmissionEpochs: 0, // effectively forces instant reorgs + aztecTargetCommitteeSize: COMMITTEE_SIZE, slashingQuorum, slashingRoundSizeInEpochs: slashingRoundSize / 2, slashAmountSmall: slashingUnit, @@ -108,7 +113,7 @@ describe('e2e_p2p_data_withholding_slash', () => { t.ctx.aztecNodeConfig.validatorReexecute = false; t.ctx.aztecNodeConfig.minTxsPerBlock = 1; - t.logger.info('Creating nodes'); + t.logger.warn('Creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, t.ctx.dateProvider, @@ -136,9 +141,9 @@ describe('e2e_p2p_data_withholding_slash', () => { await debugRollup(); // Send Aztec txs - t.logger.info('Setup account'); + t.logger.warn('Setup account'); await t.setupAccount(); - t.logger.info('Stopping nodes'); + t.logger.warn('Stopping nodes'); // Note, we needed to keep the initial node running, as that is the one the txs were sent to. await t.removeInitialNode(); // Now stop the nodes, @@ -150,7 +155,7 @@ describe('e2e_p2p_data_withholding_slash', () => { // Re-create the nodes. // ASSUMING they sync in the middle of the epoch, they will "see" the reorg, and try to slash. 
- t.logger.info('Re-creating nodes'); + t.logger.warn('Re-creating nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, t.ctx.dateProvider, @@ -161,13 +166,18 @@ describe('e2e_p2p_data_withholding_slash', () => { DATA_DIR, ); - await awaitOffenseDetected({ + const offenses = await awaitOffenseDetected({ epochDuration: t.ctx.aztecNodeConfig.aztecEpochDuration, logger: t.logger, nodeAdmin: nodes[0], slashingRoundSize, + waitUntilOffenseCount: COMMITTEE_SIZE, }); + // Check offenses are correct + expect(offenses.map(o => o.validator.toChecksumString()).sort()).toEqual(committee.map(a => a.toString()).sort()); + expect(offenses.map(o => o.offenseType)).toEqual(times(COMMITTEE_SIZE, () => OffenseType.DATA_WITHHOLDING)); + await awaitCommitteeKicked({ rollup, cheatCodes: t.ctx.cheatCodes.rollup, @@ -175,7 +185,7 @@ describe('e2e_p2p_data_withholding_slash', () => { slashFactory, slashingProposer, slashingRoundSize, - aztecSlotDuration, + aztecSlotDuration: AZTEC_SLOT_DURATION, logger: t.logger, dateProvider: t.ctx.dateProvider, }); diff --git a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts index 1ca696ff3acf..448822522b2e 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/gossip_network.test.ts @@ -1,5 +1,5 @@ import type { Archiver } from '@aztec/archiver'; -import type { AztecNodeService } from '@aztec/aztec-node'; +import type { AztecNodeConfig, AztecNodeService } from '@aztec/aztec-node'; import { retryUntil, sleep } from '@aztec/aztec.js'; import type { ProverNode } from '@aztec/prover-node'; import type { SequencerClient } from '@aztec/sequencer-client'; @@ -16,6 +16,7 @@ import { ATTESTER_PRIVATE_KEYS_START_INDEX, type NodeContext, createNodes, + createNonValidatorNode, createProverNode, } from '../fixtures/setup_p2p_test.js'; import { AlertChecker, type AlertConfig } from '../quality_of_service/alert_checker.js'; @@ -47,6 
+48,7 @@ describe('e2e_p2p_network', () => { let t: P2PNetworkTest; let nodes: AztecNodeService[]; let proverNode: ProverNode; + let monitoringNode: AztecNodeService; beforeEach(async () => { t = await P2PNetworkTest.create({ @@ -71,6 +73,7 @@ describe('e2e_p2p_network', () => { afterEach(async () => { await tryStop(proverNode); + await tryStop(monitoringNode); await t.stopNodes(nodes); await t.teardown(); for (let i = 0; i < NUM_VALIDATORS; i++) { @@ -98,7 +101,7 @@ describe('e2e_p2p_network', () => { // should be set so that the only way for rollups to be built // is if the txs are successfully gossiped around the nodes. const contexts: NodeContext[] = []; - t.logger.info('Creating nodes'); + t.logger.info('Creating validator nodes'); nodes = await createNodes( t.ctx.aztecNodeConfig, t.ctx.dateProvider, @@ -112,6 +115,7 @@ describe('e2e_p2p_network', () => { ); // create a prover node that uses p2p only (not rpc) to gather txs to test prover tx collection + t.logger.warn(`Creating prover node`); proverNode = await createProverNode( t.ctx.aztecNodeConfig, BOOT_NODE_UDP_PORT + NUM_VALIDATORS + 1, @@ -124,6 +128,18 @@ describe('e2e_p2p_network', () => { ); await proverNode.start(); + t.logger.warn(`Creating non validator node`); + const monitoringNodeConfig: AztecNodeConfig = { ...t.ctx.aztecNodeConfig, alwaysReexecuteBlockProposals: true }; + monitoringNode = await createNonValidatorNode( + monitoringNodeConfig, + t.ctx.dateProvider, + BOOT_NODE_UDP_PORT + NUM_VALIDATORS + 2, + t.bootstrapNodeEnr, + t.prefilledPublicData, + `${DATA_DIR}-monitor`, + shouldCollectMetrics(), + ); + // wait a bit for peers to discover each other await sleep(8000); diff --git a/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts b/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts index 31a473c95ffc..0b2d94e02845 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts +++ 
b/yarn-project/end-to-end/src/e2e_p2p/preferred_gossip_network.test.ts @@ -284,6 +284,7 @@ describe('e2e_p2p_preferred_network', () => { .concat(validators.map((_, i) => `Validator ${i + 1}`)) .concat(noDiscoveryValidators.map((_, i) => `Picky Validator ${i + 1}`)) .concat(['Default Node']); + t.logger.warn(`All nodes initialized: ${identifiers.join(', ')}`); const validatorsUsingDiscovery = validators.length; const totalNumValidators = validators.length + noDiscoveryValidators.length; @@ -297,7 +298,9 @@ describe('e2e_p2p_preferred_network', () => { const peerResult = await waitForNodeToAcquirePeers(allNodes[i], expectedPeerCounts[i], 300, identifiers[i]); expect(peerResult).toBeTruthy(); } - t.logger.info('All node/validator peer connections established'); + t.logger.warn( + `All node peer connections established: ${identifiers.map((id, i) => `${id} (${expectedPeerCounts[i]})`).join(', ')}`, + ); validators.push(...noDiscoveryValidators); diff --git a/yarn-project/end-to-end/src/e2e_p2p/shared.ts b/yarn-project/end-to-end/src/e2e_p2p/shared.ts index 94e5ffc0c9c0..d7a1c12b4921 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/shared.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/shared.ts @@ -14,6 +14,7 @@ import { import type { RollupCheatCodes } from '@aztec/aztec/testing'; import type { EmpireSlashingProposerContract, RollupContract, TallySlashingProposerContract } from '@aztec/ethereum'; import { timesAsync, unique } from '@aztec/foundation/collection'; +import { pluralize } from '@aztec/foundation/string'; import type { TestDateProvider } from '@aztec/foundation/timer'; import type { SpamContract } from '@aztec/noir-test-contracts.js/Spam'; import { TestContract, TestContractArtifact } from '@aztec/noir-test-contracts.js/Test'; @@ -164,17 +165,20 @@ export async function awaitOffenseDetected({ nodeAdmin, slashingRoundSize, epochDuration, + waitUntilOffenseCount, }: { nodeAdmin: AztecNodeAdmin; logger: Logger; slashingRoundSize: number; epochDuration: number; + 
waitUntilOffenseCount?: number; }) { - logger.info(`Waiting for an offense to be detected`); + const targetOffenseCount = waitUntilOffenseCount ?? 1; + logger.warn(`Waiting for ${pluralize('offense', targetOffenseCount)} to be detected`); const offenses = await retryUntil( async () => { const offenses = await nodeAdmin.getSlashOffenses('all'); - if (offenses.length > 0) { + if (offenses.length >= targetOffenseCount) { return offenses; } }, diff --git a/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned.test.ts b/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts similarity index 51% rename from yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned.test.ts rename to yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts index 9e2e08cd59e6..89be6b8d5820 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts @@ -1,5 +1,8 @@ import type { AztecNodeService } from '@aztec/aztec-node'; import { sleep } from '@aztec/aztec.js'; +import { times } from '@aztec/foundation/collection'; +import { SpamContract } from '@aztec/noir-test-contracts.js/Spam'; +import { OffenseType } from '@aztec/slasher'; import { jest } from '@jest/globals'; import fs from 'fs'; @@ -15,23 +18,27 @@ jest.setTimeout(10 * 60_000); // 10 minutes // Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds const NUM_VALIDATORS = 4; +const COMMITTEE_SIZE = NUM_VALIDATORS; const BOOT_NODE_UDP_PORT = 4500; -const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'valid-epoch-pruned-')); +const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'valid-epoch-pruned-slash-')); /** * Test that we slash the committee when the pruned epoch could have been proven. 
- * - * Note, we don't need to do anything special for this test other than to run it without a prover node - * (which is the default), and this will produce pruned epochs that could have been proven. + * We don't need to do anything special for this test other than to run it without a prover node + * (which is the default), and this will produce pruned epochs that could have been proven. But we do + * need to send a tx to make sure that the slash is due to valid epoch prune and not data withholding. */ -describe('e2e_p2p_valid_epoch_pruned', () => { +describe('e2e_p2p_valid_epoch_pruned_slash', () => { let t: P2PNetworkTest; let nodes: AztecNodeService[]; const slashingQuorum = 3; const slashingRoundSize = 4; + const ethereumSlotDuration = 4; const aztecSlotDuration = 8; + const aztecEpochDuration = 2; + const initialEpoch = 8; const slashingUnit = BigInt(20e18); beforeEach(async () => { @@ -42,23 +49,26 @@ describe('e2e_p2p_valid_epoch_pruned', () => { basePort: BOOT_NODE_UDP_PORT, metricsPort: shouldCollectMetrics(), initialConfig: { + cancelTxOnTimeout: false, + publisherAllowInvalidStates: true, listenAddress: '127.0.0.1', - aztecEpochDuration: 2, - ethereumSlotDuration: 4, + aztecEpochDuration, + ethereumSlotDuration, aztecSlotDuration, - aztecProofSubmissionEpochs: 0, // reorg as soon as epoch ends + aztecProofSubmissionEpochs: 1, slashingQuorum, - slashingRoundSizeInEpochs: slashingRoundSize / 2, + slashingRoundSizeInEpochs: slashingRoundSize / aztecEpochDuration, slashSelfAllowed: true, + slashGracePeriodL2Slots: initialEpoch * aztecEpochDuration, slashAmountSmall: slashingUnit, slashAmountMedium: slashingUnit * 2n, slashAmountLarge: slashingUnit * 3n, + aztecTargetCommitteeSize: COMMITTEE_SIZE, }, }); await t.applyBaseSnapshots(); await t.setup(); - await t.removeInitialNode(); }); afterEach(async () => { @@ -91,16 +101,10 @@ describe('e2e_p2p_valid_epoch_pruned', () => { t.ctx.aztecNodeConfig.slashPrunePenalty = slashingAmount; 
t.ctx.aztecNodeConfig.validatorReexecute = false; - t.ctx.aztecNodeConfig.minTxsPerBlock = 0; - - // Jump forward to an epoch in the future such that the validator set is not empty - await t.ctx.cheatCodes.rollup.advanceToEpoch(4n); + t.ctx.aztecNodeConfig.minTxsPerBlock = 1; + t.ctx.aztecNodeConfig.txPoolDeleteTxsAfterReorg = true; - // create our network of nodes and submit txs into each of them - // the number of txs per node and the number of txs per rollup - // should be set so that the only way for rollups to be built - // is if the txs are successfully gossiped around the nodes. - t.logger.info('Creating nodes'); + t.logger.warn(`Creating ${NUM_VALIDATORS} new nodes`); nodes = await createNodes( t.ctx.aztecNodeConfig, t.ctx.dateProvider, @@ -118,17 +122,59 @@ describe('e2e_p2p_valid_epoch_pruned', () => { await debugRollup(); // Wait for the committee to exist + await t.ctx.cheatCodes.rollup.advanceToEpoch(2, { updateDateProvider: t.ctx.dateProvider }); + await t.ctx.cheatCodes.rollup.markAsProven(); const committee = await awaitCommitteeExists({ rollup, logger: t.logger }); await debugRollup(); + // Set up a wallet and keep it out of reorgs + await t.ctx.cheatCodes.rollup.markAsProven(); + await t.setupAccount(); + await t.ctx.cheatCodes.rollup.markAsProven(); + + // Warp forward to after the initial grace period + expect(await rollup.getCurrentEpoch()).toBeLessThan(initialEpoch); + await t.ctx.cheatCodes.rollup.advanceToEpoch(initialEpoch, { + updateDateProvider: t.ctx.dateProvider, + offset: -ethereumSlotDuration, + }); + await t.ctx.cheatCodes.rollup.markAsProven(); + + // Send a tx to deploy a contract so that we have a tx with public function execution in the pruned epoch + // This allows us to test that the slashed offense is valid epoch prune and not data withholding + t.logger.warn(`Submitting deployment tx to the network`); + const _spamContract = await SpamContract.deploy(t.wallet!).send({ from: t.defaultAccountAddress! 
}).deployed(); + + // And send a tx that depends on a tx with public function execution on a contract class that will be reorged out + // This allows us to test that we handle pruned contract classes correctly + // TODO(palla/A-51): For this check to actually check what we need, we need to ensure the deployment and the + // this tx are in different blocks but within the same epoch, so it gets reexecuted by the prune-watcher. + // This does not always happen in the current test setup. + // t.logger.warn(`Submitting tx with public function execution to the network`); + // await spamContract.methods.spam(1, 1, true).send({ from: t.defaultAccountAddress! }).wait(); + + // Initial node receives the txs, so we cannot stop it before that one is mined + // Yes, that means that there are probably two nodes running the same validator key (the initial node and nodes[0]) + // This will come back and haunt us eventually, not just here but in most e2e p2p tests that make the same mistake + t.logger.warn(`Removing initial node`); + await t.removeInitialNode(); + + // Warp forward so we can prune the epoch + await t.ctx.cheatCodes.rollup.advanceToNextEpoch({ updateDateProvider: t.ctx.dateProvider }); + // Wait for epoch to be pruned and the offense to be detected - const _offenses = await awaitOffenseDetected({ + const offenses = await awaitOffenseDetected({ logger: t.logger, nodeAdmin: nodes[0], slashingRoundSize, epochDuration: t.ctx.aztecNodeConfig.aztecEpochDuration, + waitUntilOffenseCount: COMMITTEE_SIZE, }); + // Check offenses are correct + expect(offenses.map(o => o.validator.toChecksumString()).sort()).toEqual(committee.map(a => a.toString()).sort()); + expect(offenses.map(o => o.offenseType)).toEqual(times(COMMITTEE_SIZE, () => OffenseType.VALID_EPOCH_PRUNED)); + // And then wait for them to be kicked out await awaitCommitteeKicked({ rollup, diff --git a/yarn-project/end-to-end/src/e2e_synching.test.ts b/yarn-project/end-to-end/src/e2e_synching.test.ts index 
a01fbcab6413..fc99fdb6f9a6 100644 --- a/yarn-project/end-to-end/src/e2e_synching.test.ts +++ b/yarn-project/end-to-end/src/e2e_synching.test.ts @@ -448,6 +448,7 @@ describe('e2e_synching', () => { epochCache, dateProvider: dateProvider!, metrics: sequencerPublisherMetrics, + lastActions: {}, }, ); diff --git a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts index 71438a39719c..3f68b380496d 100644 --- a/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts +++ b/yarn-project/end-to-end/src/fixtures/setup_p2p_test.ts @@ -86,7 +86,7 @@ export async function createNodes( return nodes; } -// creates a P2P enabled instance of Aztec Node Service +/** Creates a P2P enabled instance of Aztec Node Service with a validator */ export async function createNode( config: AztecNodeConfig, dateProvider: DateProvider, @@ -106,6 +106,31 @@ export async function createNode( return loggerIdStorage ? await loggerIdStorage.run(tcpPort.toString(), createNode) : createNode(); } +/** Creates a P2P enabled instance of Aztec Node Service without a validator */ +export async function createNonValidatorNode( + baseConfig: AztecNodeConfig, + dateProvider: DateProvider, + tcpPort: number, + bootstrapNode: string | undefined, + prefilledPublicData?: PublicDataTreeLeaf[], + dataDirectory?: string, + metricsPort?: number, + loggerIdStorage?: AsyncLocalStorage, +) { + const createNode = async () => { + const p2pConfig = await createP2PConfig(baseConfig, bootstrapNode, tcpPort, dataDirectory); + const config: AztecNodeConfig = { + ...p2pConfig, + disableValidator: true, + validatorPrivateKeys: undefined, + publisherPrivateKeys: [], + }; + const telemetry = getEndToEndTestTelemetryClient(metricsPort); + return await AztecNodeService.createAndSync(config, { telemetry, dateProvider }, { prefilledPublicData }); + }; + return loggerIdStorage ? 
await loggerIdStorage.run(tcpPort.toString(), createNode) : createNode(); +} + export async function createProverNode( config: AztecNodeConfig, tcpPort: number, @@ -121,14 +146,13 @@ export async function createProverNode( const proverNodePrivateKey = getPrivateKeyFromIndex(ATTESTER_PRIVATE_KEYS_START_INDEX + addressIndex)!; const telemetry = getEndToEndTestTelemetryClient(metricsPort); - const proverConfig: Partial = { - p2pIp: `127.0.0.1`, - p2pPort: tcpPort ?? (await getPort()), - p2pEnabled: true, - peerCheckIntervalMS: TEST_PEER_CHECK_INTERVAL_MS, - blockCheckIntervalMS: 1000, - bootstrapNodes: bootstrapNode ? [bootstrapNode] : [], - }; + const proverConfig: Partial = await createP2PConfig( + config, + bootstrapNode, + tcpPort, + dataDirectory, + ); + const aztecNodeRpcTxProvider = undefined; return await createAndSyncProverNode( bufferToHex(proverNodePrivateKey), @@ -142,20 +166,14 @@ export async function createProverNode( return loggerIdStorage ? await loggerIdStorage.run(tcpPort.toString(), createProverNode) : createProverNode(); } -export async function createValidatorConfig( +export async function createP2PConfig( config: AztecNodeConfig, bootstrapNodeEnr?: string, port?: number, - addressIndex: number = 1, dataDirectory?: string, ) { port = port ?? 
(await getPort()); - const attesterPrivateKey = bufferToHex(getPrivateKeyFromIndex(ATTESTER_PRIVATE_KEYS_START_INDEX + addressIndex)!); - - config.validatorPrivateKeys = new SecretValue([attesterPrivateKey]); - config.publisherPrivateKeys = [new SecretValue(attesterPrivateKey)]; - const nodeConfig: AztecNodeConfig = { ...config, p2pIp: `127.0.0.1`, @@ -169,3 +187,22 @@ export async function createValidatorConfig( return nodeConfig; } + +export async function createValidatorConfig( + config: AztecNodeConfig, + bootstrapNodeEnr?: string, + port?: number, + addressIndex: number = 1, + dataDirectory?: string, +) { + const attesterPrivateKey = bufferToHex(getPrivateKeyFromIndex(ATTESTER_PRIVATE_KEYS_START_INDEX + addressIndex)!); + const p2pConfig = await createP2PConfig(config, bootstrapNodeEnr, port, dataDirectory); + const nodeConfig: AztecNodeConfig = { + ...config, + ...p2pConfig, + validatorPrivateKeys: new SecretValue([attesterPrivateKey]), + publisherPrivateKeys: [new SecretValue(attesterPrivateKey)], + }; + + return nodeConfig; +} diff --git a/yarn-project/end-to-end/src/fixtures/web3signer.ts b/yarn-project/end-to-end/src/fixtures/web3signer.ts new file mode 100644 index 000000000000..ac81895fc67a --- /dev/null +++ b/yarn-project/end-to-end/src/fixtures/web3signer.ts @@ -0,0 +1,46 @@ +import { sleep } from '@aztec/aztec.js'; +import { randomBytes } from '@aztec/foundation/crypto'; + +import { mkdirSync } from 'node:fs'; +import { writeFile } from 'node:fs/promises'; +import { join } from 'node:path'; + +export async function createWeb3SignerKeystore(dir: string, ...privateKeys: string[]) { + const yaml = privateKeys + .map( + pk => `\ +type: file-raw +keyType: SECP256K1 +privateKey: ${pk}`, + ) + .join('\n---\n'); + + // NOTE: nodejs stdlib can only create temp directories, not temp files! 
+ // this write uses wx (write-exclusive) so it'll throw if the file already exists + const path = join(dir, `keystore-${randomBytes(4).toString('hex')}.yaml`); + await writeFile(path, yaml, { flag: 'wx' }); +} + +export async function refreshWeb3Signer(url: string) { + await fetch(new URL('reload', url), { method: 'POST' }); + // give the service a chance to load up the new files + // 1s might not be enough if there are a lot of files to scan + await sleep(1000); +} + +export function getWeb3SignerTestKeystoreDir(): string { + if (process.env.WEB3_SIGNER_TEST_KEYSTORE_DIR) { + mkdirSync(process.env.WEB3_SIGNER_TEST_KEYSTORE_DIR, { recursive: true }); + return process.env.WEB3_SIGNER_TEST_KEYSTORE_DIR; + } else { + throw new Error('Web3signer not running'); + } +} + +export function getWeb3SignerUrl(): string { + if (process.env.WEB3_SIGNER_URL) { + return process.env.WEB3_SIGNER_URL; + } else { + throw new Error('Web3signer not running'); + } +} diff --git a/yarn-project/ethereum/src/contracts/multicall.ts b/yarn-project/ethereum/src/contracts/multicall.ts index 7666cb6ebf87..44c01e87d727 100644 --- a/yarn-project/ethereum/src/contracts/multicall.ts +++ b/yarn-project/ethereum/src/contracts/multicall.ts @@ -1,4 +1,5 @@ import { toHex as toPaddedHex } from '@aztec/foundation/bigint-buffer'; +import { TimeoutError } from '@aztec/foundation/error'; import type { Logger } from '@aztec/foundation/log'; import { type EncodeFunctionDataParameters, type Hex, encodeFunctionData, multicall3Abi } from 'viem'; @@ -92,6 +93,10 @@ export class Multicall3 { return { receipt, gasPrice, errorMsg }; } } catch (err) { + if (err instanceof TimeoutError) { + throw err; + } + for (const request of requests) { logger.debug('Simulating request', { request }); const result = await l1TxUtils diff --git a/yarn-project/ethereum/src/contracts/slasher_contract.ts b/yarn-project/ethereum/src/contracts/slasher_contract.ts index ff93b8f1da61..6cc9caea900e 100644 --- 
a/yarn-project/ethereum/src/contracts/slasher_contract.ts +++ b/yarn-project/ethereum/src/contracts/slasher_contract.ts @@ -38,6 +38,15 @@ export class SlasherContract { } } + /** + * Checks if slashing is currently enabled. + * @returns True if slashing is enabled, false otherwise + */ + public isSlashingEnabled(): Promise { + // TODO(#16971) Update when merged L1 changes + return Promise.resolve(true); + } + /** * Gets the current vetoer address. * @returns The vetoer address diff --git a/yarn-project/ethereum/src/l1_tx_utils.ts b/yarn-project/ethereum/src/l1_tx_utils.ts index 645cd91f039c..561ca717fa6a 100644 --- a/yarn-project/ethereum/src/l1_tx_utils.ts +++ b/yarn-project/ethereum/src/l1_tx_utils.ts @@ -7,6 +7,7 @@ import { getDefaultConfig, numberConfigHelper, } from '@aztec/foundation/config'; +import { TimeoutError } from '@aztec/foundation/error'; import { EthAddress } from '@aztec/foundation/eth-address'; import type { ViemTransactionSignature } from '@aztec/foundation/eth-signature'; import { type Logger, createLogger } from '@aztec/foundation/log'; @@ -928,7 +929,7 @@ export class L1TxUtils extends ReadOnlyL1TxUtils { }); } - this.logger?.error(`L1 transaction ${currentTxHash} timed out`, { + this.logger?.error(`L1 transaction ${currentTxHash} timed out`, undefined, { txHash: currentTxHash, txTimeoutAt: gasConfig.txTimeoutAt, txTimeoutMs: gasConfig.txTimeoutMs, @@ -939,7 +940,7 @@ export class L1TxUtils extends ReadOnlyL1TxUtils { ...tx, }); - throw new Error(`L1 transaction ${currentTxHash} timed out`); + throw new TimeoutError(`L1 transaction ${currentTxHash} timed out`); } /** diff --git a/yarn-project/ethereum/src/publisher_manager.test.ts b/yarn-project/ethereum/src/publisher_manager.test.ts index 73260c4f0ddd..967d487254f1 100644 --- a/yarn-project/ethereum/src/publisher_manager.test.ts +++ b/yarn-project/ethereum/src/publisher_manager.test.ts @@ -18,7 +18,7 @@ describe('PublisherManager', () => { it('should initialize with publishers', () => { 
mockPublishers = createMockPublishers(3); - expect(() => new PublisherManager(mockPublishers)).not.toThrow(); + expect(() => new PublisherManager(mockPublishers, {})).not.toThrow(); }); }); @@ -27,7 +27,7 @@ describe('PublisherManager', () => { beforeEach(() => { addresses = Array.from({ length: 3 }, () => EthAddress.random()); mockPublishers = createMockPublishers(3, addresses); - publisherManager = new PublisherManager(mockPublishers); + publisherManager = new PublisherManager(mockPublishers, {}); }); it('should throw error when no valid publishers found', async () => { @@ -47,6 +47,17 @@ describe('PublisherManager', () => { await expect(publisherManager.getAvailablePublisher()).rejects.toThrow('Failed to find an available publisher.'); }); + it('should return a publisher in invalid state if allowed', async () => { + mockPublishers[0]['state'] = TxUtilsState.SENT; + mockPublishers[1]['state'] = TxUtilsState.CANCELLED; + mockPublishers[2]['state'] = TxUtilsState.NOT_MINED; + + publisherManager = new PublisherManager(mockPublishers, { publisherAllowInvalidStates: true }); + await expect(publisherManager.getAvailablePublisher(p => p.state === TxUtilsState.CANCELLED)).resolves.toBe( + mockPublishers[1], + ); + }); + it('should return publisher with best state', async () => { mockPublishers[0]['state'] = TxUtilsState.MINED; mockPublishers[1]['state'] = TxUtilsState.IDLE; @@ -118,7 +129,7 @@ describe('PublisherManager', () => { it('should prioritise same state publishers based on balance and then least recently used', async () => { const ethAddresses = Array.from({ length: 5 }, () => EthAddress.random()); mockPublishers = createMockPublishers(5, ethAddresses); - publisherManager = new PublisherManager(mockPublishers); + publisherManager = new PublisherManager(mockPublishers, {}); const filter = (utils: L1TxUtils) => utils.getSenderAddress() !== mockPublishers[2].getSenderAddress(); // Filter out publisher in index 2 diff --git 
a/yarn-project/ethereum/src/publisher_manager.ts b/yarn-project/ethereum/src/publisher_manager.ts index 0596902a631d..14712a34d132 100644 --- a/yarn-project/ethereum/src/publisher_manager.ts +++ b/yarn-project/ethereum/src/publisher_manager.ts @@ -1,3 +1,4 @@ +import { pick } from '@aztec/foundation/collection'; import { createLogger } from '@aztec/foundation/log'; import { L1TxUtils, TxUtilsState } from './l1_tx_utils.js'; @@ -9,10 +10,15 @@ export type PublisherFilter = (utils: UtilsType) => export class PublisherManager { private log = createLogger('PublisherManager'); + private config: { publisherAllowInvalidStates?: boolean }; - constructor(private publishers: UtilsType[]) { + constructor( + private publishers: UtilsType[], + config: { publisherAllowInvalidStates?: boolean }, + ) { this.log.info(`PublisherManager initialized with ${publishers.length} publishers.`); this.publishers = publishers; + this.config = pick(config, 'publisherAllowInvalidStates'); } // Finds and prioritises available publishers based on @@ -23,9 +29,13 @@ export class PublisherManager { // 5. Then priority based on least recently used public async getAvailablePublisher(filter: PublisherFilter = () => true): Promise { // Extract the valid publishers - const validPublishers = this.publishers.filter( - (pub: UtilsType) => !invalidStates.includes(pub.state) && filter(pub), - ); + let validPublishers = this.publishers.filter((pub: UtilsType) => !invalidStates.includes(pub.state) && filter(pub)); + + // If none found but we allow invalid states, try again including them + if (validPublishers.length === 0 && this.config.publisherAllowInvalidStates) { + this.log.warn(`No valid publishers found. 
Trying again including invalid states.`); + validPublishers = this.publishers.filter(pub => filter(pub)); + } // Error if none found if (validPublishers.length === 0) { diff --git a/yarn-project/ethereum/src/test/rollup_cheat_codes.ts b/yarn-project/ethereum/src/test/rollup_cheat_codes.ts index d05597d0ef2e..82a96f4cd9bb 100644 --- a/yarn-project/ethereum/src/test/rollup_cheat_codes.ts +++ b/yarn-project/ethereum/src/test/rollup_cheat_codes.ts @@ -112,14 +112,17 @@ export class RollupCheatCodes { * @param opts - Options */ public async advanceToEpoch( - epoch: bigint, + epoch: bigint | number, opts: { /** Optional test date provider to update with the epoch timestamp */ updateDateProvider?: TestDateProvider; + /** Offset in seconds */ + offset?: number; } = {}, ) { const { epochDuration: slotsInEpoch } = await this.getConfig(); - const timestamp = await this.rollup.read.getTimestampForSlot([epoch * slotsInEpoch]); + const timestamp = + (await this.rollup.read.getTimestampForSlot([BigInt(epoch) * slotsInEpoch])) + BigInt(opts.offset ?? 
0); try { await this.ethCheatCodes.warp(Number(timestamp), { ...opts, silent: true, resetBlockInterval: true }); this.logger.warn(`Warped to epoch ${epoch}`); diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index ab503ea7ec7e..d4c07c2cc3ed 100644 --- a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -78,6 +78,7 @@ export type EnvVar = | 'LOG_LEVEL' | 'MNEMONIC' | 'NETWORK' + | 'NETWORK_CONFIG_LOCATION' | 'NO_PXE' | 'USE_GCLOUD_LOGGING' | 'OTEL_EXPORTER_OTLP_METRICS_ENDPOINT' @@ -162,6 +163,8 @@ export type EnvVar = | 'PROVER_PUBLISHER_PRIVATE_KEY' | 'PROVER_PUBLISHER_PRIVATE_KEYS' | 'PROVER_PUBLISHER_ADDRESSES' + | 'PROVER_PUBLISHER_ALLOW_INVALID_STATES' + | 'PROVER_PUBLISHER_ENABLED' | 'PROVER_REAL_PROOFS' | 'PROVER_TEST_DELAY_FACTOR' | 'PROVER_TEST_DELAY_MS' @@ -184,6 +187,8 @@ export type EnvVar = | 'SEQ_PUBLISHER_PRIVATE_KEY' | 'SEQ_PUBLISHER_PRIVATE_KEYS' | 'SEQ_PUBLISHER_ADDRESSES' + | 'SEQ_PUBLISHER_ALLOW_INVALID_STATES' + | 'SEQ_PUBLISHER_ENABLED' | 'SEQ_TX_POLLING_INTERVAL_MS' | 'SEQ_ENFORCE_TIME_TABLE' | 'SEQ_MAX_L1_TX_INCLUSION_TIME_INTO_SLOT' @@ -207,6 +212,7 @@ export type EnvVar = | 'SLASH_GRACE_PERIOD_L2_SLOTS' | 'SLASH_OFFENSE_EXPIRATION_ROUNDS' | 'SLASH_MAX_PAYLOAD_SIZE' + | 'SLASH_EXECUTE_ROUNDS_LOOK_BACK' | 'SYNC_MODE' | 'SYNC_SNAPSHOTS_URL' | 'TELEMETRY' @@ -280,6 +286,7 @@ export type EnvVar = | 'K8S_POD_UID' | 'K8S_NAMESPACE_NAME' | 'VALIDATOR_REEXECUTE_DEADLINE_MS' + | 'ALWAYS_REEXECUTE_BLOCK_PROPOSALS' | 'AUTO_UPDATE' | 'AUTO_UPDATE_URL' | 'WEB3_SIGNER_URL'; diff --git a/yarn-project/foundation/src/config/index.ts b/yarn-project/foundation/src/config/index.ts index d195f6b70b48..65b6428b8997 100644 --- a/yarn-project/foundation/src/config/index.ts +++ b/yarn-project/foundation/src/config/index.ts @@ -5,6 +5,8 @@ import { SecretValue } from './secret_value.js'; export { SecretValue, getActiveNetworkName }; export type { EnvVar, 
NetworkNames }; +export type { NetworkConfig, NetworkConfigMap } from './network_config.js'; +export { NetworkConfigMapSchema, NetworkConfigSchema } from './network_config.js'; export interface ConfigMapping { env?: EnvVar; diff --git a/yarn-project/foundation/src/config/network_config.test.ts b/yarn-project/foundation/src/config/network_config.test.ts new file mode 100644 index 000000000000..ca484686d25a --- /dev/null +++ b/yarn-project/foundation/src/config/network_config.test.ts @@ -0,0 +1,150 @@ +import { NetworkConfigMapSchema, NetworkConfigSchema } from './network_config.js'; + +describe('NetworkConfig', () => { + describe('NetworkConfigSchema', () => { + it('should validate a valid remote config', () => { + const validConfigInput = { + bootnodes: ['enr:-test1', 'enr:-test2'], + snapshots: ['https://example.com/snapshot1.tar.gz'], + registryAddress: '0x1234567890123456789012345678901234567890', + feeAssetHandlerAddress: '0x2345678901234567890123456789012345678901', + l1ChainId: 11155111, + }; + + const result = NetworkConfigSchema.safeParse(validConfigInput); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.bootnodes).toEqual(validConfigInput.bootnodes); + expect(result.data.snapshots).toEqual(validConfigInput.snapshots); + expect(result.data.registryAddress.toString()).toBe(validConfigInput.registryAddress); + expect(result.data.feeAssetHandlerAddress?.toString()).toBe(validConfigInput.feeAssetHandlerAddress); + expect(result.data.l1ChainId).toBe(validConfigInput.l1ChainId); + } + }); + + it('should validate config without optional feeAssetHandlerAddress', () => { + const validConfig = { + bootnodes: ['enr:-test1'], + snapshots: ['https://example.com/snapshot1.tar.gz'], + registryAddress: '0x1234567890123456789012345678901234567890', + l1ChainId: 11155111, + }; + + const result = NetworkConfigSchema.safeParse(validConfig); + expect(result.success).toBe(true); + }); + + it('should reject invalid config with missing required 
fields', () => { + const invalidConfig = { + bootnodes: ['enr:-test1'], + // Missing required fields + }; + + const result = NetworkConfigSchema.safeParse(invalidConfig); + expect(result.success).toBe(false); + }); + + it('should allow additional unknown fields (permissive parsing)', () => { + const configWithExtraFields = { + bootnodes: ['enr:-test1'], + snapshots: ['https://example.com/snapshot1.tar.gz'], + registryAddress: '0x1234567890123456789012345678901234567890', + l1ChainId: 11155111, + // New fields that might be added in the future + newFeature: 'enabled', + futureConfig: { + someNestedValue: 42, + anotherValue: 'test', + }, + arrayOfNewStuff: ['item1', 'item2'], + }; + + const result = NetworkConfigSchema.safeParse(configWithExtraFields); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.bootnodes).toEqual(configWithExtraFields.bootnodes); + expect(result.data.registryAddress.toString()).toBe(configWithExtraFields.registryAddress); + expect(result.data.l1ChainId).toBe(configWithExtraFields.l1ChainId); + // Verify that unknown fields are preserved + expect((result.data as any).newFeature).toBe('enabled'); + expect((result.data as any).futureConfig).toEqual(configWithExtraFields.futureConfig); + expect((result.data as any).arrayOfNewStuff).toEqual(configWithExtraFields.arrayOfNewStuff); + } + }); + }); + + describe('NetworkConfigMapSchema', () => { + it('should validate multiple network configurations', () => { + const networkConfigInput = { + 'staging-public': { + bootnodes: ['enr:-staging1'], + snapshots: ['https://example.com/staging-snapshot.tar.gz'], + registryAddress: '0x1234567890123456789012345678901234567890', + l1ChainId: 11155111, + }, + testnet: { + bootnodes: ['enr:-testnet1', 'enr:-testnet2'], + snapshots: ['https://example.com/testnet-snapshot.tar.gz'], + registryAddress: '0x2345678901234567890123456789012345678901', + feeAssetHandlerAddress: '0x3456789012345678901234567890123456789012', + l1ChainId: 1, + }, 
+ }; + + const result = NetworkConfigMapSchema.safeParse(networkConfigInput); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data['staging-public'].registryAddress.toString()).toBe( + '0x1234567890123456789012345678901234567890', + ); + expect(result.data['testnet'].registryAddress.toString()).toBe('0x2345678901234567890123456789012345678901'); + expect(result.data['testnet'].feeAssetHandlerAddress?.toString()).toBe( + '0x3456789012345678901234567890123456789012', + ); + } + }); + + it('should handle future network config schema evolution', () => { + const futureFriendlyNetworkConfig = { + 'staging-public': { + bootnodes: ['enr:-staging1'], + snapshots: ['https://example.com/staging-snapshot.tar.gz'], + registryAddress: '0x1234567890123456789012345678901234567890', + l1ChainId: 11155111, + // Future fields that don't exist in current schema + newBootnodeFormat: ['multiaddr:/ip4/...'], + advancedP2PConfig: { + maxPeers: 50, + timeout: 30000, + }, + }, + testnet: { + bootnodes: ['enr:-testnet1'], + snapshots: ['https://example.com/testnet-snapshot.tar.gz'], + registryAddress: '0x2345678901234567890123456789012345678901', + l1ChainId: 1, + // Different future fields per network + experimentalFeatures: ['feature1', 'feature2'], + }, + }; + + const result = NetworkConfigMapSchema.safeParse(futureFriendlyNetworkConfig); + expect(result.success).toBe(true); + if (result.success) { + // Verify existing fields still work + expect(result.data['staging-public'].registryAddress.toString()).toBe( + '0x1234567890123456789012345678901234567890', + ); + expect(result.data['testnet'].registryAddress.toString()).toBe('0x2345678901234567890123456789012345678901'); + + // Verify future fields are preserved + expect((result.data['staging-public'] as any).newBootnodeFormat).toEqual(['multiaddr:/ip4/...']); + expect((result.data['staging-public'] as any).advancedP2PConfig).toEqual({ + maxPeers: 50, + timeout: 30000, + }); + expect((result.data['testnet'] as 
any).experimentalFeatures).toEqual(['feature1', 'feature2']); + } + }); + }); +}); diff --git a/yarn-project/foundation/src/config/network_config.ts b/yarn-project/foundation/src/config/network_config.ts new file mode 100644 index 000000000000..97d976a6de2e --- /dev/null +++ b/yarn-project/foundation/src/config/network_config.ts @@ -0,0 +1,16 @@ +import { z } from 'zod'; + +export const NetworkConfigSchema = z + .object({ + bootnodes: z.array(z.string()), + snapshots: z.array(z.string()), + registryAddress: z.string(), + feeAssetHandlerAddress: z.string().optional(), + l1ChainId: z.number(), + }) + .passthrough(); // Allow additional unknown fields to pass through + +export const NetworkConfigMapSchema = z.record(z.string(), NetworkConfigSchema); + +export type NetworkConfig = z.infer; +export type NetworkConfigMap = z.infer; diff --git a/yarn-project/foundation/src/eth-address/index.ts b/yarn-project/foundation/src/eth-address/index.ts index 0e406b315e63..a1dc046536ca 100644 --- a/yarn-project/foundation/src/eth-address/index.ts +++ b/yarn-project/foundation/src/eth-address/index.ts @@ -117,14 +117,14 @@ export class EthAddress { * @param address - The Ethereum address as a hex-encoded string. * @returns The Ethereum address in its checksum format. */ - public static toChecksumAddress(address: string) { + public static toChecksumAddress(address: string): `0x${string}` { if (!EthAddress.isAddress(address)) { throw new Error('Invalid address string.'); } address = address.toLowerCase().replace(/^0x/i, ''); const addressHash = keccak256String(address); - let checksumAddress = '0x'; + let checksumAddress: `0x${string}` = '0x'; for (let i = 0; i < address.length; i++) { // If ith character is 9 to f then make it uppercase. 
diff --git a/yarn-project/foundation/src/log/log_fn.ts b/yarn-project/foundation/src/log/log_fn.ts index be761201cd51..5c130fb7c9f7 100644 --- a/yarn-project/foundation/src/log/log_fn.ts +++ b/yarn-project/foundation/src/log/log_fn.ts @@ -1,5 +1,5 @@ /** Structured log data to include with the message. */ -export type LogData = Record; +export type LogData = Record; /** A callable logger instance. */ export type LogFn = (msg: string, data?: unknown) => void; diff --git a/yarn-project/node-keystore/src/keystore_manager.test.ts b/yarn-project/node-keystore/src/keystore_manager.test.ts index 48c47da1242b..b2b260be3fd5 100644 --- a/yarn-project/node-keystore/src/keystore_manager.test.ts +++ b/yarn-project/node-keystore/src/keystore_manager.test.ts @@ -6,13 +6,14 @@ import { Buffer32 } from '@aztec/foundation/buffer'; import { EthAddress } from '@aztec/foundation/eth-address'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; -import { describe, expect, it } from '@jest/globals'; +import { describe, expect, it, jest } from '@jest/globals'; import { mkdirSync, writeFileSync } from 'fs'; import { tmpdir } from 'os'; import { join } from 'path'; import { mnemonicToAccount } from 'viem/accounts'; import { KeystoreError, KeystoreManager } from '../src/keystore_manager.js'; +import { RemoteSigner } from '../src/signer.js'; import type { KeyStore } from '../src/types.js'; describe('KeystoreManager', () => { @@ -1093,4 +1094,216 @@ describe('KeystoreManager', () => { expect(cfg).toBeUndefined(); }); }); + + describe('validateSigners', () => { + it('should not validate when there are no remote signers', async () => { + const keystore: KeyStore = { + schemaVersion: 1, + validators: [ + { + attester: '0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef' as any, + feeRecipient: await AztecAddress.random(), + }, + ], + }; + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + }); + + it('should 
validate remote signers for validators', async () => { + const testAddress = EthAddress.random(); + const testUrl = 'http://test-signer:9000'; + + const keystore: KeyStore = { + schemaVersion: 1, + validators: [ + { + attester: { address: testAddress, remoteSignerUrl: testUrl }, + feeRecipient: await AztecAddress.random(), + }, + ], + }; + + using _ = jest.spyOn(RemoteSigner, 'validateAccess').mockImplementation(() => Promise.resolve()); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + }); + + it('should batch validate multiple addresses for the same remote signer URL', async () => { + const testUrl = 'http://test-signer:9000'; + const address1 = EthAddress.random(); + const address2 = EthAddress.random(); + const address3 = EthAddress.random(); + + const keystore: KeyStore = { + schemaVersion: 1, + validators: [ + { + attester: [ + { address: address1, remoteSignerUrl: testUrl }, + { address: address2, remoteSignerUrl: testUrl }, + ], + publisher: { address: address3, remoteSignerUrl: testUrl }, + feeRecipient: await AztecAddress.random(), + }, + ], + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess').mockImplementation(() => Promise.resolve()); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + + // Should batch all three addresses into one call + expect(validateAccessSpy).toHaveBeenCalledTimes(1); + expect(validateAccessSpy).toHaveBeenCalledWith( + testUrl, + expect.arrayContaining([address1.toString(), address2.toString(), address3.toString()]), + ); + }); + + it('should validate remote signers from default config', async () => { + const defaultUrl = 'http://default-signer:9000'; + const address = EthAddress.random(); + + const keystore: KeyStore = { + schemaVersion: 1, + remoteSigner: defaultUrl, + validators: [ + { + attester: address, // Just address, uses default remote signer + feeRecipient: 
await AztecAddress.random(), + }, + ], + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess'); + validateAccessSpy.mockResolvedValueOnce(undefined); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + + expect(validateAccessSpy).toHaveBeenCalledWith(defaultUrl, [address.toString()]); + }); + + it('should validate slasher remote signers', async () => { + const testUrl = 'http://slasher-signer:9000'; + const slasherAddress = EthAddress.random(); + + const keystore: KeyStore = { + schemaVersion: 1, + slasher: { address: slasherAddress, remoteSignerUrl: testUrl }, + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess'); + validateAccessSpy.mockResolvedValueOnce(undefined); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + + expect(validateAccessSpy).toHaveBeenCalledWith(testUrl, [slasherAddress.toString()]); + }); + + it('should validate prover remote signers', async () => { + const testUrl = 'http://prover-signer:9000'; + const publisherAddress = EthAddress.random(); + const proverId = EthAddress.random(); + + const keystore: KeyStore = { + schemaVersion: 1, + remoteSigner: testUrl, + prover: { + id: proverId, + publisher: [publisherAddress], + }, + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess'); + validateAccessSpy.mockResolvedValueOnce(undefined); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + + expect(validateAccessSpy).toHaveBeenCalledWith(testUrl, [publisherAddress.toString()]); + }); + + it('should handle validation errors', async () => { + const testUrl = 'http://test-signer:9000'; + const address = EthAddress.random(); + + const keystore: KeyStore = { + schemaVersion: 1, + validators: [ + { + attester: { address, remoteSignerUrl: testUrl }, + feeRecipient: await 
AztecAddress.random(), + }, + ], + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess'); + validateAccessSpy.mockRejectedValueOnce(new Error('Connection refused')); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).rejects.toThrow('Connection refused'); + }); + + it('should skip validation for mnemonic and JSON V3 configs', async () => { + const keystore: KeyStore = { + schemaVersion: 1, + validators: [ + { + attester: { mnemonic: 'test test test test test test test test test test test junk' } as any, + feeRecipient: await AztecAddress.random(), + }, + { + attester: { path: '/some/path.json', password: 'test' } as any, + feeRecipient: await AztecAddress.random(), + }, + ], + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess'); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + + // Should not call validateAccess for mnemonic or JSON configs + expect(validateAccessSpy).not.toHaveBeenCalled(); + }); + + it('should validate multiple remote signer URLs separately', async () => { + const url1 = 'http://signer1:9000'; + const url2 = 'http://signer2:9000'; + const address1 = EthAddress.random(); + const address2 = EthAddress.random(); + + const keystore: KeyStore = { + schemaVersion: 1, + validators: [ + { + attester: { address: address1, remoteSignerUrl: url1 }, + feeRecipient: await AztecAddress.random(), + }, + { + attester: { address: address2, remoteSignerUrl: url2 }, + feeRecipient: await AztecAddress.random(), + }, + ], + }; + + using validateAccessSpy = jest.spyOn(RemoteSigner, 'validateAccess'); + validateAccessSpy.mockResolvedValue(undefined); + + const manager = new KeystoreManager(keystore); + await expect(manager.validateSigners()).resolves.not.toThrow(); + + // Should call validateAccess twice, once for each URL + expect(validateAccessSpy).toHaveBeenCalledTimes(2); + 
expect(validateAccessSpy).toHaveBeenCalledWith(url1, [address1.toString()]); + expect(validateAccessSpy).toHaveBeenCalledWith(url2, [address2.toString()]); + }); + }); }); diff --git a/yarn-project/node-keystore/src/keystore_manager.ts b/yarn-project/node-keystore/src/keystore_manager.ts index 5e1cc29443a7..47b08576953a 100644 --- a/yarn-project/node-keystore/src/keystore_manager.ts +++ b/yarn-project/node-keystore/src/keystore_manager.ts @@ -58,6 +58,82 @@ export class KeystoreManager { this.validateUniqueAttesterAddresses(); } + /** + * Validates all remote signers in the keystore are accessible and have the required addresses. + * Should be called after construction if validation is needed. + */ + async validateSigners(): Promise { + // Collect all remote signers with their addresses grouped by URL + const remoteSignersByUrl = new Map>(); + + // Helper to extract remote signer URL from config + const getUrl = (config: EthRemoteSignerConfig): string => { + return typeof config === 'string' ? config : config.remoteSignerUrl; + }; + + // Helper to collect remote signers from accounts + const collectRemoteSigners = (accounts: EthAccounts, defaultRemoteSigner?: EthRemoteSignerConfig): void => { + const processAccount = (account: EthAccount): void => { + if (typeof account === 'object' && !('path' in account) && !('mnemonic' in (account as any))) { + // This is a remote signer account + const remoteSigner = account as EthRemoteSignerAccount; + const address = 'address' in remoteSigner ? 
remoteSigner.address : remoteSigner; + + let url: string; + if ('remoteSignerUrl' in remoteSigner && remoteSigner.remoteSignerUrl) { + url = remoteSigner.remoteSignerUrl; + } else if (defaultRemoteSigner) { + url = getUrl(defaultRemoteSigner); + } else { + return; // No remote signer URL available + } + + if (!remoteSignersByUrl.has(url)) { + remoteSignersByUrl.set(url, new Set()); + } + remoteSignersByUrl.get(url)!.add(address.toString()); + } + }; + + if (Array.isArray(accounts)) { + accounts.forEach(account => collectRemoteSigners(account, defaultRemoteSigner)); + } else if (typeof accounts === 'object' && 'mnemonic' in accounts) { + // Skip mnemonic configs + } else { + processAccount(accounts as EthAccount); + } + }; + + // Collect from validators + const validatorCount = this.getValidatorCount(); + for (let i = 0; i < validatorCount; i++) { + const validator = this.getValidator(i); + const remoteSigner = validator.remoteSigner || this.keystore.remoteSigner; + + collectRemoteSigners(validator.attester, remoteSigner); + if (validator.publisher) { + collectRemoteSigners(validator.publisher, remoteSigner); + } + } + + // Collect from slasher + if (this.keystore.slasher) { + collectRemoteSigners(this.keystore.slasher, this.keystore.remoteSigner); + } + + // Collect from prover + if (this.keystore.prover && typeof this.keystore.prover === 'object' && 'publisher' in this.keystore.prover) { + collectRemoteSigners(this.keystore.prover.publisher, this.keystore.remoteSigner); + } + + // Validate each remote signer URL with all its addresses + for (const [url, addresses] of remoteSignersByUrl.entries()) { + if (addresses.size > 0) { + await RemoteSigner.validateAccess(url, Array.from(addresses)); + } + } + } + /** * Validates that attester addresses are unique across all validators * Only checks simple private key attesters, not JSON-V3 or mnemonic attesters, diff --git a/yarn-project/node-keystore/src/signer.ts b/yarn-project/node-keystore/src/signer.ts index 
04807561d33b..5201b1db66e7 100644 --- a/yarn-project/node-keystore/src/signer.ts +++ b/yarn-project/node-keystore/src/signer.ts @@ -104,6 +104,82 @@ export class RemoteSigner implements EthSigner { private fetch: typeof globalThis.fetch = globalThis.fetch, ) {} + /** + * Validates that a web3signer is accessible and that the given addresses are available. + * @param remoteSignerUrl - The URL of the web3signer (can be string or EthRemoteSignerConfig) + * @param addresses - The addresses to check for availability + * @param fetch - Optional fetch implementation for testing + * @throws Error if the web3signer is not accessible or if any address is not available + */ + static async validateAccess( + remoteSignerUrl: EthRemoteSignerConfig, + addresses: string[], + fetch: typeof globalThis.fetch = globalThis.fetch, + ): Promise { + const url = typeof remoteSignerUrl === 'string' ? remoteSignerUrl : remoteSignerUrl.remoteSignerUrl; + + try { + // Check if the web3signer is reachable by calling eth_accounts + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + jsonrpc: '2.0', + method: 'eth_accounts', + params: [], + id: 1, + }), + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new SignerError( + `Web3Signer validation failed: ${response.status} ${response.statusText} - ${errorText}`, + 'eth_accounts', + url, + response.status, + ); + } + + const result = await response.json(); + + if (result.error) { + throw new SignerError( + `Web3Signer JSON-RPC error during validation: ${result.error.code} - ${result.error.message}`, + 'eth_accounts', + url, + 200, + result.error.code, + ); + } + + if (!result.result || !Array.isArray(result.result)) { + throw new Error('Invalid response from Web3Signer: expected array of accounts'); + } + + // Normalize addresses to lowercase for comparison + const availableAccounts: string[] = result.result.map((addr: string) => 
addr.toLowerCase()); + const requestedAddresses = addresses.map(addr => addr.toLowerCase()); + + // Check if all requested addresses are available + const missingAddresses = requestedAddresses.filter(addr => !availableAccounts.includes(addr)); + + if (missingAddresses.length > 0) { + throw new Error(`The following addresses are not available in the web3signer: ${missingAddresses.join(', ')}`); + } + } catch (error: any) { + if (error instanceof SignerError) { + throw error; + } + if (error.code === 'ECONNREFUSED' || error.cause?.code === 'ECONNREFUSED') { + throw new Error(`Unable to connect to web3signer at ${url}. Please ensure it is running and accessible.`); + } + throw error; + } + } + /** * Sign a message using eth_sign via remote JSON-RPC. */ diff --git a/yarn-project/p2p/src/client/factory.ts b/yarn-project/p2p/src/client/factory.ts index ec82bb0aaacb..5ab78914925b 100644 --- a/yarn-project/p2p/src/client/factory.ts +++ b/yarn-project/p2p/src/client/factory.ts @@ -54,6 +54,13 @@ export async function createP2PClient( }); const logger = deps.logger ?? createLogger('p2p'); + + if (config.bootstrapNodes.length === 0) { + logger.warn( + 'No bootstrap nodes have been provided. Set the BOOTSTRAP_NODES environment variable in order to join the P2P network', + ); + } + const store = deps.store ?? 
(await createStore(P2P_STORE_NAME, 2, config, createLogger('p2p:lmdb-v2'))); const archive = await createStore(P2P_ARCHIVE_STORE_NAME, 1, config, createLogger('p2p-archive:lmdb-v2')); const peerStore = await createStore(P2P_PEER_STORE_NAME, 1, config, createLogger('p2p-peer:lmdb-v2')); diff --git a/yarn-project/p2p/src/client/interface.ts b/yarn-project/p2p/src/client/interface.ts index 37c353407ac4..d877d1fed3a2 100644 --- a/yarn-project/p2p/src/client/interface.ts +++ b/yarn-project/p2p/src/client/interface.ts @@ -125,11 +125,11 @@ export type P2P = P2PApiFull & getArchivedTxByHash(txHash: TxHash): Promise; /** - * Returns whether the given tx hash is flagged as pending or mined. + * Returns whether the given tx hash is flagged as pending, mined, or deleted. * @param txHash - Hash of the tx to query. - * @returns Pending or mined depending on its status, or undefined if not found. + * @returns Pending, mined, or deleted depending on its status, or undefined if not found. */ - getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined>; + getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | 'deleted' | undefined>; /** Returns an iterator over pending txs on the mempool. 
*/ iteratePendingTxs(): AsyncIterableIterator; diff --git a/yarn-project/p2p/src/client/p2p_client.test.ts b/yarn-project/p2p/src/client/p2p_client.test.ts index fa0b1234e606..bc770f1bf7af 100644 --- a/yarn-project/p2p/src/client/p2p_client.test.ts +++ b/yarn-project/p2p/src/client/p2p_client.test.ts @@ -148,10 +148,11 @@ describe('P2P Client', () => { expect(txPool.deleteTxs).not.toHaveBeenCalled(); await advanceToFinalizedBlock(5); - expect(txPool.deleteTxs).toHaveBeenCalledTimes(5); + expect(txPool.deleteTxs).toHaveBeenCalledTimes(1); + txPool.deleteTxs.mockClear(); await advanceToFinalizedBlock(8); - expect(txPool.deleteTxs).toHaveBeenCalledTimes(8); + expect(txPool.deleteTxs).toHaveBeenCalledTimes(1); await client.stop(); }); diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index db4cdf55773e..48f06e130de6 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -599,7 +599,7 @@ export class P2PClient * @param txHash - Hash of the tx to query. * @returns Pending or mined depending on its status, or undefined if not found. */ - public getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined> { + public getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | 'deleted' | undefined> { return this.txPool.getTxStatus(txHash); } @@ -687,19 +687,6 @@ export class P2PClient } } - /** - * Deletes txs from these blocks. - * @param blocks - A list of existing blocks with txs that the P2P client needs to ensure the tx pool is reconciled with. - * @returns Empty promise. 
- */ - private async deleteTxsFromBlocks(blocks: L2Block[]): Promise { - this.log.debug(`Deleting txs from blocks ${blocks[0].number} to ${blocks[blocks.length - 1].number}`); - for (const block of blocks) { - const txHashes = block.body.txEffects.map(txEffect => txEffect.txHash); - await this.txPool.deleteTxs(txHashes); - } - } - /** * Handles new mined blocks by marking the txs in them as mined. * @param blocks - A list of existing blocks with txs that the P2P client needs to ensure the tx pool is reconciled with. @@ -765,19 +752,23 @@ export class P2PClient * @returns Empty promise. */ private async handleFinalizedL2Blocks(blocks: L2Block[]): Promise { - this.log.trace(`Handling finalized blocks ${blocks.length} up to ${blocks.at(-1)?.number}`); if (!blocks.length) { return Promise.resolve(); } + this.log.debug(`Handling finalized blocks ${blocks.length} up to ${blocks.at(-1)?.number}`); const lastBlockNum = blocks[blocks.length - 1].number; const lastBlockSlot = blocks[blocks.length - 1].header.getSlot(); - await this.deleteTxsFromBlocks(blocks); + const txHashes = blocks.flatMap(block => block.body.txEffects.map(txEffect => txEffect.txHash)); + this.log.debug(`Deleting ${txHashes.length} txs from pool from finalized blocks up to ${lastBlockNum}`); + await this.txPool.deleteTxs(txHashes, { permanently: true }); + await this.txPool.cleanupDeletedMinedTxs(lastBlockNum); + await this.attestationPool?.deleteAttestationsOlderThan(lastBlockSlot); await this.synchedFinalizedBlockNumber.set(lastBlockNum); - this.log.debug(`Synched to finalized block ${lastBlockNum}`); + this.log.debug(`Synched to finalized block ${lastBlockNum} at slot ${lastBlockSlot}`); await this.startServiceIfSynched(); } @@ -802,6 +793,7 @@ export class P2PClient this.log.info(`Detected chain prune. 
Removing ${txsToDelete.size} txs built against pruned blocks.`, { newLatestBlock: latestBlock, previousLatestBlock: await this.getSyncedLatestBlockNum(), + txsToDelete: Array.from(txsToDelete.keys()), }); // delete invalid txs (both pending and mined) @@ -814,7 +806,8 @@ export class P2PClient // (see this.keepProvenTxsFor) const minedTxsFromReorg: TxHash[] = []; for (const [txHash, blockNumber] of minedTxs) { - if (blockNumber > latestBlock) { + // We keep the txsToDelete out of this list as they have already been deleted above + if (blockNumber > latestBlock && !txsToDelete.has(txHash.toString())) { minedTxsFromReorg.push(txHash); } } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts index cc4cebad5032..d31003f89233 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts @@ -48,6 +48,12 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< /** Index from tx hash to its header hash, filtered by pending txs. */ #pendingTxHashToHeaderHash: AztecAsyncMap; + /** Map from tx hash to the block number it was originally mined in (for soft-deleted txs). */ + #deletedMinedTxHashes: AztecAsyncMap; + + /** MultiMap from block number to deleted mined tx hashes for efficient cleanup. */ + #blockToDeletedMinedTxHash: AztecAsyncMultiMap; + /** The cumulative tx size in bytes that the pending txs in the pool take up. 
*/ #pendingTxSize: AztecAsyncSingleton; @@ -103,6 +109,8 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< this.#pendingTxHashToSize = store.openMap('pendingTxHashToSize'); this.#pendingTxHashToHeaderHash = store.openMap('pendingTxHashToHeaderHash'); this.#pendingTxSize = store.openSingleton('pendingTxSize'); + this.#deletedMinedTxHashes = store.openMap('deletedMinedTxHashes'); + this.#blockToDeletedMinedTxHash = store.openMultiMap('blockToDeletedMinedTxHash'); this.#pendingTxs = new Map(); this.#nonEvictableTxs = new Set(); @@ -152,6 +160,17 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< let pendingTxSize = (await this.#pendingTxSize.getAsync()) ?? 0; for (const hash of txHashes) { const key = hash.toString(); + + // If this tx was previously soft-deleted, remove it from the deleted sets + if (await this.#deletedMinedTxHashes.hasAsync(key)) { + const originalBlock = await this.#deletedMinedTxHashes.getAsync(key); + await this.#deletedMinedTxHashes.delete(key); + // Remove from block-to-hash mapping + if (originalBlock !== undefined) { + await this.#blockToDeletedMinedTxHash.deleteValue(originalBlock, key); + } + } + await this.#minedTxHashToBlock.set(key, blockHeader.globalVariables.blockNumber); const tx = await this.getPendingTxByHash(hash); @@ -200,7 +219,7 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< public async getPendingTxHashes(): Promise { const vals = await toArray(this.#pendingTxPriorityToHash.valuesAsync({ reverse: true })); - return vals.map(x => TxHash.fromString(x)); + return vals.map(TxHash.fromString); } public async getMinedTxHashes(): Promise<[TxHash, number][]> { @@ -216,11 +235,17 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< return (await this.#minedTxHashToBlock.sizeAsync()) ?? 
0; } - public async getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined> { + public async getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | 'deleted' | undefined> { const key = txHash.toString(); - const [isMined, isKnown] = await Promise.all([this.#minedTxHashToBlock.hasAsync(key), this.#txs.hasAsync(key)]); - - if (isMined) { + const [isMined, isKnown, isDeleted] = await Promise.all([ + this.#minedTxHashToBlock.hasAsync(key), + this.#txs.hasAsync(key), + this.#deletedMinedTxHashes.hasAsync(key), + ]); + + if (isDeleted) { + return 'deleted'; + } else if (isMined) { return 'mined'; } else if (isKnown) { return 'pending'; @@ -236,24 +261,12 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< */ public async getTxByHash(txHash: TxHash): Promise { const buffer = await this.#txs.getAsync(txHash.toString()); - if (buffer) { - const tx = Tx.fromBuffer(buffer); - tx.setTxHash(txHash); - return tx; - } - return undefined; + return buffer ? Tx.fromBuffer(buffer) : undefined; } async getTxsByHash(txHashes: TxHash[]): Promise<(Tx | undefined)[]> { const txs = await Promise.all(txHashes.map(txHash => this.#txs.getAsync(txHash.toString()))); - return txs.map((buffer, index) => { - if (buffer) { - const tx = Tx.fromBuffer(buffer); - tx.setTxHash(txHashes[index]); - return tx; - } - return undefined; - }); + return txs.map(buffer => (buffer ? Tx.fromBuffer(buffer) : undefined)); } async hasTxs(txHashes: TxHash[]): Promise { @@ -267,12 +280,7 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< */ public async getArchivedTxByHash(txHash: TxHash): Promise { const buffer = await this.#archivedTxs.getAsync(txHash.toString()); - if (buffer) { - const tx = Tx.fromBuffer(buffer); - tx.setTxHash(txHash); - return tx; - } - return undefined; + return buffer ? 
Tx.fromBuffer(buffer) : undefined; } /** @@ -322,10 +330,11 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< /** * Deletes transactions from the pool. Tx hashes that are not present are ignored. - * @param txHashes - An array of tx hashes to be removed from the tx pool. + * Mined transactions are soft-deleted with a timestamp, pending transactions are permanently deleted. + * @param txHashes - An array of tx hashes to be deleted from the tx pool. * @returns Empty promise. */ - public deleteTxs(txHashes: TxHash[], eviction = false): Promise { + public deleteTxs(txHashes: TxHash[], opts: { eviction?: boolean; permanently?: boolean } = {}): Promise { if (txHashes.length === 0) { return Promise.resolve(); } @@ -337,18 +346,33 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< const tx = await this.getTxByHash(hash); if (tx) { - const isMined = await this.#minedTxHashToBlock.hasAsync(key); - if (!isMined) { + const minedBlockNumber = await this.#minedTxHashToBlock.getAsync(key); + + if (minedBlockNumber !== undefined) { + await this.#minedTxHashToBlock.delete(key); + if (opts.permanently) { + // Permanently delete mined transactions if specified + this.#log.trace(`Deleting mined tx ${key} from pool`); + await this.#txs.delete(key); + } else { + // Soft-delete mined transactions: remove from mined set but keep in storage + this.#log.trace(`Soft-deleting mined tx ${key} from pool`); + await this.#deletedMinedTxHashes.set(key, minedBlockNumber); + await this.#blockToDeletedMinedTxHash.set(minedBlockNumber, key); + } + } else { + // Permanently delete pending transactions + this.#log.trace(`Deleting pending tx ${key} from pool`); pendingTxSize -= tx.getSize(); await this.removePendingTxIndices(tx, key); + await this.#txs.delete(key); } - if (!eviction && this.#archivedTxLimit) { + if (!opts.eviction && this.#archivedTxLimit) { deletedTxs.push(tx); } - - await this.#txs.delete(key); - await 
this.#minedTxHashToBlock.delete(key); + } else { + this.#log.trace(`Skipping deletion of missing tx ${key} from pool`); } } @@ -363,12 +387,8 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< * @returns Array of tx objects in the order they were added to the pool. */ public async getAllTxs(): Promise { - const vals = await toArray(this.#txs.entriesAsync()); - return vals.map(([hash, buffer]) => { - const tx = Tx.fromBuffer(buffer); - tx.setTxHash(TxHash.fromString(hash)); - return tx; - }); + const vals = await toArray(this.#txs.valuesAsync()); + return vals.map(buffer => Tx.fromBuffer(buffer)); } /** @@ -402,6 +422,8 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< assert(archivedTxLimit >= 0, 'archivedTxLimit must be greater or equal to 0'); this.#archivedTxLimit = archivedTxLimit; } + + // deletedMinedCleanupThresholdMs is no longer used in block-based cleanup } public markTxsAsNonEvictable(txHashes: TxHash[]): Promise { @@ -409,6 +431,46 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< return Promise.resolve(); } + /** + * Permanently deletes deleted mined transactions from blocks up to and including the specified block number. + * @param blockNumber - Block number threshold. Deleted mined txs from this block or earlier will be permanently deleted. + * @returns The number of transactions permanently deleted. 
+ */ + public async cleanupDeletedMinedTxs(blockNumber: number): Promise { + let deletedCount = 0; + const txHashesToDelete: string[] = []; + const blocksToDelete: number[] = []; + + await this.#store.transactionAsync(async () => { + // Iterate through all entries and check block numbers + for await (const [block, txHash] of this.#blockToDeletedMinedTxHash.entriesAsync()) { + if (block <= blockNumber) { + // Permanently delete the transaction + await this.#txs.delete(txHash); + await this.#deletedMinedTxHashes.delete(txHash); + txHashesToDelete.push(txHash); + if (!blocksToDelete.includes(block)) { + blocksToDelete.push(block); + } + deletedCount++; + } + } + + // Clean up block-to-hash mapping - delete all values for each block + for (const block of blocksToDelete) { + const txHashesForBlock = await toArray(this.#blockToDeletedMinedTxHash.getValuesAsync(block)); + for (const txHash of txHashesForBlock) { + await this.#blockToDeletedMinedTxHash.deleteValue(block, txHash); + } + } + }); + + if (deletedCount > 0) { + this.#log.debug(`Permanently deleted ${deletedCount} deleted mined txs from blocks up to ${blockNumber}`); + } + return deletedCount; + } + /** * Creates a GasTxValidator instance. 
* @param db - DB for the validator to use @@ -546,7 +608,7 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< } if (txsToEvict.length > 0) { - await this.deleteTxs(txsToEvict, true); + await this.deleteTxs(txsToEvict, { eviction: true }); } return { numLowPriorityTxsEvicted: txsToEvict.length, @@ -620,7 +682,7 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< } if (txsToEvict.length > 0) { - await this.deleteTxs(txsToEvict, true); + await this.deleteTxs(txsToEvict, { eviction: true }); } return txsToEvict.length; } @@ -663,7 +725,7 @@ export class AztecKVTxPool extends (EventEmitter as new () => TypedEventEmitter< } if (txsToEvict.length > 0) { - await this.deleteTxs(txsToEvict, true); + await this.deleteTxs(txsToEvict, { eviction: true }); } return txsToEvict.length; } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts index b4921b3c55a7..113e9d6a8e5d 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool/memory_tx_pool.ts @@ -20,6 +20,8 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter private txs: Map; private minedTxs: Map; private pendingTxs: Set; + private deletedMinedTxHashes: Map; + private blockToDeletedMinedTxHash: Map>; private metrics: PoolInstrumentation; @@ -35,6 +37,8 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter this.txs = new Map(); this.minedTxs = new Map(); this.pendingTxs = new Set(); + this.deletedMinedTxHashes = new Map(); + this.blockToDeletedMinedTxHash = new Map(); this.metrics = new PoolInstrumentation(telemetry, PoolName.TX_POOL, this.countTx); } @@ -54,6 +58,19 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter public markAsMined(txHashes: TxHash[], blockHeader: BlockHeader): Promise { const keys = txHashes.map(x => x.toBigInt()); 
for (const key of keys) { + // If this tx was previously soft-deleted, remove it from the deleted sets + if (this.deletedMinedTxHashes.has(key)) { + const originalBlock = this.deletedMinedTxHashes.get(key)!; + this.deletedMinedTxHashes.delete(key); + // Remove from block-to-hash mapping + const txHashesForBlock = this.blockToDeletedMinedTxHash.get(originalBlock); + if (txHashesForBlock) { + txHashesForBlock.delete(key); + if (txHashesForBlock.size === 0) { + this.blockToDeletedMinedTxHash.delete(originalBlock); + } + } + } this.minedTxs.set(key, blockHeader.globalVariables.blockNumber); this.pendingTxs.delete(key); } @@ -83,7 +100,12 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter (tx1, tx2) => -getPendingTxPriority(tx1).localeCompare(getPendingTxPriority(tx2)), ); const txHashes = await Promise.all(txs.map(tx => tx.getTxHash())); - return txHashes.filter(txHash => this.pendingTxs.has(txHash.toBigInt())); + + // No need to check deleted since pending txs are never soft-deleted + return txHashes.filter(txHash => { + const key = txHash.toBigInt(); + return this.pendingTxs.has(key); + }); } public getMinedTxHashes(): Promise<[TxHash, number][]> { @@ -93,17 +115,22 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter } public getPendingTxCount(): Promise { + // Soft-deleted transactions are always mined, never pending return Promise.resolve(this.pendingTxs.size); } - public getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined> { + public getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | 'deleted' | undefined> { const key = txHash.toBigInt(); - if (this.pendingTxs.has(key)) { - return Promise.resolve('pending'); + + if (this.deletedMinedTxHashes.has(key)) { + return Promise.resolve('deleted'); } if (this.minedTxs.has(key)) { return Promise.resolve('mined'); } + if (this.pendingTxs.has(key)) { + return Promise.resolve('pending'); + } return Promise.resolve(undefined); } @@ -161,15 
+188,34 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter /** * Deletes transactions from the pool. Tx hashes that are not present are ignored. - * @param txHashes - An array of tx hashes to be removed from the tx pool. - * @returns The number of transactions that was deleted from the pool. + * Mined transactions are soft-deleted with a timestamp, pending transactions are permanently deleted. + * @param txHashes - An array of tx hashes to be deleted from the tx pool. + * @returns Empty promise. */ - public deleteTxs(txHashes: TxHash[]): Promise { + public deleteTxs(txHashes: TxHash[], opts?: { permanently?: boolean }): Promise { for (const txHash of txHashes) { const key = txHash.toBigInt(); - this.txs.delete(key); - this.pendingTxs.delete(key); - this.minedTxs.delete(key); + if (this.txs.has(key)) { + if (this.minedTxs.has(key)) { + const blockNumber = this.minedTxs.get(key)!; + this.minedTxs.delete(key); + // Soft-delete mined transactions: remove from mined set but keep in storage + if (opts?.permanently) { + // Permanently delete mined transactions if specified + this.txs.delete(key); + } else { + this.deletedMinedTxHashes.set(key, blockNumber); + if (!this.blockToDeletedMinedTxHash.has(blockNumber)) { + this.blockToDeletedMinedTxHash.set(blockNumber, new Set()); + } + this.blockToDeletedMinedTxHash.get(blockNumber)!.add(key); + } + } else { + // Permanently delete pending transactions + this.txs.delete(key); + this.pendingTxs.delete(key); + } + } } return Promise.resolve(); @@ -196,4 +242,37 @@ export class InMemoryTxPool extends (EventEmitter as new () => TypedEventEmitter markTxsAsNonEvictable(_: TxHash[]): Promise { return Promise.resolve(); } + + /** + * Permanently deletes deleted mined transactions from blocks up to and including the specified block number. + * @param blockNumber - Block number threshold. Deleted mined txs from this block or earlier will be permanently deleted. 
+ * @returns The number of transactions permanently deleted. + */ + public cleanupDeletedMinedTxs(blockNumber: number): Promise { + let deletedCount = 0; + const blocksToDelete: number[] = []; + + // Find all blocks up to the specified block number + for (const [block, txHashes] of this.blockToDeletedMinedTxHash.entries()) { + if (block <= blockNumber) { + // Permanently delete all transactions from this block + for (const txHash of txHashes) { + this.txs.delete(txHash); + this.deletedMinedTxHashes.delete(txHash); + deletedCount++; + } + blocksToDelete.push(block); + } + } + + // Clean up block-to-hash mapping + for (const block of blocksToDelete) { + this.blockToDeletedMinedTxHash.delete(block); + } + + if (deletedCount > 0) { + this.log.debug(`Permanently deleted ${deletedCount} deleted mined txs from blocks up to ${blockNumber}`); + } + return Promise.resolve(deletedCount); + } } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts index bbfc860355e3..f1011641278f 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool.ts @@ -68,7 +68,7 @@ export interface TxPool extends TypedEventEmitter { * Deletes transactions from the pool. Tx hashes that are not present are ignored. * @param txHashes - An array of tx hashes to be removed from the tx pool. */ - deleteTxs(txHashes: TxHash[]): Promise; + deleteTxs(txHashes: TxHash[], opts?: { permanently?: boolean }): Promise; /** * Gets all transactions currently in the tx pool. @@ -98,11 +98,11 @@ export interface TxPool extends TypedEventEmitter { getMinedTxHashes(): Promise<[tx: TxHash, blockNumber: number][]>; /** - * Returns whether the given tx hash is flagged as pending or mined. + * Returns whether the given tx hash is flagged as pending, mined, or deleted. * @param txHash - Hash of the tx to query. - * @returns Pending or mined depending on its status, or undefined if not found. 
+ * @returns Pending, mined, or deleted depending on its status, or undefined if not found. */ - getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | undefined>; + getTxStatus(txHash: TxHash): Promise<'pending' | 'mined' | 'deleted' | undefined>; /** * Configure the maximum size of the tx pool @@ -118,4 +118,11 @@ export interface TxPool extends TypedEventEmitter { * @param txHashes - Hashes of the transactions to mark as non-evictible. */ markTxsAsNonEvictable(txHashes: TxHash[]): Promise; + + /** + * Permanently deletes deleted mined transactions from blocks up to and including the specified block number. + * @param blockNumber - Block number threshold. Deleted mined txs from this block or earlier will be permanently deleted. + * @returns The number of transactions permanently deleted. + */ + cleanupDeletedMinedTxs(blockNumber: number): Promise; } diff --git a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts index 08a276bc4d11..5e3211161ced 100644 --- a/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts +++ b/yarn-project/p2p/src/mem_pools/tx_pool/tx_pool_test_suite.ts @@ -54,14 +54,24 @@ export function describeTxPool(getTxPool: () => TxPool) { expect(txsFromEvent).toEqual(expect.arrayContaining([tx2, tx3])); }); - it('removes txs from the pool', async () => { - const tx1 = await mockTx(); - - await pool.addTxs([tx1]); - await pool.deleteTxs([tx1.getTxHash()]); + it('permanently deletes pending txs and soft-deletes mined txs', async () => { + const pendingTx = await mockTx(1); + const minedTx = await mockTx(2); + + await pool.addTxs([pendingTx, minedTx]); + await pool.markAsMined([minedTx.getTxHash()], minedBlockHeader); + + // Delete a pending tx - should be permanently deleted + await pool.deleteTxs([pendingTx.getTxHash()]); + await expect(pool.getTxByHash(pendingTx.getTxHash())).resolves.toBeUndefined(); + await 
expect(pool.getTxStatus(pendingTx.getTxHash())).resolves.toBeUndefined(); + + // Delete a mined tx - should be soft-deleted (still in storage) + await pool.deleteTxs([minedTx.getTxHash()]); + await expect(pool.getTxByHash(minedTx.getTxHash())).resolves.toBeDefined(); + await expect(pool.getTxStatus(minedTx.getTxHash())).resolves.toEqual('deleted'); + await expect(pool.getMinedTxHashes()).resolves.toEqual([]); - await expect(pool.getTxByHash(tx1.getTxHash())).resolves.toBeFalsy(); - await expect(pool.getTxStatus(tx1.getTxHash())).resolves.toBeUndefined(); await expect(pool.getPendingTxCount()).resolves.toEqual(0); }); @@ -203,4 +213,101 @@ export function describeTxPool(getTxPool: () => TxPool) { expect(poolTxHashes).toHaveLength(4); expect(poolTxHashes).toEqual([tx4, tx1, tx3, tx2].map(tx => tx.getTxHash())); }); + + describe('soft-delete', () => { + it('soft-deletes mined txs and keeps them in storage', async () => { + const txs = await Promise.all([mockTx(1), mockTx(2), mockTx(3)]); + await pool.addTxs(txs); + + // Mark first tx as mined + await pool.markAsMined([txs[0].getTxHash()], minedBlockHeader); + + // Verify initial state + await expect(pool.getPendingTxCount()).resolves.toBe(2); + await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeDefined(); + await expect(pool.getTxByHash(txs[1].getTxHash())).resolves.toBeDefined(); + + // Delete mined tx - should be soft-deleted + await pool.deleteTxs([txs[0].getTxHash()]); + + // Delete pending tx - should be permanently deleted + await pool.deleteTxs([txs[1].getTxHash()]); + + // Verify mined tx still exists in storage but has 'deleted' status + await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeDefined(); + await expect(pool.getTxStatus(txs[0].getTxHash())).resolves.toEqual('deleted'); + + // Verify pending tx is permanently deleted + await expect(pool.getTxByHash(txs[1].getTxHash())).resolves.toBeUndefined(); + await expect(pool.getTxStatus(txs[1].getTxHash())).resolves.toBeUndefined(); + 
+ // Verify remaining pending count + await expect(pool.getPendingTxCount()).resolves.toBe(1); + + // Verify pending hashes don't include deleted txs + const pendingHashes = await pool.getPendingTxHashes(); + expect(pendingHashes).toHaveLength(1); + expect(pendingHashes.map(h => h.toString())).toContain(txs[2].getTxHash().toString()); + }); + + it('cleans up old deleted mined transactions', async () => { + const txs = await Promise.all([mockTx(1), mockTx(2), mockTx(3)]); + await pool.addTxs(txs); + + // Mark first two as mined in block 1 + await pool.markAsMined([txs[0].getTxHash(), txs[1].getTxHash()], minedBlockHeader); + + // Soft-delete mined transactions + await pool.deleteTxs([txs[0].getTxHash(), txs[1].getTxHash()]); + + // Clean up deleted mined txs from block 1 and earlier + const deletedCount = await pool.cleanupDeletedMinedTxs(1); + + // Verify old transactions are permanently deleted + expect(deletedCount).toBe(2); + await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeUndefined(); + await expect(pool.getTxByHash(txs[1].getTxHash())).resolves.toBeUndefined(); + await expect(pool.getTxByHash(txs[2].getTxHash())).resolves.toBeDefined(); + }); + + it('does not clean up recent deleted mined transactions', async () => { + const txs = await Promise.all([mockTx(1), mockTx(2)]); + await pool.addTxs(txs); + + // Mark as mined in block 2 + const laterBlockHeader = BlockHeader.empty({ + globalVariables: GlobalVariables.empty({ blockNumber: 2, timestamp: 0n }), + }); + await pool.markAsMined([txs[0].getTxHash()], laterBlockHeader); + + // Soft-delete a mined transaction + await pool.deleteTxs([txs[0].getTxHash()]); + + // Try to clean up with block 1 (before the mined block) + const deletedCount = await pool.cleanupDeletedMinedTxs(1); + + // Verify no transactions were cleaned up + expect(deletedCount).toBe(0); + await expect(pool.getTxByHash(txs[0].getTxHash())).resolves.toBeDefined(); + }); + + it('restores deleted mined tx when it is mined again', 
async () => { + const tx = await mockTx(1); + await pool.addTxs([tx]); + + // Mark as mined + await pool.markAsMined([tx.getTxHash()], minedBlockHeader); + + // Soft-delete it + await pool.deleteTxs([tx.getTxHash()]); + await expect(pool.getTxStatus(tx.getTxHash())).resolves.toEqual('deleted'); + + // Mark as mined again (e.g., after a reorg) + await pool.markAsMined([tx.getTxHash()], minedBlockHeader); + + // Should be back to mined status + await expect(pool.getTxStatus(tx.getTxHash())).resolves.toEqual('mined'); + await expect(pool.getTxByHash(tx.getTxHash())).resolves.toBeDefined(); + }); + }); } diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.test.ts b/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.test.ts index 2085acbab360..a4b03031ea05 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.test.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.test.ts @@ -1,3 +1,4 @@ +import { Fr } from '@aztec/foundation/fields'; import { mockTx, mockTxForRollup } from '@aztec/stdlib/testing'; import { type AnyTx, TX_ERROR_DUPLICATE_NULLIFIER_IN_TX, TX_ERROR_EXISTING_NULLIFIER } from '@aztec/stdlib/tx'; @@ -27,8 +28,8 @@ describe('DoubleSpendTxValidator', () => { numberOfNonRevertiblePublicCallRequests: 1, numberOfRevertiblePublicCallRequests: 0, }); - badTx.data.forPublic!.nonRevertibleAccumulatedData.nullifiers[1] = - badTx.data.forPublic!.nonRevertibleAccumulatedData.nullifiers[0]; + const nullifiers = badTx.data.forPublic!.nonRevertibleAccumulatedData.nullifiers; + nullifiers[1] = new Fr(nullifiers[0].toBigInt()); await expectInvalid(badTx, TX_ERROR_DUPLICATE_NULLIFIER_IN_TX); }); @@ -38,8 +39,8 @@ describe('DoubleSpendTxValidator', () => { numberOfRevertiblePublicCallRequests: 1, numberOfRevertibleNullifiers: 1, }); - badTx.data.forPublic!.revertibleAccumulatedData.nullifiers[1] = - badTx.data.forPublic!.revertibleAccumulatedData.nullifiers[0]; + 
const nullifiers = badTx.data.forPublic!.revertibleAccumulatedData.nullifiers; + nullifiers[1] = new Fr(nullifiers[0].toBigInt()); await expectInvalid(badTx, TX_ERROR_DUPLICATE_NULLIFIER_IN_TX); }); diff --git a/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.ts b/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.ts index 308cbbd0f976..66b680ba4edb 100644 --- a/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.ts +++ b/yarn-project/p2p/src/msg_validators/tx_validator/double_spend_validator.ts @@ -24,7 +24,7 @@ export class DoubleSpendTxValidator implements TxValidator { const nullifiers = tx instanceof Tx ? tx.data.getNonEmptyNullifiers() : tx.txEffect.nullifiers; // Ditch this tx if it has repeated nullifiers - const uniqueNullifiers = new Set(nullifiers); + const uniqueNullifiers = new Set(nullifiers.map(n => n.toBigInt())); if (uniqueNullifiers.size !== nullifiers.length) { this.#log.verbose(`Rejecting tx ${'txHash' in tx ? tx.txHash : tx.hash} for emitting duplicate nullifiers`); return { result: 'invalid', reason: [TX_ERROR_DUPLICATE_NULLIFIER_IN_TX] }; diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 9bd8195629e0..d25f61e84351 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -453,10 +453,6 @@ export class LibP2PService extends const goodbyeHandler = reqGoodbyeHandler(this.peerManager); const blockHandler = reqRespBlockHandler(this.archiver); const statusHandler = reqRespStatusHandler(this.protocolVersion, this.worldStateSynchronizer, this.logger); - // In case P2P client doesnt'have attestation pool, - // const blockTxsHandler = this.mempools.attestationPool - // ? 
reqRespBlockTxsHandler(this.mempools.attestationPool, this.mempools.txPool) - // : def; const requestResponseHandlers: Partial = { [ReqRespSubProtocol.PING]: pingHandler, diff --git a/yarn-project/p2p/src/services/peer-manager/peer_manager.ts b/yarn-project/p2p/src/services/peer-manager/peer_manager.ts index 7fa8bb57632e..792efa6f4e39 100644 --- a/yarn-project/p2p/src/services/peer-manager/peer_manager.ts +++ b/yarn-project/p2p/src/services/peer-manager/peer_manager.ts @@ -883,7 +883,7 @@ export class PeerManager implements PeerManagerInterface { const response = await this.reqresp.sendRequestToPeer(peerId, ReqRespSubProtocol.AUTH, authRequest.toBuffer()); const { status } = response; if (status !== ReqRespStatus.SUCCESS) { - this.logger.debug(`Disconnecting peer ${peerId} who failed to respond auth handshake`, { + this.logger.verbose(`Disconnecting peer ${peerId} who failed to respond auth handshake`, { peerId, status: ReqRespStatus[status], }); @@ -899,7 +899,7 @@ export class PeerManager implements PeerManagerInterface { const peerStatusMessage = peerAuthResponse.status; if (!ourStatus.validate(peerStatusMessage)) { - this.logger.debug(`Disconnecting peer ${peerId} due to failed status handshake as part of auth.`, logData); + this.logger.verbose(`Disconnecting peer ${peerId} due to failed status handshake as part of auth.`, logData); this.markAuthHandshakeFailed(peerId); this.markPeerForDisconnect(peerId); return; @@ -911,12 +911,9 @@ export class PeerManager implements PeerManagerInterface { const registeredValidators = await this.epochCache.getRegisteredValidators(); const found = registeredValidators.find(v => v.toString() === sender.toString()) !== undefined; if (!found) { - this.logger.debug( + this.logger.verbose( `Disconnecting peer ${peerId} due to failed auth handshake, peer is not a registered validator.`, - { - peerId, - address: sender.toString(), - }, + { ...logData, address: sender.toString() }, ); this.markAuthHandshakeFailed(peerId); 
this.markPeerForDisconnect(peerId); @@ -926,8 +923,9 @@ export class PeerManager implements PeerManagerInterface { // Check to see that this validator address isn't already allocated to a different peer const peerForAddress = this.authenticatedValidatorAddressToPeerId.get(sender.toString()); if (peerForAddress !== undefined && peerForAddress.toString() !== peerIdString) { - this.logger.debug( + this.logger.verbose( `Received auth for validator ${sender.toString()} from peer ${peerIdString}, but this validator is already authenticated to peer ${peerForAddress.toString()}`, + { ...logData, address: sender.toString() }, ); return; } @@ -937,12 +935,13 @@ export class PeerManager implements PeerManagerInterface { this.authenticatedValidatorAddressToPeerId.set(sender.toString(), peerId); this.logger.info( `Successfully completed auth handshake with peer ${peerId}, validator address ${sender.toString()}`, - logData, + { ...logData, address: sender.toString() }, ); } catch (err: any) { //TODO: maybe hard ban these peers in the future - this.logger.debug(`Disconnecting peer ${peerId} due to error during auth handshake: ${err.message ?? 
err}`, { + this.logger.verbose(`Disconnecting peer ${peerId} due to error during auth handshake: ${err.message}`, { peerId, + err, }); this.markAuthHandshakeFailed(peerId); this.markPeerForDisconnect(peerId); diff --git a/yarn-project/p2p/src/services/reqresp/interface.ts b/yarn-project/p2p/src/services/reqresp/interface.ts index 68f838f0a9d1..0666359d90a3 100644 --- a/yarn-project/p2p/src/services/reqresp/interface.ts +++ b/yarn-project/p2p/src/services/reqresp/interface.ts @@ -122,27 +122,6 @@ export type SubProtocolMap = { >; }; -/** - * Default handler for unimplemented sub protocols, this SHOULD be overwritten - * by the service, but is provided as a fallback - */ -export const defaultHandler = (_msg: any): Promise => { - return Promise.resolve(Buffer.from('unimplemented')); -}; - -/** - * Default sub protocol handlers - this SHOULD be overwritten by the service, - */ -export const DEFAULT_SUB_PROTOCOL_HANDLERS: ReqRespSubProtocolHandlers = { - [ReqRespSubProtocol.PING]: defaultHandler, - [ReqRespSubProtocol.STATUS]: defaultHandler, - [ReqRespSubProtocol.TX]: defaultHandler, - [ReqRespSubProtocol.GOODBYE]: defaultHandler, - [ReqRespSubProtocol.BLOCK]: defaultHandler, - [ReqRespSubProtocol.AUTH]: defaultHandler, - [ReqRespSubProtocol.BLOCK_TXS]: defaultHandler, -}; - /** * The Request Response Pair interface defines the methods that each * request response pair must implement diff --git a/yarn-project/p2p/src/services/reqresp/reqresp.ts b/yarn-project/p2p/src/services/reqresp/reqresp.ts index 9f12d44e7790..387a91382236 100644 --- a/yarn-project/p2p/src/services/reqresp/reqresp.ts +++ b/yarn-project/p2p/src/services/reqresp/reqresp.ts @@ -26,7 +26,6 @@ import { import { BatchConnectionSampler } from './connection-sampler/batch_connection_sampler.js'; import { ConnectionSampler, RandomSampler } from './connection-sampler/connection_sampler.js'; import { - DEFAULT_SUB_PROTOCOL_HANDLERS, DEFAULT_SUB_PROTOCOL_VALIDATORS, type ReqRespInterface, type ReqRespResponse, 
@@ -64,9 +63,8 @@ export class ReqResp implements ReqRespInterface { private individualRequestTimeoutMs: number = DEFAULT_INDIVIDUAL_REQUEST_TIMEOUT_MS; private dialTimeoutMs: number = DEFAULT_REQRESP_DIAL_TIMEOUT_MS; - // Warning, if the `start` function is not called as the parent class constructor, then the default sub protocol handlers will be used ( not good ) - private subProtocolHandlers: ReqRespSubProtocolHandlers = DEFAULT_SUB_PROTOCOL_HANDLERS; - private subProtocolValidators: ReqRespSubProtocolValidators = DEFAULT_SUB_PROTOCOL_VALIDATORS; + private subProtocolHandlers: Partial = {}; + private subProtocolValidators: Partial = {}; private connectionSampler: ConnectionSampler; private rateLimiter: RequestResponseRateLimiter; @@ -117,11 +115,12 @@ export class ReqResp implements ReqRespInterface { * Start the reqresp service */ async start(subProtocolHandlers: ReqRespSubProtocolHandlers, subProtocolValidators: ReqRespSubProtocolValidators) { - this.subProtocolHandlers = subProtocolHandlers; - this.subProtocolValidators = subProtocolValidators; + Object.assign(this.subProtocolHandlers, subProtocolHandlers); + Object.assign(this.subProtocolValidators, subProtocolValidators); // Register all protocol handlers - for (const subProtocol of Object.keys(this.subProtocolHandlers)) { + for (const subProtocol of Object.keys(subProtocolHandlers)) { + this.logger.debug(`Registering handler for sub protocol ${subProtocol}`); await this.libp2p.handle( subProtocol, (data: IncomingStreamData) => @@ -140,6 +139,7 @@ export class ReqResp implements ReqRespInterface { ): Promise { this.subProtocolHandlers[subProtocol] = handler; this.subProtocolValidators[subProtocol] = validator; + this.logger.debug(`Registering handler for sub protocol ${subProtocol}`); await this.libp2p.handle( subProtocol, (data: IncomingStreamData) => @@ -209,7 +209,7 @@ export class ReqResp implements ReqRespInterface { maxPeers = Math.max(10, Math.ceil(requests.length / 3)), maxRetryAttempts = 3, ): 
Promise[]> { - const responseValidator = this.subProtocolValidators[subProtocol]; + const responseValidator = this.subProtocolValidators[subProtocol] ?? DEFAULT_SUB_PROTOCOL_VALIDATORS[subProtocol]; const responses: InstanceType[] = new Array(requests.length); const requestBuffers = requests.map(req => req.toBuffer()); @@ -594,7 +594,11 @@ export class ReqResp implements ReqRespInterface { * * */ private async processStream(protocol: ReqRespSubProtocol, { stream, connection }: IncomingStreamData): Promise { - const handler = this.subProtocolHandlers[protocol]!; + const handler = this.subProtocolHandlers[protocol]; + if (!handler) { + throw new Error(`No handler defined for reqresp subprotocol ${protocol}`); + } + const snappy = this.snappyTransform; const SUCCESS = Uint8Array.of(ReqRespStatus.SUCCESS); diff --git a/yarn-project/p2p/src/services/tx_provider.ts b/yarn-project/p2p/src/services/tx_provider.ts index 66091f33594a..0e2459ab4699 100644 --- a/yarn-project/p2p/src/services/tx_provider.ts +++ b/yarn-project/p2p/src/services/tx_provider.ts @@ -45,7 +45,7 @@ export class TxProvider implements ITxProvider { if (tx === undefined) { missingTxs.push(txHashes[i]); } else { - txs.push(tx.setTxHash(txHashes[i])); + txs.push(tx); } } diff --git a/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts b/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts index 131b6361d845..f979280ef780 100644 --- a/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts +++ b/yarn-project/p2p/src/testbench/p2p_client_testbench_worker.ts @@ -54,6 +54,7 @@ function mockTxPool(): TxPool { hasTxs: () => Promise.resolve([]), updateConfig: () => {}, markTxsAsNonEvictable: () => Promise.resolve(), + cleanupDeletedMinedTxs: () => Promise.resolve(0), }; return Object.assign(new EventEmitter(), pool); } diff --git a/yarn-project/prover-node/src/config.ts b/yarn-project/prover-node/src/config.ts index c2ff0a9d9be6..b4d54cfe644d 100644 --- 
a/yarn-project/prover-node/src/config.ts +++ b/yarn-project/prover-node/src/config.ts @@ -118,7 +118,7 @@ export function getProverNodeAgentConfigFromEnv(): ProverAgentConfig & BBConfig }; } -function createKeyStoreFromWeb3Signer(config: ProverNodeConfig) { +function createKeyStoreFromWeb3Signer(config: ProverNodeConfig): KeyStore | undefined { // If we don't have a valid prover Id then we can't build a valid key store with remote signers if (config.proverId === undefined) { return undefined; @@ -144,7 +144,7 @@ function createKeyStoreFromWeb3Signer(config: ProverNodeConfig) { return keyStore; } -function createKeyStoreFromPublisherKeys(config: ProverNodeConfig) { +function createKeyStoreFromPublisherKeys(config: ProverNodeConfig): KeyStore | undefined { // Extract the publisher keys from the provided config. const publisherKeys = config.publisherPrivateKeys ? config.publisherPrivateKeys.map(k => ethPrivateKeySchema.parse(k.getValue())) @@ -174,7 +174,7 @@ function createKeyStoreFromPublisherKeys(config: ProverNodeConfig) { return keyStore; } -export function createKeyStoreForProver(config: ProverNodeConfig) { +export function createKeyStoreForProver(config: ProverNodeConfig): KeyStore | undefined { if (config.web3SignerUrl !== undefined && config.web3SignerUrl.length > 0) { return createKeyStoreFromWeb3Signer(config); } diff --git a/yarn-project/prover-node/src/factory.ts b/yarn-project/prover-node/src/factory.ts index 10e8998954f2..79d734a11f6a 100644 --- a/yarn-project/prover-node/src/factory.ts +++ b/yarn-project/prover-node/src/factory.ts @@ -73,6 +73,8 @@ export async function createProverNode( } } + await keyStoreManager?.validateSigners(); + // Extract the prover signers from the key store and verify that we have one. const proverSigners = keyStoreManager?.createProverSigners(); @@ -141,7 +143,7 @@ export async function createProverNode( deps.publisherFactory ?? 
new ProverPublisherFactory(config, { rollupContract, - publisherManager: new PublisherManager(l1TxUtils), + publisherManager: new PublisherManager(l1TxUtils, config), telemetry, }); diff --git a/yarn-project/prover-node/src/prover-node-publisher.ts b/yarn-project/prover-node/src/prover-node-publisher.ts index ad4ba4af0126..afeef3991d5c 100644 --- a/yarn-project/prover-node/src/prover-node-publisher.ts +++ b/yarn-project/prover-node/src/prover-node-publisher.ts @@ -39,6 +39,7 @@ export type L1SubmitEpochProofArgs = { }; export class ProverNodePublisher { + private enabled: boolean; private interruptibleSleep = new InterruptibleSleep(); private sleepTimeMs: number; private interrupted = false; @@ -58,6 +59,7 @@ export class ProverNodePublisher { telemetry?: TelemetryClient; }, ) { + this.enabled = config.publisherEnabled ?? true; this.sleepTimeMs = config?.l1PublishRetryIntervalMS ?? 60_000; const telemetry = deps.telemetry ?? getTelemetryClient(); @@ -103,6 +105,12 @@ export class ProverNodePublisher { }): Promise { const { epochNumber, fromBlock, toBlock } = args; const ctx = { epochNumber, fromBlock, toBlock }; + + if (!this.enabled) { + this.log.warn(`Publishing L1 txs is disabled`); + return false; + } + if (!this.interrupted) { const timer = new Timer(); // Validate epoch proof range and hashes are correct before submitting diff --git a/yarn-project/sequencer-client/src/client/sequencer-client.ts b/yarn-project/sequencer-client/src/client/sequencer-client.ts index 2f64bbd74dfc..156f0243f235 100644 --- a/yarn-project/sequencer-client/src/client/sequencer-client.ts +++ b/yarn-project/sequencer-client/src/client/sequencer-client.ts @@ -84,7 +84,7 @@ export class SequencerClient { telemetry: telemetryClient, } = deps; const { l1RpcUrls: rpcUrls, l1ChainId: chainId } = config; - const log = createLogger('sequencer-client'); + const log = createLogger('sequencer'); const publicClient = getPublicClient(config); const l1TxUtils = deps.l1TxUtils; const l1Metrics = new 
L1Metrics( @@ -92,7 +92,7 @@ export class SequencerClient { publicClient, l1TxUtils.map(x => x.getSenderAddress()), ); - const publisherManager = new PublisherManager(l1TxUtils); + const publisherManager = new PublisherManager(l1TxUtils, config); const rollupContract = new RollupContract(publicClient, config.l1Contracts.rollupAddress.toString()); const [l1GenesisTime, slotDuration] = await Promise.all([ rollupContract.getL1GenesisTime(), @@ -133,6 +133,7 @@ export class SequencerClient { dateProvider: deps.dateProvider, publisherManager, nodeKeyStore: NodeKeystoreAdapter.fromKeyStoreManager(deps.nodeKeyStore), + logger: log, }); const globalsBuilder = new GlobalVariableBuilder(config); @@ -178,6 +179,7 @@ export class SequencerClient { rollupContract, { ...config, maxL1TxInclusionTimeIntoSlot, maxL2BlockGas: sequencerManaLimit }, telemetryClient, + log, ); await sequencer.init(); diff --git a/yarn-project/sequencer-client/src/publisher/config.ts b/yarn-project/sequencer-client/src/publisher/config.ts index 3bb646417c5a..d8edf16735d8 100644 --- a/yarn-project/sequencer-client/src/publisher/config.ts +++ b/yarn-project/sequencer-client/src/publisher/config.ts @@ -5,7 +5,12 @@ import { l1ReaderConfigMappings, l1TxUtilsConfigMappings, } from '@aztec/ethereum'; -import { type ConfigMappingsType, SecretValue, getConfigFromMappings } from '@aztec/foundation/config'; +import { + type ConfigMappingsType, + SecretValue, + booleanConfigHelper, + getConfigFromMappings, +} from '@aztec/foundation/config'; import { EthAddress } from '@aztec/foundation/eth-address'; /** @@ -21,6 +26,9 @@ export type TxSenderConfig = L1ReaderConfig & { * Publisher addresses to be used with a remote signer */ publisherAddresses?: EthAddress[]; + + /** Whether this publisher is enabled */ + publisherEnabled?: boolean; }; /** @@ -28,10 +36,10 @@ export type TxSenderConfig = L1ReaderConfig & { */ export type PublisherConfig = L1TxUtilsConfig & BlobSinkConfig & { - /** - * The interval to wait between 
publish retries. - */ + /** The interval to wait between publish retries. */ l1PublishRetryIntervalMS: number; + /** True to use publishers in invalid states (timed out, cancelled, etc) if no other is available */ + publisherAllowInvalidStates?: boolean; }; export const getTxSenderConfigMappings: ( @@ -43,7 +51,7 @@ export const getTxSenderConfigMappings: ( description: 'The private keys to be used by the publisher.', parseEnv: (val: string) => val.split(',').map(key => new SecretValue(`0x${key.replace('0x', '')}`)), defaultValue: [], - fallback: scope === 'PROVER' ? ['PROVER_PUBLISHER_PRIVATE_KEY'] : ['SEQ_PUBLISHER_PRIVATE_KEY'], + fallback: [scope === 'PROVER' ? `PROVER_PUBLISHER_PRIVATE_KEY` : `SEQ_PUBLISHER_PRIVATE_KEY`], }, publisherAddresses: { env: scope === 'PROVER' ? `PROVER_PUBLISHER_ADDRESSES` : `SEQ_PUBLISHER_ADDRESSES`, @@ -51,6 +59,11 @@ export const getTxSenderConfigMappings: ( parseEnv: (val: string) => val.split(',').map(address => EthAddress.fromString(address)), defaultValue: [], }, + publisherEnabled: { + env: scope === 'PROVER' ? `PROVER_PUBLISHER_ENABLED` : `SEQ_PUBLISHER_ENABLED`, + description: 'Whether this L1 publisher is enabled', + ...booleanConfigHelper(true), + }, }); export function getTxSenderConfigFromEnv(scope: 'PROVER' | 'SEQ'): Omit { @@ -66,6 +79,11 @@ export const getPublisherConfigMappings: ( defaultValue: 1000, description: 'The interval to wait between publish retries.', }, + publisherAllowInvalidStates: { + description: 'True to use publishers in invalid states (timed out, cancelled, etc) if no other is available', + env: scope === `PROVER` ? 
`PROVER_PUBLISHER_ALLOW_INVALID_STATES` : `SEQ_PUBLISHER_ALLOW_INVALID_STATES`, + ...booleanConfigHelper(false), + }, ...l1TxUtilsConfigMappings, ...blobSinkConfigMapping, }); diff --git a/yarn-project/sequencer-client/src/publisher/index.ts b/yarn-project/sequencer-client/src/publisher/index.ts index c222a1eabd5a..08f9a6e5ca08 100644 --- a/yarn-project/sequencer-client/src/publisher/index.ts +++ b/yarn-project/sequencer-client/src/publisher/index.ts @@ -1,4 +1,4 @@ -export { SequencerPublisher, SignalType } from './sequencer-publisher.js'; +export { SequencerPublisher } from './sequencer-publisher.js'; export { SequencerPublisherFactory } from './sequencer-publisher-factory.js'; // Used for tests diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.ts index 30da032bc387..57b7508f8bfe 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher-factory.ts @@ -1,4 +1,4 @@ -import { EthAddress } from '@aztec/aztec.js'; +import { EthAddress, type Logger, createLogger } from '@aztec/aztec.js'; import type { BlobSinkClientInterface } from '@aztec/blob-sink/client'; import type { EpochCache } from '@aztec/epoch-cache'; import type { GovernanceProposerContract, PublisherFilter, PublisherManager, RollupContract } from '@aztec/ethereum'; @@ -10,7 +10,7 @@ import { NodeKeystoreAdapter } from '@aztec/validator-client'; import type { SequencerClientConfig } from '../config.js'; import { SequencerPublisherMetrics } from './sequencer-publisher-metrics.js'; -import { SequencerPublisher } from './sequencer-publisher.js'; +import { type Action, SequencerPublisher } from './sequencer-publisher.js'; export type AttestorPublisherPair = { attestorAddress: EthAddress; @@ -19,6 +19,12 @@ export type AttestorPublisherPair = { export class SequencerPublisherFactory { private 
publisherMetrics: SequencerPublisherMetrics; + + /** Stores the last slot in which every action was carried out by a publisher */ + private lastActions: Partial> = {}; + + private logger: Logger; + constructor( private sequencerConfig: SequencerClientConfig, private deps: { @@ -31,9 +37,11 @@ export class SequencerPublisherFactory { governanceProposerContract: GovernanceProposerContract; slashFactoryContract: SlashFactoryContract; nodeKeyStore: NodeKeystoreAdapter; + logger?: Logger; }, ) { this.publisherMetrics = new SequencerPublisherMetrics(deps.telemetry, 'SequencerPublisher'); + this.logger = deps.logger ?? createLogger('sequencer'); } /** * Creates a new SequencerPublisher instance. @@ -69,6 +77,8 @@ export class SequencerPublisherFactory { slashFactoryContract: this.deps.slashFactoryContract, dateProvider: this.deps.dateProvider, metrics: this.publisherMetrics, + lastActions: this.lastActions, + log: this.logger.createChild('publisher'), }); return { diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts index 7cbb6c7a408b..fa950408321a 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.test.ts @@ -155,10 +155,10 @@ describe('SequencerPublisher', () => { slashFactoryContract, dateProvider: new TestDateProvider(), metrics: l1Metrics, + lastActions: {}, }); - (publisher as any)['l1TxUtils'] = l1TxUtils; - publisher as any; + publisher.l1TxUtils = l1TxUtils; l1TxUtils.sendAndMonitorTransaction.mockResolvedValue({ receipt: proposeTxReceipt, @@ -224,15 +224,7 @@ describe('SequencerPublisher', () => { }); }; - it('bundles propose and vote tx to l1', async () => { - const kzg = Blob.getViemKzgInstance(); - - const expectedBlobs = await Blob.getBlobsPerBlock(l2Block.body.toBlobFields()); - - // Expect the blob sink server to receive the blobs - await 
runBlobSinkServer(expectedBlobs); - - expect(await publisher.enqueueProposeL2Block(l2Block)).toEqual(true); + const mockGovernancePayload = () => { const govPayload = EthAddress.random(); const voteSig = Signature.random(); governanceProposerContract.getRoundInfo.mockResolvedValue({ @@ -248,7 +240,23 @@ describe('SequencerPublisher', () => { args: [govPayload.toString(), voteSig.toViemSignature()], }), }); + return { govPayload, voteSig }; + }; + + it('bundles propose and vote tx to l1', async () => { + const kzg = Blob.getViemKzgInstance(); + + const expectedBlobs = await Blob.getBlobsPerBlock(l2Block.body.toBlobFields()); + + // Expect the blob sink server to receive the blobs + await runBlobSinkServer(expectedBlobs); + + expect(await publisher.enqueueProposeL2Block(l2Block)).toEqual(true); + + const { govPayload, voteSig } = mockGovernancePayload(); + rollup.getProposerAt.mockResolvedValueOnce(mockForwarderAddress); + expect( await publisher.enqueueGovernanceCastSignal( govPayload, diff --git a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts index 9cce2b3386f6..80ab3887b147 100644 --- a/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts +++ b/yarn-project/sequencer-client/src/publisher/sequencer-publisher.ts @@ -28,7 +28,7 @@ import { sumBigint } from '@aztec/foundation/bigint'; import { toHex as toPaddedHex } from '@aztec/foundation/bigint-buffer'; import { EthAddress } from '@aztec/foundation/eth-address'; import type { Fr } from '@aztec/foundation/fields'; -import { createLogger } from '@aztec/foundation/log'; +import { type Logger, createLogger } from '@aztec/foundation/log'; import { bufferToHex } from '@aztec/foundation/string'; import { DateProvider, Timer } from '@aztec/foundation/timer'; import { EmpireBaseAbi, ErrorsAbi, RollupAbi } from '@aztec/l1-artifacts'; @@ -62,11 +62,6 @@ type L1ProcessArgs = { attestations?: CommitteeAttestation[]; }; -export 
enum SignalType { - GOVERNANCE, - SLASHING, -} - export const Actions = [ 'invalidate-by-invalid-attestation', 'invalidate-by-insufficient-attestations', @@ -78,8 +73,11 @@ export const Actions = [ 'vote-offenses', 'execute-slash', ] as const; + export type Action = (typeof Actions)[number]; +type GovernanceSignalAction = Extract; + // Sorting for actions such that invalidations go before proposals, and proposals go before votes export const compareActions = (a: Action, b: Action) => Actions.indexOf(a) - Actions.indexOf(b); @@ -104,6 +102,7 @@ interface RequestWithExpiry { } export class SequencerPublisher { + private enabled: boolean; private interrupted = false; private metrics: SequencerPublisherMetrics; public epochCache: EpochCache; @@ -111,12 +110,9 @@ export class SequencerPublisher { protected governanceLog = createLogger('sequencer:publisher:governance'); protected slashingLog = createLogger('sequencer:publisher:slashing'); - private myLastSignals: Record = { - [SignalType.GOVERNANCE]: 0n, - [SignalType.SLASHING]: 0n, - }; + protected lastActions: Partial> = {}; - protected log = createLogger('sequencer:publisher'); + protected log: Logger; protected ethereumSlotDuration: bigint; private blobSinkClient: BlobSinkClientInterface; @@ -152,10 +148,15 @@ export class SequencerPublisher { epochCache: EpochCache; dateProvider: DateProvider; metrics: SequencerPublisherMetrics; + lastActions: Partial>; + log?: Logger; }, ) { + this.enabled = config.publisherEnabled ?? true; + this.log = deps.log ?? createLogger('sequencer:publisher'); this.ethereumSlotDuration = BigInt(config.ethereumSlotDuration); this.epochCache = deps.epochCache; + this.lastActions = deps.lastActions; this.blobSinkClient = deps.blobSinkClient ?? createBlobSinkClient(config, { logger: createLogger('sequencer:blob-sink:client') }); @@ -201,6 +202,14 @@ export class SequencerPublisher { * - undefined if no valid requests are found OR the tx failed to send. 
*/ public async sendRequests() { + if (!this.enabled) { + this.log.warn(`Sending L1 txs is disabled`, { + requestsDiscarded: this.requests.map(r => r.action), + }); + this.requests = []; + return undefined; + } + const requestsToProcess = [...this.requests]; this.requests = []; if (this.interrupted) { @@ -528,13 +537,14 @@ export class SequencerPublisher { private async enqueueCastSignalHelper( slotNumber: bigint, timestamp: bigint, - signalType: SignalType, + signalType: GovernanceSignalAction, payload: EthAddress, base: IEmpireBase, signerAddress: EthAddress, signer: (msg: TypedDataDefinition) => Promise<`0x${string}`>, ): Promise { - if (this.myLastSignals[signalType] >= slotNumber) { + if (this.lastActions[signalType] && this.lastActions[signalType] === slotNumber) { + this.log.debug(`Skipping duplicate vote cast signal ${signalType} for slot ${slotNumber}`); return false; } if (payload.equals(EthAddress.ZERO)) { @@ -551,10 +561,9 @@ export class SequencerPublisher { return false; } - const cachedLastVote = this.myLastSignals[signalType]; - this.myLastSignals[signalType] = slotNumber; - - const action = signalType === SignalType.GOVERNANCE ? 
'governance-signal' : 'empire-slashing-signal'; + const cachedLastVote = this.lastActions[signalType]; + this.lastActions[signalType] = slotNumber; + const action = signalType; const request = await base.createSignalRequestWithSignature( payload.toString(), @@ -597,7 +606,7 @@ export class SequencerPublisher { `Signaling in [${action}] for ${payload} at slot ${slotNumber} in round ${round} failed`, logData, ); - this.myLastSignals[signalType] = cachedLastVote; + this.lastActions[signalType] = cachedLastVote; return false; } else { this.log.info( @@ -627,7 +636,7 @@ export class SequencerPublisher { return this.enqueueCastSignalHelper( slotNumber, timestamp, - SignalType.GOVERNANCE, + 'governance-signal', governancePayload, this.govProposerContract, signerAddress, @@ -661,7 +670,7 @@ export class SequencerPublisher { await this.enqueueCastSignalHelper( slotNumber, timestamp, - SignalType.SLASHING, + 'empire-slashing-signal', action.payload, this.slashingProposerContract, signerAddress, @@ -842,16 +851,24 @@ export class SequencerPublisher { } private async simulateAndEnqueueRequest( - action: RequestWithExpiry['action'], + action: Action, request: L1TxRequest, checkSuccess: (receipt: TransactionReceipt) => boolean | undefined, slotNumber: bigint, timestamp: bigint, ) { const logData = { slotNumber, timestamp, gasLimit: undefined as bigint | undefined }; - let gasUsed: bigint; + if (this.lastActions[action] && this.lastActions[action] === slotNumber) { + this.log.debug(`Skipping duplicate action ${action} for slot ${slotNumber}`); + return false; + } - this.log.debug(`Simulating ${action}`, logData); + const cachedLastActionSlot = this.lastActions[action]; + this.lastActions[action] = slotNumber; + + this.log.debug(`Simulating ${action} for slot ${slotNumber}`, logData); + + let gasUsed: bigint; try { ({ gasUsed } = await this.l1TxUtils.simulate(request, { time: timestamp }, [], ErrorsAbi)); // TODO(palla/slash): Check the timestamp logic this.log.verbose(`Simulation 
for ${action} succeeded`, { ...logData, request, gasUsed }); @@ -875,6 +892,7 @@ export class SequencerPublisher { const success = result && result.receipt && result.receipt.status === 'success' && checkSuccess(result.receipt); if (!success) { this.log.warn(`Action ${action} at ${slotNumber} failed`, { ...result, ...logData }); + this.lastActions[action] = cachedLastActionSlot; } else { this.log.info(`Action ${action} at ${slotNumber} succeeded`, { ...result, ...logData }); } diff --git a/yarn-project/sequencer-client/src/sequencer/metrics.ts b/yarn-project/sequencer-client/src/sequencer/metrics.ts index fd91eaa85c7c..2b3538193c6f 100644 --- a/yarn-project/sequencer-client/src/sequencer/metrics.ts +++ b/yarn-project/sequencer-client/src/sequencer/metrics.ts @@ -2,21 +2,19 @@ import type { EthAddress } from '@aztec/aztec.js'; import type { RollupContract } from '@aztec/ethereum'; import { Attributes, - type BatchObservableResult, type Gauge, type Histogram, type Meter, Metrics, - type ObservableGauge, type TelemetryClient, type Tracer, type UpDownCounter, ValueType, } from '@aztec/telemetry-client'; -import { formatUnits } from 'viem'; +import { type Hex, formatUnits } from 'viem'; -import { type SequencerState, type SequencerStateCallback, sequencerStateToNumber } from './utils.js'; +import type { SequencerState } from './utils.js'; export class SequencerMetrics { public readonly tracer: Tracer; @@ -26,9 +24,6 @@ export class SequencerMetrics { private blockBuildDuration: Histogram; private blockBuildManaPerSecond: Gauge; private stateTransitionBufferDuration: Histogram; - private currentBlockNumber: Gauge; - private currentBlockSize: Gauge; - private blockBuilderInsertions: Histogram; // these are gauges because for individual sequencers building a block is not something that happens often enough to warrant a histogram private timeToCollectAttestations: Gauge; @@ -36,18 +31,15 @@ export class SequencerMetrics { private requiredAttestions: Gauge; private 
collectedAttestions: Gauge; - private rewards: ObservableGauge; + private rewards: Gauge; private slots: UpDownCounter; private filledSlots: UpDownCounter; - private missedSlots: UpDownCounter; private lastSeenSlot?: bigint; constructor( client: TelemetryClient, - getState: SequencerStateCallback, - private coinbase: EthAddress, private rollup: RollupContract, name = 'Sequencer', ) { @@ -78,35 +70,7 @@ export class SequencerMetrics { }, ); - const currentState = this.meter.createObservableGauge(Metrics.SEQUENCER_CURRENT_STATE, { - description: 'Current state of the sequencer', - }); - - currentState.addCallback(observer => { - observer.observe(sequencerStateToNumber(getState())); - }); - - this.currentBlockNumber = this.meter.createGauge(Metrics.SEQUENCER_CURRENT_BLOCK_NUMBER, { - description: 'Current block number', - valueType: ValueType.INT, - }); - - this.currentBlockSize = this.meter.createGauge(Metrics.SEQUENCER_CURRENT_BLOCK_SIZE, { - description: 'Current block size', - valueType: ValueType.INT, - }); - - this.blockBuilderInsertions = this.meter.createHistogram(Metrics.SEQUENCER_BLOCK_BUILD_INSERTION_TIME, { - description: 'Timer for tree insertions performed by the block builder', - unit: 'us', - valueType: ValueType.INT, - }); - // Init gauges and counters - this.setCurrentBlock(0, 0); - this.blockCounter.add(0, { - [Attributes.STATUS]: 'cancelled', - }); this.blockCounter.add(0, { [Attributes.STATUS]: 'failed', }); @@ -114,7 +78,7 @@ export class SequencerMetrics { [Attributes.STATUS]: 'built', }); - this.rewards = this.meter.createObservableGauge(Metrics.SEQUENCER_CURRENT_BLOCK_REWARDS, { + this.rewards = this.meter.createGauge(Metrics.SEQUENCER_CURRENT_BLOCK_REWARDS, { valueType: ValueType.DOUBLE, description: 'The rewards earned', }); @@ -124,16 +88,15 @@ export class SequencerMetrics { description: 'The number of slots this sequencer was selected for', }); + /** + * NOTE: we do not track missed slots as a separate metric. 
That would be difficult to determine + * Instead, use a computed metric, `slots - filledSlots` to get the number of slots a sequencer has missed. + */ this.filledSlots = this.meter.createUpDownCounter(Metrics.SEQUENCER_FILLED_SLOT_COUNT, { valueType: ValueType.INT, description: 'The number of slots this sequencer has filled', }); - this.missedSlots = this.meter.createUpDownCounter(Metrics.SEQUENCER_MISSED_SLOT_COUNT, { - valueType: ValueType.INT, - description: 'The number of slots this sequencer has missed to fill', - }); - this.timeToCollectAttestations = this.meter.createGauge(Metrics.SEQUENCER_COLLECT_ATTESTATIONS_DURATION, { description: 'The time spent collecting attestations from committee members', unit: 'ms', @@ -160,28 +123,6 @@ export class SequencerMetrics { }); } - public setCoinbase(coinbase: EthAddress) { - this.coinbase = coinbase; - } - - public start() { - this.meter.addBatchObservableCallback(this.observe, [this.rewards]); - } - - public stop() { - this.meter.removeBatchObservableCallback(this.observe, [this.rewards]); - } - - private observe = async (observer: BatchObservableResult): Promise => { - let rewards = 0n; - rewards = await this.rollup.getSequencerRewards(this.coinbase); - - const fmt = parseFloat(formatUnits(rewards, 18)); - observer.observe(this.rewards, fmt, { - [Attributes.COINBASE]: this.coinbase.toString(), - }); - }; - public recordRequiredAttestations(requiredAttestationsCount: number, allowanceMs: number) { this.requiredAttestions.record(requiredAttestationsCount); this.allowanceToCollectAttestations.record(Math.ceil(allowanceMs)); @@ -196,17 +137,6 @@ export class SequencerMetrics { this.timeToCollectAttestations.record(Math.ceil(durationMs)); } - recordBlockBuilderTreeInsertions(timeUs: number) { - this.blockBuilderInsertions.record(Math.ceil(timeUs)); - } - - recordCancelledBlock() { - this.blockCounter.add(1, { - [Attributes.STATUS]: 'cancelled', - }); - this.setCurrentBlock(0, 0); - } - recordBuiltBlock(buildDurationMs: 
number, totalMana: number) { this.blockCounter.add(1, { [Attributes.STATUS]: 'built', @@ -219,11 +149,6 @@ export class SequencerMetrics { this.blockCounter.add(1, { [Attributes.STATUS]: 'failed', }); - this.setCurrentBlock(0, 0); - } - - recordNewBlock(blockNumber: number, txCount: number) { - this.setCurrentBlock(blockNumber, txCount); } recordStateTransitionBufferMs(durationMs: number, state: SequencerState) { @@ -232,36 +157,35 @@ export class SequencerMetrics { }); } - observeSlotChange(slot: bigint | undefined, proposer: string) { + incOpenSlot(slot: bigint, proposer: string) { // sequencer went through the loop a second time. Noop if (slot === this.lastSeenSlot) { return; } - if (typeof this.lastSeenSlot === 'bigint') { - this.missedSlots.add(1, { - [Attributes.BLOCK_PROPOSER]: proposer, - }); - } - - if (typeof slot === 'bigint') { - this.slots.add(1, { - [Attributes.BLOCK_PROPOSER]: proposer, - }); - } + this.slots.add(1, { + [Attributes.BLOCK_PROPOSER]: proposer, + }); this.lastSeenSlot = slot; } - incFilledSlot(proposer: string) { + async incFilledSlot(proposer: string, coinbase: Hex | EthAddress | undefined): Promise { this.filledSlots.add(1, { [Attributes.BLOCK_PROPOSER]: proposer, }); this.lastSeenSlot = undefined; - } - private setCurrentBlock(blockNumber: number, txCount: number) { - this.currentBlockNumber.record(blockNumber); - this.currentBlockSize.record(txCount); + if (coinbase) { + try { + const rewards = await this.rollup.getSequencerRewards(coinbase); + const fmt = parseFloat(formatUnits(rewards, 18)); + this.rewards.record(fmt, { + [Attributes.COINBASE]: coinbase.toString(), + }); + } catch { + // no-op + } + } } } diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.ts index e2ae0d3fa54c..57657d999cb8 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.ts @@ -1,5 +1,5 @@ import type { 
L2Block } from '@aztec/aztec.js'; -import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; +import { BLOBS_PER_BLOCK, FIELDS_PER_BLOB, INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; import type { EpochCache } from '@aztec/epoch-cache'; import { FormattedViemError, NoCommitteeError, type RollupContract } from '@aztec/ethereum'; import { omit, pick } from '@aztec/foundation/collection'; @@ -127,15 +127,7 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter this.state, coinbase, this.rollupContract, 'Sequencer'); - + this.metrics = new SequencerMetrics(telemetry, this.rollupContract, 'Sequencer'); // Initialize config this.updateConfig(this.config); } @@ -220,7 +212,6 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter { this.log.info(`Stopping sequencer`); - this.metrics.stop(); this.publisher?.interrupt(); await this.validatorClient?.stop(); await this.runningPromise?.stop(); @@ -361,8 +351,6 @@ export class Sequencer extends (EventEmitter as new () => TypedEventEmitter TypedEventEmitter TypedEventEmitter TypedEventEmitter TypedEventEmitter { expect(failed).toEqual([]); }); + it('does not exceed max blob fields limit', async function () { + // Create 3 private-only transactions + const txs = await Promise.all(Array.from([1, 2, 3], seed => mockPrivateOnlyTx({ seed }))); + + // First, let's process one transaction to see how many blob fields it actually has + const [testProcessed] = await processor.process([txs[0]]); + const actualBlobFields = testProcessed[0].txEffect.toBlobFields().length; + + // Set the limit to allow only 2 transactions + // If each tx has `actualBlobFields` fields, we set limit to allow 2 but not 3 + const maxBlobFields = actualBlobFields * 2; + + // Process all 3 transactions with the blob field limit + const [processed, failed] = await processor.process(txs, { maxBlobFields }); + + // Should only process 2 transactions due to blob field limit + 
expect(processed.length).toBe(2); + expect(processed[0].hash).toEqual(txs[0].getTxHash()); + expect(processed[1].hash).toEqual(txs[1].getTxHash()); + expect(failed).toEqual([]); + }); + it('does not send a transaction to the prover if pre validation fails', async function () { const tx = await mockPrivateOnlyTx(); diff --git a/yarn-project/simulator/src/public/public_processor/public_processor.ts b/yarn-project/simulator/src/public/public_processor/public_processor.ts index 70b208020e24..47ed03d1ebb7 100644 --- a/yarn-project/simulator/src/public/public_processor/public_processor.ts +++ b/yarn-project/simulator/src/public/public_processor/public_processor.ts @@ -27,7 +27,6 @@ import { StateReference, Tx, TxExecutionPhase, - type TxValidator, makeProcessedTxFromPrivateOnlyTx, makeProcessedTxFromTxWithPublicCalls, } from '@aztec/stdlib/tx'; @@ -154,7 +153,7 @@ export class PublicProcessor implements Traceable { limits: PublicProcessorLimits = {}, validator: PublicProcessorValidator = {}, ): Promise<[ProcessedTx[], FailedTx[], Tx[], NestedProcessReturnValues[]]> { - const { maxTransactions, maxBlockSize, deadline, maxBlockGas } = limits; + const { maxTransactions, maxBlockSize, deadline, maxBlockGas, maxBlobFields } = limits; const { preprocessValidator, nullifierCache } = validator; const result: ProcessedTx[] = []; const usedTxs: Tx[] = []; @@ -165,6 +164,7 @@ export class PublicProcessor implements Traceable { let returns: NestedProcessReturnValues[] = []; let totalPublicGas = new Gas(0, 0); let totalBlockGas = new Gas(0, 0); + let totalBlobFields = 0; for await (const origTx of txs) { // Only process up to the max tx limit @@ -252,6 +252,23 @@ export class PublicProcessor implements Traceable { continue; } + // If the actual blob fields of this tx would exceed the limit, skip it + const txBlobFields = processedTx.txEffect.toBlobFields().length; + if (maxBlobFields !== undefined && totalBlobFields + txBlobFields > maxBlobFields) { + this.log.debug( + `Skipping 
processed tx ${txHash} with ${txBlobFields} blob fields due to max blob fields limit.`, + { + txHash, + txBlobFields, + totalBlobFields, + maxBlobFields, + }, + ); + // Need to revert the checkpoint here and don't go any further + await checkpoint.revert(); + continue; + } + // FIXME(fcarreiro): it's ugly to have to notify the validator of nullifiers. // I'd rather pass the validators the processedTx as well and let them deal with it. nullifierCache?.addNullifiers(processedTx.txEffect.nullifiers.map(n => n.toBuffer())); @@ -262,6 +279,7 @@ export class PublicProcessor implements Traceable { totalPublicGas = totalPublicGas.add(processedTx.gasUsed.publicGas); totalBlockGas = totalBlockGas.add(processedTx.gasUsed.totalGas); totalSizeInBytes += txSize; + totalBlobFields += txBlobFields; } catch (err: any) { if (err?.name === 'PublicProcessorTimeoutError') { this.log.warn(`Stopping tx processing due to timeout.`); @@ -366,10 +384,7 @@ export class PublicProcessor implements Traceable { return [processedTx, returnValues ?? 
[]]; } - private async doTreeInsertionsForPrivateOnlyTx( - processedTx: ProcessedTx, - txValidator?: TxValidator, - ): Promise { + private async doTreeInsertionsForPrivateOnlyTx(processedTx: ProcessedTx): Promise { const treeInsertionStart = process.hrtime.bigint(); // Update the state so that the next tx in the loop has the correct .startState @@ -388,14 +403,8 @@ export class PublicProcessor implements Traceable { padArrayEnd(processedTx.txEffect.nullifiers, Fr.ZERO, MAX_NULLIFIERS_PER_TX).map(n => n.toBuffer()), NULLIFIER_SUBTREE_HEIGHT, ); - } catch { - if (txValidator) { - // Ideally the validator has already caught this above, but just in case: - throw new Error(`Transaction ${processedTx.hash} invalid after processing public functions`); - } else { - // We have no validator and assume this call should blindly process txs with duplicates being caught later - this.log.warn(`Detected duplicate nullifier after public processing for: ${processedTx.hash}.`); - } + } catch (cause) { + throw new Error(`Transaction ${processedTx.hash} failed with duplicate nullifiers`, { cause }); } const treeInsertionEnd = process.hrtime.bigint(); diff --git a/yarn-project/slasher/src/config.ts b/yarn-project/slasher/src/config.ts index 114de841c75e..ff21ed64321a 100644 --- a/yarn-project/slasher/src/config.ts +++ b/yarn-project/slasher/src/config.ts @@ -29,6 +29,7 @@ export const DefaultSlasherConfig: SlasherConfig = { slashOffenseExpirationRounds: 4, slashMaxPayloadSize: 50, slashGracePeriodL2Slots: 0, + slashExecuteRoundsLookBack: 4, slashSelfAllowed: false, }; @@ -83,7 +84,7 @@ export const slasherConfigMappings: ConfigMappingsType = { }, slashBroadcastedInvalidBlockPenalty: { env: 'SLASH_INVALID_BLOCK_PENALTY', - description: 'Penalty amount for slashing a validator for an invalid block.', + description: 'Penalty amount for slashing a validator for an invalid block proposed via p2p.', ...bigintConfigHelper(DefaultSlasherConfig.slashBroadcastedInvalidBlockPenalty), }, 
slashInactivityTargetPercentage: { @@ -144,6 +145,11 @@ export const slasherConfigMappings: ConfigMappingsType = { env: 'SLASH_GRACE_PERIOD_L2_SLOTS', ...numberConfigHelper(DefaultSlasherConfig.slashGracePeriodL2Slots), }, + slashExecuteRoundsLookBack: { + env: 'SLASH_EXECUTE_ROUNDS_LOOK_BACK', + description: 'How many rounds to look back when searching for a round to execute.', + ...numberConfigHelper(DefaultSlasherConfig.slashExecuteRoundsLookBack), + }, slashSelfAllowed: { description: 'Whether to allow slashes to own validators', ...booleanConfigHelper(DefaultSlasherConfig.slashSelfAllowed), diff --git a/yarn-project/slasher/src/tally_slasher_client.test.ts b/yarn-project/slasher/src/tally_slasher_client.test.ts index 4c1444fe7316..235ed7e016f6 100644 --- a/yarn-project/slasher/src/tally_slasher_client.test.ts +++ b/yarn-project/slasher/src/tally_slasher_client.test.ts @@ -55,6 +55,25 @@ describe('TallySlasherClient', () => { ...DefaultSlasherConfig, slashGracePeriodL2Slots: 10, slashMaxPayloadSize: 100, + slashExecuteRoundsLookBack: 0, + }; + + const executableRoundData = { + isExecuted: false, + readyToExecute: true, + voteCount: 150n, + }; + + const executedRoundData = { + isExecuted: true, + readyToExecute: false, + voteCount: 150n, + }; + + const emptyRoundData = { + isExecuted: false, + readyToExecute: false, + voteCount: 0n, }; const createOffense = ( @@ -127,7 +146,7 @@ describe('TallySlasherClient', () => { slasherContract = mockDeep(); // Setup mock responses - tallySlashingProposer.getRound.mockResolvedValue({ isExecuted: false, readyToExecute: false, voteCount: 0n }); + tallySlashingProposer.getRound.mockResolvedValue({ ...emptyRoundData }); tallySlashingProposer.getTally.mockResolvedValue({ actions: [{ validator: committee[0], slashAmount: slashingUnit }], committees: [committee], @@ -140,6 +159,7 @@ describe('TallySlasherClient', () => { // Setup rollup and slasher contract mocks rollup.getSlasherContract.mockResolvedValue(slasherContract); 
slasherContract.isPayloadVetoed.mockResolvedValue(false); + slasherContract.isSlashingEnabled.mockResolvedValue(true); // Mock event listeners to return unwatch functions tallySlashingProposer.listenToVoteCast.mockReturnValue(() => {}); @@ -170,7 +190,7 @@ describe('TallySlasherClient', () => { it('should return vote-offenses action when offenses are available for the target round', async () => { // Round 5 votes on round 3 (offset of 2) const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); const targetRound = 3n; // Add slot-based offenses for the target round (slots 576-767 are in round 3) @@ -202,7 +222,7 @@ describe('TallySlasherClient', () => { it('should not vote for offenses outside the target round', async () => { const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); const wrongRound = 4n; // Round 5 should vote on round 3, not 4 await offensesStore.addPendingOffense( @@ -219,7 +239,7 @@ describe('TallySlasherClient', () => { it('should handle early rounds where offset cannot be applied', async () => { const currentRound = 0n; - const currentSlot = currentRound * BigInt(roundSize) + 50n; // Round 0 (any slot in round 0) + const currentSlot = currentRound * BigInt(roundSize) + 50n; const action = await tallySlasherClient.getVoteOffensesAction(currentSlot); @@ -228,7 +248,7 @@ describe('TallySlasherClient', () => { it('should use empty committees when epoch cache returns undefined', async () => { const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); const targetRound = 3n; await addPendingOffense({ @@ -283,14 +303,10 @@ describe('TallySlasherClient', () => { describe('execute-slash', () => { it('should return execute-slash action when round is ready to execute', async () => { const 
currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); const executableRound = 2n; // After execution delay of 2: currentRound - delay - 1 = 5 - 2 - 1 = 2 - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: false, - readyToExecute: true, - voteCount: 120n, - }); + tallySlashingProposer.getRound.mockResolvedValueOnce(executableRoundData); const actions = await tallySlasherClient.getProposerActions(currentSlot); @@ -301,64 +317,87 @@ describe('TallySlasherClient', () => { it('should not execute rounds that have already been executed', async () => { const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: true, - readyToExecute: true, - voteCount: 120n, - }); + tallySlashingProposer.getRound.mockResolvedValueOnce(executedRoundData); const actions = await tallySlasherClient.getProposerActions(currentSlot); expect(actions).toEqual([]); }); - it('should not execute rounds not ready to execute', async () => { + it('should not execute rounds with not enough votes', async () => { const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: false, - readyToExecute: false, - voteCount: 120n, - }); + tallySlashingProposer.getRound.mockResolvedValueOnce({ ...executableRoundData, voteCount: 10n }); const actions = await tallySlasherClient.getProposerActions(currentSlot); expect(actions).toEqual([]); }); - it('should not execute rounds with not enough votes', async () => { + it('should not execute rounds with no slash actions', async () => { const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = 
currentRound * BigInt(roundSize); - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: false, - readyToExecute: true, - voteCount: 10n, - }); + tallySlashingProposer.getRound.mockResolvedValueOnce(executableRoundData); + + tallySlashingProposer.getTally.mockResolvedValueOnce({ actions: [], committees: [committee] }); const actions = await tallySlasherClient.getProposerActions(currentSlot); expect(actions).toEqual([]); }); - it('should not execute rounds with no slash actions', async () => { + it('should not execute vetoed rounds', async () => { const currentRound = 5n; - const currentSlot = currentRound * BigInt(roundSize); // Round 5 + const currentSlot = currentRound * BigInt(roundSize); + const executableRound = 2n; // After execution delay of 2: currentRound - delay - 1 = 5 - 2 - 1 = 2 - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: false, - readyToExecute: true, - voteCount: 120n, + tallySlashingProposer.getRound.mockResolvedValueOnce(executableRoundData); + + const payloadAddress = EthAddress.random(); + tallySlashingProposer.getPayload.mockResolvedValue({ + address: payloadAddress, + actions: [{ validator: committee[0], slashAmount: slashingUnit }], }); - tallySlashingProposer.getTally.mockResolvedValueOnce({ actions: [], committees: [committee] }); + slasherContract.isPayloadVetoed.mockResolvedValueOnce(true); + const actions = await tallySlasherClient.getProposerActions(currentSlot); + + expect(actions).toHaveLength(0); + expect(tallySlashingProposer.getRound).toHaveBeenCalledWith(executableRound); + expect(slasherContract.isPayloadVetoed).toHaveBeenCalledWith(payloadAddress); + }); + it('should not execute when slashing is disabled', async () => { + const currentRound = 5n; + const currentSlot = currentRound * BigInt(roundSize); + + slasherContract.isSlashingEnabled.mockResolvedValue(false); const actions = await tallySlasherClient.getProposerActions(currentSlot); - expect(actions).toEqual([]); + 
expect(actions).toHaveLength(0); + }); + + it('should return earliest execute when multiple are available', async () => { + const currentRound = 5n; + const currentSlot = currentRound * BigInt(roundSize); + + tallySlasherClient.updateConfig({ slashExecuteRoundsLookBack: 5 }); + + tallySlashingProposer.getRound + .mockResolvedValueOnce({ ...executedRoundData }) // round 0 + .mockResolvedValueOnce({ ...executableRoundData }); // round 1 + + const actions = await tallySlasherClient.getProposerActions(currentSlot); + + expect(actions).toHaveLength(1); + expectActionExecuteSlash(actions[0], 1n); + expect(tallySlashingProposer.getRound).toHaveBeenCalledTimes(2); + expect(tallySlashingProposer.getRound).toHaveBeenCalledWith(0n); + expect(tallySlashingProposer.getRound).toHaveBeenCalledWith(1n); }); }); @@ -607,11 +646,7 @@ describe('TallySlasherClient', () => { const executionRound = 7n; const executionSlot = executionRound * BigInt(roundSize); const executableRound = executionRound - BigInt(settings.slashingExecutionDelayInRounds) - 1n; // 7 - 2 - 1 = 4 - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: false, - readyToExecute: true, - voteCount: 150n, - }); + tallySlashingProposer.getRound.mockResolvedValueOnce(executableRoundData); const executeActions = await tallySlasherClient.getProposerActions(executionSlot); @@ -619,16 +654,86 @@ describe('TallySlasherClient', () => { expectActionExecuteSlash(executeActions[0], executableRound); // Verify that if round is marked as executed it won't be executed again - tallySlashingProposer.getRound.mockResolvedValueOnce({ - isExecuted: true, - readyToExecute: true, - voteCount: 150n, - }); + tallySlashingProposer.getRound.mockResolvedValueOnce(executedRoundData); const postExecuteActions = await tallySlasherClient.getProposerActions(executionSlot); expect(postExecuteActions).toEqual([]); }); + it('should handle missed execution', async () => { + tallySlasherClient.updateConfig({ slashExecuteRoundsLookBack: 3 
}); + await tallySlasherClient.start(); + + // Round 3: An offense occurs + const offenseRound = 3n; + const validator = committee[0]; + const offense: WantToSlashArgs = { + validator, + amount: settings.slashingAmounts[1], + offenseType: OffenseType.PROPOSED_INSUFFICIENT_ATTESTATIONS, // slot-based + epochOrSlot: offenseRound * BigInt(roundSize), + }; + dummyWatcher.triggerSlash([offense]); + await sleep(100); + + // Round 4: Another offense! + const offenseRound4 = 4n; + const offense4: WantToSlashArgs = { + validator, + amount: settings.slashingAmounts[1], + offenseType: OffenseType.PROPOSED_INSUFFICIENT_ATTESTATIONS, // slot-based + epochOrSlot: offenseRound4 * BigInt(roundSize), + }; + dummyWatcher.triggerSlash([offense4]); + await sleep(100); + + // Round 5: Proposers vote on round 3 offenses + const votingSlot = 5n * BigInt(roundSize); + const voteActions = await tallySlasherClient.getProposerActions(votingSlot); + expect(voteActions).toHaveLength(1); + expectActionVoteOffenses(voteActions[0], 5n, []); + + // Round 6: Proposers vote on round 4 offenses + const votingSlot6 = 6n * BigInt(roundSize); + const voteActions6 = await tallySlasherClient.getProposerActions(votingSlot6); + expect(voteActions6).toHaveLength(1); + expectActionVoteOffenses(voteActions6[0], 6n, []); + + // Assume everything after round 4 inclusive is executable + tallySlashingProposer.getRound.mockImplementation((round: bigint) => + Promise.resolve(round >= 4n ? 
executableRoundData : emptyRoundData), + ); + + // Round 7: Can execute round 4 + const executionRound = 7n; + const executionSlot = executionRound * BigInt(roundSize); + const executableRound = executionRound - BigInt(settings.slashingExecutionDelayInRounds) - 1n; // 7 - 2 - 1 = 4 + expect(executableRound).toBe(4n); + const executeActions = await tallySlasherClient.getProposerActions(executionSlot); + expect(executeActions).toHaveLength(1); + expectActionExecuteSlash(executeActions[0], executableRound); + + // Round 8.0: Assuming no execution on round 7, we should get another chance to execute round 4 + const nextExecutionRound = 8n; + const nextExecutionSlot = nextExecutionRound * BigInt(roundSize); + const nextExecuteActions = await tallySlasherClient.getProposerActions(nextExecutionSlot); + expect(nextExecuteActions).toHaveLength(1); + expectActionExecuteSlash(nextExecuteActions[0], executableRound); + + // Round 8.1: But if there was execution, then we move onto executing round 5 + tallySlashingProposer.getRound.mockImplementation((round: bigint) => + Promise.resolve(round >= 5n ? 
executableRoundData : emptyRoundData), + ); + const executeActionsRound5 = await tallySlasherClient.getProposerActions(nextExecutionSlot + 1n); + expect(executeActionsRound5).toHaveLength(1); + expectActionExecuteSlash(executeActionsRound5[0], 5n); + + // Round 8.2: And if round 5 is executed as well, then nothing left to do + tallySlashingProposer.getRound.mockResolvedValue(executedRoundData); + const noExecuteActions = await tallySlasherClient.getProposerActions(nextExecutionSlot + 1n); + expect(noExecuteActions).toHaveLength(0); + }); + it('should handle multiple offenses with different slash amounts', async () => { const currentRound = 5n; const currentSlot = currentRound * BigInt(roundSize); // Round 5 diff --git a/yarn-project/slasher/src/tally_slasher_client.ts b/yarn-project/slasher/src/tally_slasher_client.ts index 32fba5d70648..b98de1e9ca72 100644 --- a/yarn-project/slasher/src/tally_slasher_client.ts +++ b/yarn-project/slasher/src/tally_slasher_client.ts @@ -1,6 +1,7 @@ import { EthAddress } from '@aztec/aztec.js'; import type { EpochCache } from '@aztec/epoch-cache'; import { RollupContract, SlasherContract, TallySlashingProposerContract } from '@aztec/ethereum/contracts'; +import { maxBigint } from '@aztec/foundation/bigint'; import { compactArray, partition, times } from '@aztec/foundation/collection'; import { createLogger } from '@aztec/foundation/log'; import { sleep } from '@aztec/foundation/sleep'; @@ -45,7 +46,7 @@ export type TallySlasherSettings = Prettify< >; export type TallySlasherClientConfig = SlashOffensesCollectorConfig & - Pick; + Pick; /** * The Tally Slasher client is responsible for managing slashable offenses using @@ -177,26 +178,62 @@ export class TallySlasherClient implements ProposerSlashActionProvider, SlasherC return compactArray([executeAction, voteAction]); } - /** Returns an execute slash action if there are any rounds ready to be executed */ + /** + * Returns an execute slash action if there are any rounds ready to be 
executed. + * Returns the oldest slash action if there are multiple rounds pending execution. + */ protected async getExecuteSlashAction(slotNumber: bigint): Promise { const { round: currentRound } = this.roundMonitor.getRoundForSlot(slotNumber); const slashingExecutionDelayInRounds = BigInt(this.settings.slashingExecutionDelayInRounds); const executableRound = currentRound - slashingExecutionDelayInRounds - 1n; - if (executableRound < 0n) { + const lookBack = BigInt(this.config.slashExecuteRoundsLookBack); + const oldestExecutableRound = maxBigint(0n, executableRound - lookBack); + + // Check if slashing is enabled at all + if (!(await this.slasher.isSlashingEnabled())) { + this.log.warn(`Slashing is disabled in the Slasher contract (skipping execution)`); return undefined; } - let logData: Record = { currentRound, executableRound, slotNumber }; + this.log.debug(`Checking slashing rounds ${oldestExecutableRound} to ${executableRound} to execute`, { + slotNumber, + currentRound, + oldestExecutableRound, + executableRound, + slashingExecutionDelayInRounds, + lookBack, + slashingLifetimeInRounds: this.settings.slashingLifetimeInRounds, + }); + + // Iterate over all rounds, starting from the oldest, until we find one that is executable + for (let roundToCheck = oldestExecutableRound; roundToCheck <= executableRound; roundToCheck++) { + const action = await this.tryGetRoundExecuteAction(roundToCheck); + if (action) { + return action; + } + } + + // And return nothing if none are found + return undefined; + } + + /** + * Checks if a given round is executable and returns an execute-slash action for it if so. + * Assumes round number has already been checked against lifetime and execution delay. 
+ */ + private async tryGetRoundExecuteAction(executableRound: bigint): Promise { + let logData: Record = { executableRound }; + this.log.debug(`Testing if slashing round ${executableRound} is executable`, logData); try { + // Note we do not check isReadyToExecute here, since we already know that based on the + // executableRound number. Not just that, but it may be that we are building for the given slot number + // that is in the future, so the contract may think it's not yet ready to execute, whereas it is. const roundInfo = await this.tallySlashingProposer.getRound(executableRound); logData = { ...logData, roundInfo }; if (roundInfo.isExecuted) { this.log.verbose(`Round ${executableRound} has already been executed`, logData); return undefined; - } else if (!roundInfo.readyToExecute) { - this.log.verbose(`Round ${executableRound} is not ready to execute yet`, logData); - return undefined; } else if (roundInfo.voteCount === 0n) { this.log.debug(`Round ${executableRound} received no votes`, logData); return undefined; @@ -205,6 +242,7 @@ export class TallySlasherClient implements ProposerSlashActionProvider, SlasherC return undefined; } + // Check if the round yields any slashing at all const { actions: slashActions, committees } = await this.tallySlashingProposer.getTally(executableRound); if (slashActions.length === 0) { this.log.verbose(`Round ${executableRound} does not resolve in any slashing`, logData); @@ -239,9 +277,8 @@ export class TallySlasherClient implements ProposerSlashActionProvider, SlasherC return { type: 'execute-slash', round: executableRound, committees: slashedCommittees }; } catch (error) { this.log.error(`Error checking round to execute ${executableRound}`, error); + return undefined; } - - return undefined; } /** Returns a vote action based on offenses from the target round (with offset applied) */ diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts 
index 81684c98279d..a2ca7a0f93b9 100644 --- a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts +++ b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts @@ -2,6 +2,7 @@ import type { EpochCache } from '@aztec/epoch-cache'; import { EthAddress } from '@aztec/foundation/eth-address'; import { sleep } from '@aztec/foundation/sleep'; import { L2Block, type L2BlockSourceEventEmitter, L2BlockSourceEvents } from '@aztec/stdlib/block'; +import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import type { BuildBlockResult, IFullNodeBlockBuilder, @@ -28,6 +29,10 @@ describe('EpochPruneWatcher', () => { let txProvider: MockProxy>; let blockBuilder: MockProxy; let fork: MockProxy; + + let ts: bigint; + let l1Constants: L1RollupConstants; + const validEpochPrunedPenalty = BigInt(1000000000000000000n); const dataWithholdingPenalty = BigInt(2000000000000000000n); @@ -41,6 +46,18 @@ describe('EpochPruneWatcher', () => { fork = mock(); blockBuilder.getFork.mockResolvedValue(fork); + ts = BigInt(Math.ceil(Date.now() / 1000)); + l1Constants = { + l1StartBlock: 1n, + l1GenesisTime: ts, + slotDuration: 24, + epochDuration: 8, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 1, + }; + + epochCache.getL1Constants.mockReturnValue(l1Constants); + watcher = new EpochPruneWatcher(l2BlockSource, l1ToL2MessageSource, epochCache, txProvider, blockBuilder, { slashPrunePenalty: validEpochPrunedPenalty, slashDataWithholdingPenalty: dataWithholdingPenalty, @@ -54,9 +71,10 @@ describe('EpochPruneWatcher', () => { it('should emit WANT_TO_SLASH_EVENT when a validator is in a pruned epoch when data is unavailable', async () => { const emitSpy = jest.spyOn(watcher, 'emit'); + const epochNumber = 1n; const block = await L2Block.random( - 1, // block number + 12, // block number 4, // txs per block ); txProvider.getAvailableTxs.mockResolvedValue({ txs: [], missingTxs: [block.body.txEffects[0].txHash] }); @@ -68,11 +86,11 @@ describe('EpochPruneWatcher', () => { 
epochCache.getCommitteeForEpoch.mockResolvedValue({ committee: committee.map(EthAddress.fromString), seed: 0n, - epoch: 1n, + epoch: epochNumber, }); l2BlockSource.emit(L2BlockSourceEvents.L2PruneDetected, { - epochNumber: 1n, + epochNumber, blocks: [block], type: L2BlockSourceEvents.L2PruneDetected, }); @@ -85,13 +103,13 @@ describe('EpochPruneWatcher', () => { validator: EthAddress.fromString(committee[0]), amount: dataWithholdingPenalty, offenseType: OffenseType.DATA_WITHHOLDING, - epochOrSlot: 1n, + epochOrSlot: epochNumber, }, { validator: EthAddress.fromString(committee[1]), amount: dataWithholdingPenalty, offenseType: OffenseType.DATA_WITHHOLDING, - epochOrSlot: 1n, + epochOrSlot: epochNumber, }, ] satisfies WantToSlashArgs[]); }); @@ -100,7 +118,7 @@ describe('EpochPruneWatcher', () => { const emitSpy = jest.spyOn(watcher, 'emit'); const block = await L2Block.random( - 1, // block number + 12, // block number 4, // txs per block ); const tx = Tx.random(); @@ -152,11 +170,11 @@ describe('EpochPruneWatcher', () => { const emitSpy = jest.spyOn(watcher, 'emit'); const blockFromL1 = await L2Block.random( - 1, // block number + 12, // block number 1, // txs per block ); const blockFromBuilder = await L2Block.random( - 2, // block number + 13, // block number 1, // txs per block ); const tx = Tx.random(); diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts b/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts index 87cb535abcb1..2a62a6ecbd4d 100644 --- a/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts +++ b/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts @@ -1,4 +1,3 @@ -import type { Tx } from '@aztec/aztec.js'; import { EpochCache } from '@aztec/epoch-cache'; import { merge, pick } from '@aztec/foundation/collection'; import { type Logger, createLogger } from '@aztec/foundation/log'; @@ -9,6 +8,7 @@ import { type L2BlockSourceEventEmitter, L2BlockSourceEvents, } from '@aztec/stdlib/block'; +import { getEpochAtSlot } from 
'@aztec/stdlib/epoch-helpers'; import type { IFullNodeBlockBuilder, ITxProvider, @@ -16,7 +16,7 @@ import type { SlasherConfig, } from '@aztec/stdlib/interfaces/server'; import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; -import { OffenseType } from '@aztec/stdlib/slashing'; +import { OffenseType, getOffenseTypeName } from '@aztec/stdlib/slashing'; import { ReExFailedTxsError, ReExStateMismatchError, @@ -78,49 +78,44 @@ export class EpochPruneWatcher extends (EventEmitter as new () => WatcherEmitter private handlePruneL2Blocks(event: L2BlockPruneEvent): void { const { blocks, epochNumber } = event; - this.log.info(`Detected chain prune. Validating epoch ${epochNumber}`); - - this.validateBlocks(blocks) - .then(async () => { - this.log.info(`Pruned epoch ${epochNumber} was valid. Want to slash committee for not having it proven.`); - const validators = await this.getValidatorsForEpoch(epochNumber); - // need to specify return type to be able to return offense as undefined later on - const result: { validators: EthAddress[]; offense: OffenseType | undefined } = { - validators, - offense: OffenseType.VALID_EPOCH_PRUNED, - }; - return result; - }) - .catch(async error => { - if (error instanceof TransactionsNotAvailableError) { - this.log.info(`Data for pruned epoch ${epochNumber} was not available. Will want to slash.`, { - message: error.message, - }); - const validators = await this.getValidatorsForEpoch(epochNumber); - return { - validators, - offense: OffenseType.DATA_WITHHOLDING, - }; - } else { - this.log.error(`Error while validating pruned epoch ${epochNumber}. 
Will not want to slash.`, error); - return { - validators: [], - offense: undefined, - }; - } - }) - .then(({ validators, offense }) => { - if (validators.length === 0 || offense === undefined) { - return; - } - const args = this.validatorsToSlashingArgs(validators, offense, BigInt(epochNumber)); - this.log.info(`Slash for epoch ${epochNumber} created`, args); - this.emit(WANT_TO_SLASH_EVENT, args); - }) - .catch(error => { - // This can happen if we fail to get the validators for the epoch. - this.log.error('Error while creating slash for epoch', error); - }); + void this.processPruneL2Blocks(blocks, epochNumber).catch(err => + this.log.error('Error processing pruned L2 blocks', err, { epochNumber }), + ); + } + + private async emitSlashForEpoch(offense: OffenseType, epochNumber: bigint): Promise { + const validators = await this.getValidatorsForEpoch(epochNumber); + if (validators.length === 0) { + this.log.warn(`No validators found for epoch ${epochNumber} (cannot slash for ${getOffenseTypeName(offense)})`); + return; + } + const args = this.validatorsToSlashingArgs(validators, offense, BigInt(epochNumber)); + this.log.verbose(`Created slash for ${getOffenseTypeName(offense)} at epoch ${epochNumber}`, args); + this.emit(WANT_TO_SLASH_EVENT, args); + } + + private async processPruneL2Blocks(blocks: L2Block[], epochNumber: bigint): Promise { + try { + const l1Constants = this.epochCache.getL1Constants(); + const epochBlocks = blocks.filter(b => getEpochAtSlot(b.slot, l1Constants) === epochNumber); + this.log.info( + `Detected chain prune. Validating epoch ${epochNumber} with blocks ${epochBlocks[0]?.number} to ${epochBlocks[epochBlocks.length - 1]?.number}.`, + { blocks: epochBlocks.map(b => b.toBlockInfo()) }, + ); + + await this.validateBlocks(epochBlocks); + this.log.info(`Pruned epoch ${epochNumber} was valid. 
Want to slash committee for not having it proven.`); + await this.emitSlashForEpoch(OffenseType.VALID_EPOCH_PRUNED, epochNumber); + } catch (error) { + if (error instanceof TransactionsNotAvailableError) { + this.log.info(`Data for pruned epoch ${epochNumber} was not available. Will want to slash.`, { + message: error.message, + }); + await this.emitSlashForEpoch(OffenseType.DATA_WITHHOLDING, epochNumber); + } else { + this.log.error(`Error while validating pruned epoch ${epochNumber}. Will not want to slash.`, error); + } + } } public async validateBlocks(blocks: L2Block[]): Promise { @@ -151,7 +146,7 @@ export class EpochPruneWatcher extends (EventEmitter as new () => WatcherEmitter const l1ToL2Messages = await this.l1ToL2MessageSource.getL1ToL2Messages(blockFromL1.number); const { block, failedTxs, numTxs } = await this.blockBuilder.buildBlock( - txs as Tx[], + txs, l1ToL2Messages, blockFromL1.header.globalVariables, {}, diff --git a/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts b/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts index 034ff8d55767..a33126a57de1 100644 --- a/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts +++ b/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts @@ -157,6 +157,7 @@ class MockAztecNodeAdmin implements AztecNodeAdmin { slashMaxPayloadSize: 50, slashUnknownPenalty: 1000n, slashGracePeriodL2Slots: 0, + slashExecuteRoundsLookBack: 4, slasherClientType: 'tally' as const, disableValidator: false, disabledValidators: [], diff --git a/yarn-project/stdlib/src/interfaces/block-builder.ts b/yarn-project/stdlib/src/interfaces/block-builder.ts index 2e8cde98a35f..53f62ee7169e 100644 --- a/yarn-project/stdlib/src/interfaces/block-builder.ts +++ b/yarn-project/stdlib/src/interfaces/block-builder.ts @@ -43,6 +43,7 @@ export interface PublicProcessorLimits { maxTransactions?: number; maxBlockSize?: number; maxBlockGas?: Gas; + maxBlobFields?: number; deadline?: Date; } diff --git 
a/yarn-project/stdlib/src/interfaces/slasher.ts b/yarn-project/stdlib/src/interfaces/slasher.ts index 01daaf34d442..74fda5dd844d 100644 --- a/yarn-project/stdlib/src/interfaces/slasher.ts +++ b/yarn-project/stdlib/src/interfaces/slasher.ts @@ -24,6 +24,7 @@ export interface SlasherConfig { slashOffenseExpirationRounds: number; // Number of rounds after which pending offenses expire slashMaxPayloadSize: number; // Maximum number of offenses to include in a single slash payload slashGracePeriodL2Slots: number; // Number of L2 slots to wait after genesis before slashing for most offenses + slashExecuteRoundsLookBack: number; // How many rounds to look back when searching for a round to execute } export const SlasherConfigSchema = z.object({ @@ -44,5 +45,6 @@ export const SlasherConfigSchema = z.object({ slashMaxPayloadSize: z.number(), slashGracePeriodL2Slots: z.number(), slashBroadcastedInvalidBlockPenalty: schemas.BigInt, + slashExecuteRoundsLookBack: z.number(), slashSelfAllowed: z.boolean().optional(), }) satisfies ZodFor; diff --git a/yarn-project/stdlib/src/interfaces/validator.ts b/yarn-project/stdlib/src/interfaces/validator.ts index deef6fa6fe3e..55f0fa219ad6 100644 --- a/yarn-project/stdlib/src/interfaces/validator.ts +++ b/yarn-project/stdlib/src/interfaces/validator.ts @@ -28,11 +28,14 @@ export interface ValidatorClientConfig { /** Interval between polling for new attestations from peers */ attestationPollingIntervalMs: number; - /** Re-execute transactions before attesting */ + /** Whether to re-execute transactions in a block proposal before attesting */ validatorReexecute: boolean; /** Will re-execute until this many milliseconds are left in the slot */ validatorReexecuteDeadlineMs: number; + + /** Whether to always reexecute block proposals, even for non-validator nodes or when out of the current committee */ + alwaysReexecuteBlockProposals?: boolean; } export type ValidatorClientFullConfig = ValidatorClientConfig & @@ -46,11 +49,11 @@ export const 
ValidatorClientConfigSchema = z.object({ attestationPollingIntervalMs: z.number().min(0), validatorReexecute: z.boolean(), validatorReexecuteDeadlineMs: z.number().min(0), + alwaysReexecuteBlockProposals: z.boolean().optional(), }) satisfies ZodFor>; export interface Validator { start(): Promise; - registerBlockProposalHandler(): void; updateConfig(config: Partial): void; // Block validation responsibilities diff --git a/yarn-project/stdlib/src/p2p/consensus_payload.ts b/yarn-project/stdlib/src/p2p/consensus_payload.ts index 3deb441d6aa9..53d65dfe0192 100644 --- a/yarn-project/stdlib/src/p2p/consensus_payload.ts +++ b/yarn-project/stdlib/src/p2p/consensus_payload.ts @@ -63,6 +63,14 @@ export class ConsensusPayload implements Signable { return serializeToBuffer([this.header, this.archive, this.stateReference]); } + public equals(other: ConsensusPayload): boolean { + return ( + this.header.equals(other.header) && + this.archive.equals(other.archive) && + this.stateReference.equals(other.stateReference) + ); + } + static fromBuffer(buf: Buffer | BufferReader): ConsensusPayload { const reader = BufferReader.asReader(buf); const payload = new ConsensusPayload( @@ -102,6 +110,14 @@ export class ConsensusPayload implements Signable { return this.size; } + toInspect() { + return { + header: this.header.toInspect(), + archive: this.archive.toString(), + stateReference: this.stateReference.toInspect(), + }; + } + toString() { return `header: ${this.header.toString()}, archive: ${this.archive.toString()}, stateReference: ${this.stateReference.l1ToL2MessageTree.root.toString()}`; } diff --git a/yarn-project/stdlib/src/slashing/types.ts b/yarn-project/stdlib/src/slashing/types.ts index 39c7e7f7c671..e58bf64232d5 100644 --- a/yarn-project/stdlib/src/slashing/types.ts +++ b/yarn-project/stdlib/src/slashing/types.ts @@ -22,6 +22,29 @@ export enum OffenseType { ATTESTED_DESCENDANT_OF_INVALID = 7, } +export function getOffenseTypeName(offense: OffenseType) { + switch (offense) { + 
case OffenseType.UNKNOWN: + return 'unknown'; + case OffenseType.DATA_WITHHOLDING: + return 'data_withholding'; + case OffenseType.VALID_EPOCH_PRUNED: + return 'valid_epoch_pruned'; + case OffenseType.INACTIVITY: + return 'inactivity'; + case OffenseType.BROADCASTED_INVALID_BLOCK_PROPOSAL: + return 'broadcasted_invalid_block_proposal'; + case OffenseType.PROPOSED_INSUFFICIENT_ATTESTATIONS: + return 'proposed_insufficient_attestations'; + case OffenseType.PROPOSED_INCORRECT_ATTESTATIONS: + return 'proposed_incorrect_attestations'; + case OffenseType.ATTESTED_DESCENDANT_OF_INVALID: + return 'attested_descendant_of_invalid'; + default: + throw new Error(`Unknown offense type: ${offense}`); + } +} + export const OffenseTypeSchema = z.nativeEnum(OffenseType); export const OffenseToBigInt: Record = { diff --git a/yarn-project/stdlib/src/tx/proposed_block_header.ts b/yarn-project/stdlib/src/tx/proposed_block_header.ts index ffbbe88f3a39..7b4ddd86c44f 100644 --- a/yarn-project/stdlib/src/tx/proposed_block_header.ts +++ b/yarn-project/stdlib/src/tx/proposed_block_header.ts @@ -84,6 +84,19 @@ export class ProposedBlockHeader { ); } + equals(other: ProposedBlockHeader): boolean { + return ( + this.lastArchiveRoot.equals(other.lastArchiveRoot) && + this.contentCommitment.equals(other.contentCommitment) && + this.slotNumber.equals(other.slotNumber) && + this.timestamp === other.timestamp && + this.coinbase.equals(other.coinbase) && + this.feeRecipient.equals(other.feeRecipient) && + this.gasFees.equals(other.gasFees) && + this.totalManaUsed.equals(other.totalManaUsed) + ); + } + toBuffer() { // Note: The order here must match the order in the ProposedHeaderLib solidity library. 
return serializeToBuffer([ diff --git a/yarn-project/stdlib/src/tx/state_reference.ts b/yarn-project/stdlib/src/tx/state_reference.ts index 017516d72b2e..7267a85252da 100644 --- a/yarn-project/stdlib/src/tx/state_reference.ts +++ b/yarn-project/stdlib/src/tx/state_reference.ts @@ -137,6 +137,6 @@ export class StateReference { } public equals(other: this): boolean { - return this.l1ToL2MessageTree.root.equals(other.l1ToL2MessageTree.root) && this.partial.equals(other.partial); + return this.l1ToL2MessageTree.equals(other.l1ToL2MessageTree) && this.partial.equals(other.partial); } } diff --git a/yarn-project/stdlib/src/tx/tx.ts b/yarn-project/stdlib/src/tx/tx.ts index 57ff42f76686..f60016af8c95 100644 --- a/yarn-project/stdlib/src/tx/tx.ts +++ b/yarn-project/stdlib/src/tx/tx.ts @@ -207,16 +207,6 @@ export class Tx extends Gossipable { return this.txHash; } - /** - * Allows setting the hash of the Tx. - * Use this when you want to skip computing it from the original data. - * Don't set a Tx hash received from an untrusted source. - * @param hash - The hash to set. - */ - setTxHash(_hash: TxHash) { - return this; - } - getCalldataMap(): Map { if (!this.calldataMap) { const calldataMap = new Map(); diff --git a/yarn-project/telemetry-client/src/attributes.ts b/yarn-project/telemetry-client/src/attributes.ts index 9fa073c471a6..e3c7971f1904 100644 --- a/yarn-project/telemetry-client/src/attributes.ts +++ b/yarn-project/telemetry-client/src/attributes.ts @@ -79,12 +79,14 @@ export const P2P_GOODBYE_REASON = 'aztec.p2p.goodbye.reason'; export const PROVING_JOB_TYPE = 'aztec.proving.job_type'; /** The proving job id */ export const PROVING_JOB_ID = 'aztec.proving.job_id'; - +/** Merkle tree name */ export const MERKLE_TREE_NAME = 'aztec.merkle_tree.name'; /** The prover-id in a root rollup proof. 
*/ export const ROLLUP_PROVER_ID = 'aztec.rollup.prover_id'; /** Whether the proof submission was timed out (delayed more than 20 min) */ export const PROOF_TIMED_OUT = 'aztec.proof.timed_out'; +/** Status of the validator (eg proposer, in-committee, none) */ +export const VALIDATOR_STATUS = 'aztec.validator_status'; export const P2P_ID = 'aztec.p2p.id'; export const P2P_REQ_RESP_PROTOCOL = 'aztec.p2p.req_resp.protocol'; diff --git a/yarn-project/telemetry-client/src/metrics.ts b/yarn-project/telemetry-client/src/metrics.ts index 872e9855e968..81a6009ab4c4 100644 --- a/yarn-project/telemetry-client/src/metrics.ts +++ b/yarn-project/telemetry-client/src/metrics.ts @@ -67,14 +67,9 @@ export const SEQUENCER_STATE_TRANSITION_BUFFER_DURATION = 'aztec.sequencer.state export const SEQUENCER_BLOCK_BUILD_DURATION = 'aztec.sequencer.block.build_duration'; export const SEQUENCER_BLOCK_BUILD_MANA_PER_SECOND = 'aztec.sequencer.block.build_mana_per_second'; export const SEQUENCER_BLOCK_COUNT = 'aztec.sequencer.block.count'; -export const SEQUENCER_CURRENT_STATE = 'aztec.sequencer.current.state'; -export const SEQUENCER_CURRENT_BLOCK_NUMBER = 'aztec.sequencer.current.block_number'; -export const SEQUENCER_CURRENT_BLOCK_SIZE = 'aztec.sequencer.current.block_size'; -export const SEQUENCER_BLOCK_BUILD_INSERTION_TIME = 'aztec.sequencer.block_builder_tree_insertion_duration'; export const SEQUENCER_CURRENT_BLOCK_REWARDS = 'aztec.sequencer.current_block_rewards'; export const SEQUENCER_SLOT_COUNT = 'aztec.sequencer.slot.total_count'; export const SEQUENCER_FILLED_SLOT_COUNT = 'aztec.sequencer.slot.filled_count'; -export const SEQUENCER_MISSED_SLOT_COUNT = 'aztec.sequencer.slot.missed_count'; export const SEQUENCER_COLLECTED_ATTESTATIONS_COUNT = 'aztec.sequencer.attestations.collected_count'; export const SEQUENCER_REQUIRED_ATTESTATIONS_COUNT = 'aztec.sequencer.attestations.required_count'; diff --git a/yarn-project/telemetry-client/src/nodejs_metrics_monitor.ts 
b/yarn-project/telemetry-client/src/nodejs_metrics_monitor.ts index e00a60e0b64e..c2c303a1a11f 100644 --- a/yarn-project/telemetry-client/src/nodejs_metrics_monitor.ts +++ b/yarn-project/telemetry-client/src/nodejs_metrics_monitor.ts @@ -1,3 +1,4 @@ +import type { Observable } from '@opentelemetry/api'; import { type EventLoopUtilization, type IntervalHistogram, monitorEventLoopDelay, performance } from 'node:perf_hooks'; import * as Attributes from './attributes.js'; @@ -142,17 +143,26 @@ export class NodejsMetricsMonitor { // - https://youtu.be/WetXnEPraYM obs.observe(this.eventLoopUilization, delta.utilization); - this.eventLoopTime.add(Math.floor(delta.idle), { [Attributes.NODEJS_EVENT_LOOP_STATE]: 'idle' }); - this.eventLoopTime.add(Math.floor(delta.active), { [Attributes.NODEJS_EVENT_LOOP_STATE]: 'active' }); + this.eventLoopTime.add(Math.trunc(delta.idle), { [Attributes.NODEJS_EVENT_LOOP_STATE]: 'idle' }); + this.eventLoopTime.add(Math.trunc(delta.active), { [Attributes.NODEJS_EVENT_LOOP_STATE]: 'active' }); - obs.observe(this.eventLoopDelayGauges.min, Math.floor(this.eventLoopDelay.min)); - obs.observe(this.eventLoopDelayGauges.mean, Math.floor(this.eventLoopDelay.mean)); - obs.observe(this.eventLoopDelayGauges.max, Math.floor(this.eventLoopDelay.max)); - obs.observe(this.eventLoopDelayGauges.stddev, Math.floor(this.eventLoopDelay.stddev)); - obs.observe(this.eventLoopDelayGauges.p50, Math.floor(this.eventLoopDelay.percentile(50))); - obs.observe(this.eventLoopDelayGauges.p90, Math.floor(this.eventLoopDelay.percentile(90))); - obs.observe(this.eventLoopDelayGauges.p99, Math.floor(this.eventLoopDelay.percentile(99))); + safeObserveInt(obs, this.eventLoopDelayGauges.min, this.eventLoopDelay.min); + safeObserveInt(obs, this.eventLoopDelayGauges.mean, this.eventLoopDelay.mean); + safeObserveInt(obs, this.eventLoopDelayGauges.max, this.eventLoopDelay.max); + safeObserveInt(obs, this.eventLoopDelayGauges.stddev, this.eventLoopDelay.stddev); + safeObserveInt(obs, 
this.eventLoopDelayGauges.p50, this.eventLoopDelay.percentile(50)); + safeObserveInt(obs, this.eventLoopDelayGauges.p90, this.eventLoopDelay.percentile(90)); + safeObserveInt(obs, this.eventLoopDelayGauges.p99, this.eventLoopDelay.percentile(99)); this.eventLoopDelay.reset(); }; } + +function safeObserveInt(observer: BatchObservableResult, metric: Observable, value: number, attrs?: object) { + // discard NaN, Infinity, -Infinity + if (!Number.isFinite(value)) { + return; + } + + observer.observe(metric, Math.trunc(value), attrs); +} diff --git a/yarn-project/validator-client/src/block_proposal_handler.ts b/yarn-project/validator-client/src/block_proposal_handler.ts new file mode 100644 index 000000000000..d0b7a218b68e --- /dev/null +++ b/yarn-project/validator-client/src/block_proposal_handler.ts @@ -0,0 +1,314 @@ +import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; +import { Fr } from '@aztec/foundation/fields'; +import { createLogger } from '@aztec/foundation/log'; +import { retryUntil } from '@aztec/foundation/retry'; +import { DateProvider, Timer } from '@aztec/foundation/timer'; +import type { P2P, PeerId } from '@aztec/p2p'; +import { TxProvider } from '@aztec/p2p'; +import { BlockProposalValidator } from '@aztec/p2p/msg_validators'; +import { computeInHashFromL1ToL2Messages } from '@aztec/prover-client/helpers'; +import type { L2BlockSource } from '@aztec/stdlib/block'; +import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; +import type { IFullNodeBlockBuilder, ValidatorClientFullConfig } from '@aztec/stdlib/interfaces/server'; +import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; +import { type BlockProposal, ConsensusPayload } from '@aztec/stdlib/p2p'; +import { type FailedTx, GlobalVariables, type Tx } from '@aztec/stdlib/tx'; +import { + ReExFailedTxsError, + ReExStateMismatchError, + ReExTimeoutError, + TransactionsNotAvailableError, +} from '@aztec/stdlib/validators'; +import { type TelemetryClient, type Tracer, 
getTelemetryClient } from '@aztec/telemetry-client'; + +import type { ValidatorMetrics } from './metrics.js'; + +export type BlockProposalValidationFailureReason = + | 'invalid_proposal' + | 'parent_block_not_found' + | 'parent_block_does_not_match' + | 'in_hash_mismatch' + | 'block_number_already_exists' + | 'txs_not_available' + | 'state_mismatch' + | 'failed_txs' + | 'timeout' + | 'unknown_error'; + +export interface BlockProposalValidationResult { + isValid: boolean; + reason?: BlockProposalValidationFailureReason; + reexecutionResult?: { + block: any; + failedTxs: FailedTx[]; + reexecutionTimeMs: number; + totalManaUsed: number; + }; +} + +export class BlockProposalHandler { + public readonly tracer: Tracer; + + constructor( + private blockBuilder: IFullNodeBlockBuilder, + private blockSource: L2BlockSource, + private l1ToL2MessageSource: L1ToL2MessageSource, + private txProvider: TxProvider, + private blockProposalValidator: BlockProposalValidator, + private config: ValidatorClientFullConfig, + private metrics?: ValidatorMetrics, + private dateProvider: DateProvider = new DateProvider(), + telemetry: TelemetryClient = getTelemetryClient(), + private log = createLogger('validator:block-proposal-handler'), + ) { + this.tracer = telemetry.getTracer('BlockProposalHandler'); + } + + registerForReexecution(p2pClient: P2P): BlockProposalHandler { + const handler = async (proposal: BlockProposal, proposalSender: PeerId) => { + try { + const result = await this.handleBlockProposal(proposal, proposalSender, true); + if (result.isValid && result.reexecutionResult) { + this.log.info(`Non-validator reexecution completed for slot ${proposal.slotNumber.toBigInt()}`, { + blockNumber: proposal.blockNumber, + reexecutionTimeMs: result.reexecutionResult.reexecutionTimeMs, + totalManaUsed: result.reexecutionResult.totalManaUsed, + numTxs: result.reexecutionResult.block?.body?.txEffects?.length ?? 
0, + }); + } else { + this.log.warn(`Non-validator reexecution failed for slot ${proposal.slotNumber.toBigInt()}`, { + blockNumber: proposal.blockNumber, + reason: result.reason, + }); + } + } catch (error) { + this.log.error('Error processing block proposal in non-validator handler', error); + } + return undefined; // Non-validator nodes don't return attestations + }; + + p2pClient.registerBlockProposalHandler(handler); + return this; + } + + async handleBlockProposal( + proposal: BlockProposal, + proposalSender: PeerId, + shouldReexecute: boolean, + ): Promise { + const slotNumber = proposal.slotNumber.toBigInt(); + const blockNumber = proposal.blockNumber; + const proposer = proposal.getSender(); + + const proposalInfo = { ...proposal.toBlockInfo(), proposer: proposer.toString() }; + this.log.info(`Processing proposal for slot ${slotNumber}`, { + ...proposalInfo, + txHashes: proposal.txHashes.map(t => t.toString()), + }); + + // Check that the proposal is from the current proposer, or the next proposer + // This should have been handled by the p2p layer, but we double check here out of caution + const invalidProposal = await this.blockProposalValidator.validate(proposal); + if (invalidProposal) { + this.log.warn(`Proposal is not valid, skipping processing`, proposalInfo); + return { isValid: false, reason: 'invalid_proposal' }; + } + + // Collect txs from the proposal. We start doing this as early as possible, + // and we do it even if we don't plan to re-execute the txs, so that we have them + // if another node needs them. 
+ const config = this.blockBuilder.getConfig(); + const { txs, missingTxs } = await this.txProvider.getTxsForBlockProposal(proposal, { + pinnedPeer: proposalSender, + deadline: this.getReexecutionDeadline(proposal, config), + }); + + // Check that the parent proposal is a block we know, otherwise reexecution would fail + if (blockNumber > INITIAL_L2_BLOCK_NUM) { + const deadline = this.getReexecutionDeadline(proposal, config); + const currentTime = this.dateProvider.now(); + const timeoutDurationMs = deadline.getTime() - currentTime; + const parentBlock = + timeoutDurationMs <= 0 + ? undefined + : await retryUntil( + async () => { + const block = await this.blockSource.getBlock(blockNumber - 1); + if (block) { + return block; + } + await this.blockSource.syncImmediate(); + return await this.blockSource.getBlock(blockNumber - 1); + }, + 'Force Archiver Sync', + timeoutDurationMs / 1000, + 0.5, + ); + + if (parentBlock === undefined) { + this.log.warn(`Parent block for ${blockNumber} not found, skipping processing`, proposalInfo); + return { isValid: false, reason: 'parent_block_not_found' }; + } + + if (!proposal.payload.header.lastArchiveRoot.equals(parentBlock.archive.root)) { + this.log.warn(`Parent block archive root for proposal does not match, skipping processing`, { + proposalLastArchiveRoot: proposal.payload.header.lastArchiveRoot.toString(), + parentBlockArchiveRoot: parentBlock.archive.root.toString(), + ...proposalInfo, + }); + return { isValid: false, reason: 'parent_block_does_not_match' }; + } + } + + // Check that I have the same set of l1ToL2Messages as the proposal + const l1ToL2Messages = await this.l1ToL2MessageSource.getL1ToL2Messages(blockNumber); + const computedInHash = await computeInHashFromL1ToL2Messages(l1ToL2Messages); + const proposalInHash = proposal.payload.header.contentCommitment.inHash; + if (!computedInHash.equals(proposalInHash)) { + this.log.warn(`L1 to L2 messages in hash mismatch, skipping processing`, { + proposalInHash: 
proposalInHash.toString(), + computedInHash: computedInHash.toString(), + ...proposalInfo, + }); + return { isValid: false, reason: 'in_hash_mismatch' }; + } + + // Check that this block number does not exist already + const existingBlock = await this.blockSource.getBlockHeader(blockNumber); + if (existingBlock) { + this.log.warn(`Block number ${blockNumber} already exists, skipping processing`, proposalInfo); + return { isValid: false, reason: 'block_number_already_exists' }; + } + + // Check that all of the transactions in the proposal are available + if (missingTxs.length > 0) { + this.log.warn(`Missing ${missingTxs.length} txs to process proposal`, { ...proposalInfo, missingTxs }); + return { isValid: false, reason: 'txs_not_available' }; + } + + // Try re-executing the transactions in the proposal if needed + let reexecutionResult; + if (shouldReexecute) { + try { + this.log.verbose(`Re-executing transactions in the proposal`, proposalInfo); + reexecutionResult = await this.reexecuteTransactions(proposal, txs, l1ToL2Messages); + } catch (error) { + this.log.error(`Error reexecuting txs while processing block proposal`, error, proposalInfo); + const reason = this.getReexecuteFailureReason(error); + return { isValid: false, reason, reexecutionResult }; + } + } + + this.log.info(`Successfully processed proposal for slot ${slotNumber}`, proposalInfo); + return { isValid: true, reexecutionResult }; + } + + private getReexecutionDeadline( + proposal: BlockProposal, + config: { l1GenesisTime: bigint; slotDuration: number }, + ): Date { + const nextSlotTimestampSeconds = Number(getTimestampForSlot(proposal.slotNumber.toBigInt() + 1n, config)); + const msNeededForPropagationAndPublishing = this.config.validatorReexecuteDeadlineMs; + return new Date(nextSlotTimestampSeconds * 1000 - msNeededForPropagationAndPublishing); + } + + private getReexecuteFailureReason(err: any) { + if (err instanceof ReExStateMismatchError) { + return 'state_mismatch'; + } else if (err 
instanceof ReExFailedTxsError) { + return 'failed_txs'; + } else if (err instanceof ReExTimeoutError) { + return 'timeout'; + } else if (err instanceof Error) { + return 'unknown_error'; + } + } + + async reexecuteTransactions( + proposal: BlockProposal, + txs: Tx[], + l1ToL2Messages: Fr[], + ): Promise<{ + block: any; + failedTxs: FailedTx[]; + reexecutionTimeMs: number; + totalManaUsed: number; + }> { + const { header } = proposal.payload; + const { txHashes } = proposal; + + // If we do not have all of the transactions, then we should fail + if (txs.length !== txHashes.length) { + const foundTxHashes = txs.map(tx => tx.getTxHash()); + const missingTxHashes = txHashes.filter(txHash => !foundTxHashes.includes(txHash)); + throw new TransactionsNotAvailableError(missingTxHashes); + } + + // Use the sequencer's block building logic to re-execute the transactions + const timer = new Timer(); + const config = this.blockBuilder.getConfig(); + + // We source most global variables from the proposal + const globalVariables = GlobalVariables.from({ + slotNumber: proposal.payload.header.slotNumber, // checked in the block proposal validator + coinbase: proposal.payload.header.coinbase, // set arbitrarily by the proposer + feeRecipient: proposal.payload.header.feeRecipient, // set arbitrarily by the proposer + gasFees: proposal.payload.header.gasFees, // validated by the rollup contract + blockNumber: proposal.blockNumber, // checked blockNumber-1 exists in archiver but blockNumber doesnt + timestamp: header.timestamp, // checked in the rollup contract against the slot number + chainId: new Fr(config.l1ChainId), + version: new Fr(config.rollupVersion), + }); + + const { block, failedTxs } = await this.blockBuilder.buildBlock(txs, l1ToL2Messages, globalVariables, { + deadline: this.getReexecutionDeadline(proposal, config), + }); + + const numFailedTxs = failedTxs.length; + const slot = proposal.slotNumber; + this.log.verbose(`Transaction re-execution complete for slot 
${slot}`, { + numFailedTxs, + numProposalTxs: txHashes.length, + numProcessedTxs: block.body.txEffects.length, + slot, + }); + + if (numFailedTxs > 0) { + this.metrics?.recordFailedReexecution(proposal); + throw new ReExFailedTxsError(numFailedTxs); + } + + if (block.body.txEffects.length !== txHashes.length) { + this.metrics?.recordFailedReexecution(proposal); + throw new ReExTimeoutError(); + } + + // Throw a ReExStateMismatchError error if state updates do not match + const blockPayload = ConsensusPayload.fromBlock(block); + if (!blockPayload.equals(proposal.payload)) { + this.log.warn(`Re-execution state mismatch for slot ${slot}`, { + expected: blockPayload.toInspect(), + actual: proposal.payload.toInspect(), + }); + this.metrics?.recordFailedReexecution(proposal); + throw new ReExStateMismatchError( + proposal.archive, + block.archive.root, + proposal.payload.stateReference, + block.header.state, + ); + } + + const reexecutionTimeMs = timer.ms(); + const totalManaUsed = block.header.totalManaUsed.toNumber() / 1e6; + + this.metrics?.recordReex(reexecutionTimeMs, txs.length, totalManaUsed); + + return { + block, + failedTxs, + reexecutionTimeMs, + totalManaUsed, + }; + } +} diff --git a/yarn-project/validator-client/src/config.ts b/yarn-project/validator-client/src/config.ts index 0fbfd69c852e..2b7125f33dbd 100644 --- a/yarn-project/validator-client/src/config.ts +++ b/yarn-project/validator-client/src/config.ts @@ -58,6 +58,12 @@ export const validatorClientConfigMappings: ConfigMappingsType, + config: ValidatorClientFullConfig, deps: { blockBuilder: IFullNodeBlockBuilder; p2pClient: P2PClient; diff --git a/yarn-project/validator-client/src/index.ts b/yarn-project/validator-client/src/index.ts index 056a9badd892..21314d97da4b 100644 --- a/yarn-project/validator-client/src/index.ts +++ b/yarn-project/validator-client/src/index.ts @@ -1,3 +1,4 @@ +export * from './block_proposal_handler.js'; export * from './config.js'; export * from './factory.js'; export * 
from './validator.js'; diff --git a/yarn-project/validator-client/src/metrics.ts b/yarn-project/validator-client/src/metrics.ts index 25e0c26593f1..5f0b73e80fb9 100644 --- a/yarn-project/validator-client/src/metrics.ts +++ b/yarn-project/validator-client/src/metrics.ts @@ -72,9 +72,10 @@ export class ValidatorMetrics { this.attestationsCount.add(num); } - public incFailedAttestations(num: number, reason: string) { + public incFailedAttestations(num: number, reason: string, inCommittee: boolean) { this.failedAttestationsCount.add(num, { [Attributes.ERROR_TYPE]: reason, + [Attributes.VALIDATOR_STATUS]: inCommittee ? 'in-committee' : 'none', }); } } diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index 2833266bf5fd..0f0fb69a32c8 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -6,6 +6,7 @@ import { Secp256k1Signer, makeEthSignDigest } from '@aztec/foundation/crypto'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; import { TestDateProvider, Timer } from '@aztec/foundation/timer'; +import { unfreeze } from '@aztec/foundation/types'; import { type Hex, type KeyStore, KeystoreManager } from '@aztec/node-keystore'; import { AuthRequest, @@ -26,7 +27,7 @@ import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; import type { BlockProposal } from '@aztec/stdlib/p2p'; import { makeBlockAttestation, makeBlockProposal, makeHeader, mockTx } from '@aztec/stdlib/testing'; import { AppendOnlyTreeSnapshot } from '@aztec/stdlib/trees'; -import { ContentCommitment, type Tx, TxHash } from '@aztec/stdlib/tx'; +import { BlockHeader, ContentCommitment, type Tx, TxHash } from '@aztec/stdlib/tx'; import { AttestationTimeoutError } from '@aztec/stdlib/validators'; import { describe, expect, it, jest } from '@jest/globals'; @@ -196,14 +197,15 @@ describe('ValidatorClient', () 
=> { const makeTxFromHash = (txHash: TxHash) => ({ getTxHash: () => txHash, txHash }) as Tx; const enableReexecution = () => { - (validatorClient as any).config.validatorReexecute = true; + validatorClient.updateConfig({ validatorReexecute: true }); blockBuilder.buildBlock.mockImplementation(() => Promise.resolve(blockBuildResult)); }; beforeEach(async () => { const emptyInHash = await computeInHashFromL1ToL2Messages([]); const contentCommitment = new ContentCommitment(Fr.random(), emptyInHash, Fr.random()); - proposal = makeBlockProposal({ header: makeHeader(1, 100, 100, { contentCommitment }) }); + const blockHeader = makeHeader(1, 100, 100, { contentCommitment }); + proposal = makeBlockProposal({ header: blockHeader }); // Set the current time to the start of the slot of the proposal const genesisTime = 1n; const slotTime = genesisTime + proposal.slotNumber.toBigInt() * BigInt(blockBuilder.getConfig().slotDuration); @@ -244,7 +246,7 @@ describe('ValidatorClient', () => { numMsgs: 0, usedTxs: [], block: { - header: makeHeader(), + header: blockHeader.clone(), body: { txEffects: times(proposal.txHashes.length, () => ({})) }, archive: new AppendOnlyTreeSnapshot(proposal.archive, proposal.blockNumber), } as L2Block, @@ -275,7 +277,7 @@ describe('ValidatorClient', () => { expect(attestations?.length).toBeGreaterThan(0); }); - it('should not attest to proposal if roots do not match, and should emit WANT_TO_SLASH_EVENT', async () => { + it('should not attest to proposal if roots do not match and should emit WANT_TO_SLASH_EVENT', async () => { // Block builder returns a block with a different root const emitSpy = jest.spyOn(validatorClient, 'emit'); enableReexecution(); @@ -297,6 +299,24 @@ describe('ValidatorClient', () => { ]); }); + it('should not attest to proposal if a random field in the proposal does not match', async () => { + // Block builder returns a block with a different nullifier tree root + enableReexecution(); + 
unfreeze(blockBuildResult.block.header.state.partial).nullifierTree.root = Fr.random(); + + // We should not attest to the proposal + const attestations = await validatorClient.attestToProposal(proposal, sender); + expect(attestations).toBeUndefined(); + }); + + it('should not attest to proposal if the proposed block number is taken', async () => { + enableReexecution(); + blockSource.getBlockHeader.mockResolvedValue({} as BlockHeader); + const attestations = await validatorClient.attestToProposal(proposal, sender); + expect(attestations).toBeUndefined(); + expect(blockSource.getBlockHeader).toHaveBeenCalledWith(proposal.blockNumber); + }); + it('should not emit WANT_TO_SLASH_EVENT if slashing is disabled', async () => { validatorClient.updateConfig({ slashBroadcastedInvalidBlockPenalty: 0n }); diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index 38e88261c2fb..eb0201aece4a 100644 --- a/yarn-project/validator-client/src/validator.ts +++ b/yarn-project/validator-client/src/validator.ts @@ -1,17 +1,13 @@ -import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants'; import type { EpochCache } from '@aztec/epoch-cache'; import type { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; import { createLogger } from '@aztec/foundation/log'; -import { retryUntil } from '@aztec/foundation/retry'; import { RunningPromise } from '@aztec/foundation/running-promise'; import { sleep } from '@aztec/foundation/sleep'; -import { DateProvider, Timer } from '@aztec/foundation/timer'; +import { DateProvider } from '@aztec/foundation/timer'; import type { KeystoreManager } from '@aztec/node-keystore'; -import type { P2P, PeerId } from '@aztec/p2p'; -import { AuthRequest, AuthResponse, ReqRespSubProtocol, TxProvider } from '@aztec/p2p'; -import { BlockProposalValidator } from '@aztec/p2p/msg_validators'; -import { computeInHashFromL1ToL2Messages } from '@aztec/prover-client/helpers'; 
+import type { P2P, PeerId, TxProvider } from '@aztec/p2p'; +import { AuthRequest, AuthResponse, BlockProposalValidator, ReqRespSubProtocol } from '@aztec/p2p'; import { OffenseType, type SlasherConfig, @@ -21,23 +17,17 @@ import { } from '@aztec/slasher'; import type { AztecAddress } from '@aztec/stdlib/aztec-address'; import type { L2BlockSource } from '@aztec/stdlib/block'; -import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import type { IFullNodeBlockBuilder, Validator, ValidatorClientFullConfig } from '@aztec/stdlib/interfaces/server'; import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; import type { BlockAttestation, BlockProposal, BlockProposalOptions } from '@aztec/stdlib/p2p'; -import { GlobalVariables, type ProposedBlockHeader, type StateReference, type Tx } from '@aztec/stdlib/tx'; -import { - AttestationTimeoutError, - ReExFailedTxsError, - ReExStateMismatchError, - ReExTimeoutError, - TransactionsNotAvailableError, -} from '@aztec/stdlib/validators'; +import type { ProposedBlockHeader, StateReference, Tx } from '@aztec/stdlib/tx'; +import { AttestationTimeoutError } from '@aztec/stdlib/validators'; import { type TelemetryClient, type Tracer, getTelemetryClient } from '@aztec/telemetry-client'; import { EventEmitter } from 'events'; import type { TypedDataDefinition } from 'viem'; +import { BlockProposalHandler, type BlockProposalValidationFailureReason } from './block_proposal_handler.js'; import type { ValidatorClientConfig } from './config.js'; import { ValidationService } from './duties/validation_service.js'; import { NodeKeystoreAdapter } from './key_store/node_keystore_adapter.js'; @@ -47,6 +37,12 @@ import { ValidatorMetrics } from './metrics.js'; // Just cap the set to avoid unbounded growth. 
const MAX_PROPOSERS_OF_INVALID_BLOCKS = 1000; +// What errors from the block proposal handler result in slashing +const SLASHABLE_BLOCK_PROPOSAL_VALIDATION_RESULT: BlockProposalValidationFailureReason[] = [ + 'state_mismatch', + 'failed_txs', +]; + /** * Validator Client */ @@ -55,24 +51,22 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) private validationService: ValidationService; private metrics: ValidatorMetrics; + // Whether it has already registered handlers on the p2p client + private hasRegisteredHandlers = false; + // Used to check if we are sending the same proposal twice private previousProposal?: BlockProposal; private lastEpochForCommitteeUpdateLoop: bigint | undefined; private epochCacheUpdateLoop: RunningPromise; - private blockProposalValidator: BlockProposalValidator; - private proposersOfInvalidBlocks: Set = new Set(); protected constructor( - private blockBuilder: IFullNodeBlockBuilder, private keyStore: NodeKeystoreAdapter, private epochCache: EpochCache, private p2pClient: P2P, - private blockSource: L2BlockSource, - private l1ToL2MessageSource: L1ToL2MessageSource, - private txProvider: TxProvider, + private blockProposalHandler: BlockProposalHandler, private config: ValidatorClientFullConfig, private dateProvider: DateProvider = new DateProvider(), telemetry: TelemetryClient = getTelemetryClient(), @@ -84,8 +78,6 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) this.validationService = new ValidationService(keyStore); - this.blockProposalValidator = new BlockProposalValidator(epochCache); - // Refresh epoch cache every second to trigger alert if participation in committee changes this.epochCacheUpdateLoop = new RunningPromise(this.handleEpochCommitteeUpdate.bind(this), log, 1000); @@ -152,21 +144,30 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) dateProvider: DateProvider = new DateProvider(), telemetry: TelemetryClient = 
getTelemetryClient(), ) { - const validator = new ValidatorClient( + const metrics = new ValidatorMetrics(telemetry); + const blockProposalValidator = new BlockProposalValidator(epochCache); + const blockProposalHandler = new BlockProposalHandler( blockBuilder, - NodeKeystoreAdapter.fromKeyStoreManager(keyStoreManager), - epochCache, - p2pClient, blockSource, l1ToL2MessageSource, txProvider, + blockProposalValidator, + config, + metrics, + dateProvider, + telemetry, + ); + + const validator = new ValidatorClient( + NodeKeystoreAdapter.fromKeyStoreManager(keyStoreManager), + epochCache, + p2pClient, + blockProposalHandler, config, dateProvider, telemetry, ); - // TODO(PhilWindle): This seems like it could/should be done inside start() - validator.registerBlockProposalHandler(); return validator; } @@ -176,6 +177,15 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) .filter(addr => !this.config.disabledValidators.some(disabled => disabled.equals(addr))); } + public getBlockProposalHandler() { + return this.blockProposalHandler; + } + + // Proxy method for backwards compatibility with tests + public reExecuteTransactions(proposal: BlockProposal, txs: any[], l1ToL2Messages: Fr[]): Promise { + return this.blockProposalHandler.reexecuteTransactions(proposal, txs, l1ToL2Messages); + } + public signWithAddress(addr: EthAddress, msg: TypedDataDefinition) { return this.keyStore.signTypedDataWithAddress(addr, msg); } @@ -197,11 +207,14 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) } public async start() { - // Sync the committee from the smart contract - // https://github.com/AztecProtocol/aztec-packages/issues/7962 + if (this.epochCacheUpdateLoop.isRunning()) { + this.log.warn(`Validator client already started`); + return; + } - const myAddresses = this.getValidatorAddresses(); + await this.registerHandlers(); + const myAddresses = this.getValidatorAddresses(); const inCommittee = await 
this.epochCache.filterInCommittee('now', myAddresses); if (inCommittee.length > 0) { this.log.info( @@ -214,9 +227,6 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) } this.epochCacheUpdateLoop.start(); - this.p2pClient.registerThisValidatorAddresses(myAddresses); - await this.p2pClient.addReqRespSubProtocol(ReqRespSubProtocol.AUTH, this.handleAuthRequest.bind(this)); - return Promise.resolve(); } @@ -224,143 +234,71 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) await this.epochCacheUpdateLoop.stop(); } - public registerBlockProposalHandler() { - const handler = (block: BlockProposal, proposalSender: PeerId): Promise => - this.attestToProposal(block, proposalSender); - this.p2pClient.registerBlockProposalHandler(handler); + /** Register handlers on the p2p client */ + public async registerHandlers() { + if (!this.hasRegisteredHandlers) { + this.hasRegisteredHandlers = true; + this.log.debug(`Registering validator handlers for p2p client`); + + const handler = (block: BlockProposal, proposalSender: PeerId): Promise => + this.attestToProposal(block, proposalSender); + this.p2pClient.registerBlockProposalHandler(handler); + + const myAddresses = this.getValidatorAddresses(); + this.p2pClient.registerThisValidatorAddresses(myAddresses); + + await this.p2pClient.addReqRespSubProtocol(ReqRespSubProtocol.AUTH, this.handleAuthRequest.bind(this)); + } } async attestToProposal(proposal: BlockProposal, proposalSender: PeerId): Promise { const slotNumber = proposal.slotNumber.toBigInt(); - const blockNumber = proposal.blockNumber; const proposer = proposal.getSender(); // Check that I have any address in current committee before attesting const inCommittee = await this.epochCache.filterInCommittee(slotNumber, this.getValidatorAddresses()); const partOfCommittee = inCommittee.length > 0; + const incFailedAttestation = (reason: string) => this.metrics.incFailedAttestations(1, reason, partOfCommittee); - const 
proposalInfo = { - ...proposal.toBlockInfo(), - proposer: proposer.toString(), - }; - + const proposalInfo = { ...proposal.toBlockInfo(), proposer: proposer.toString() }; this.log.info(`Received proposal for slot ${slotNumber}`, { ...proposalInfo, - txHashes: proposal.txHashes.map(txHash => txHash.toString()), + txHashes: proposal.txHashes.map(t => t.toString()), }); - // Collect txs from the proposal. Note that we do this before checking if we have an address in the - // current committee, since we want to collect txs anyway to facilitate propagation. - const { txs, missingTxs } = await this.txProvider.getTxsForBlockProposal(proposal, { - pinnedPeer: proposalSender, - deadline: this.getReexecutionDeadline(proposal, this.blockBuilder.getConfig()), - }); - - // Check that I have any address in current committee before attesting - if (!partOfCommittee) { - this.log.verbose(`No validator in the current committee, skipping attestation`, proposalInfo); - return undefined; - } - - // Check that the proposal is from the current proposer, or the next proposer. - // Q: Should this be moved to the block proposal validator, so we disregard proposals from anyone? - const invalidProposal = await this.blockProposalValidator.validate(proposal); - if (invalidProposal) { - this.log.warn(`Proposal is not valid, skipping attestation`, proposalInfo); - if (partOfCommittee) { - this.metrics.incFailedAttestations(1, 'invalid_proposal'); - } - return undefined; - } - - // Check that the parent proposal is a block we know, otherwise reexecution would fail. - // Q: Should we move this to the block proposal validator? If there, then p2p would check it - // before re-broadcasting it. This means that proposals built on top of an L1-reorg'ed-out block - // would not be rebroadcasted. But it also means that nodes that have not fully synced would - // not rebroadcast the proposal. 
- if (blockNumber > INITIAL_L2_BLOCK_NUM) { - const config = this.blockBuilder.getConfig(); - const deadline = this.getReexecutionDeadline(proposal, config); - const currentTime = this.dateProvider.now(); - const timeoutDurationMs = deadline.getTime() - currentTime; - const parentBlock = - timeoutDurationMs <= 0 - ? undefined - : await retryUntil( - async () => { - const block = await this.blockSource.getBlock(blockNumber - 1); - if (block) { - return block; - } - await this.blockSource.syncImmediate(); - return await this.blockSource.getBlock(blockNumber - 1); - }, - 'Force Archiver Sync', - timeoutDurationMs / 1000, // Continue retrying until the deadline - 0.5, // Retry every 500ms - ); - - if (parentBlock === undefined) { - this.log.warn(`Parent block for ${blockNumber} not found, skipping attestation`, proposalInfo); - if (partOfCommittee) { - this.metrics.incFailedAttestations(1, 'parent_block_not_found'); - } - return undefined; - } - - if (!proposal.payload.header.lastArchiveRoot.equals(parentBlock.archive.root)) { - this.log.warn(`Parent block archive root for proposal does not match, skipping attestation`, { - proposalLastArchiveRoot: proposal.payload.header.lastArchiveRoot.toString(), - parentBlockArchiveRoot: parentBlock.archive.root.toString(), - ...proposalInfo, - }); - if (partOfCommittee) { - this.metrics.incFailedAttestations(1, 'parent_block_does_not_match'); - } - return undefined; - } - } + // Reexecute txs if we are part of the committee so we can attest, or if slashing is enabled so we can slash + // invalid proposals even when not in the committee, or if we are configured to always reexecute for monitoring purposes. 
+ const { validatorReexecute, slashBroadcastedInvalidBlockPenalty, alwaysReexecuteBlockProposals } = this.config; + const shouldReexecute = + (slashBroadcastedInvalidBlockPenalty > 0n && validatorReexecute) || + (partOfCommittee && validatorReexecute) || + alwaysReexecuteBlockProposals; + + const validationResult = await this.blockProposalHandler.handleBlockProposal( + proposal, + proposalSender, + !!shouldReexecute, + ); - // Check that I have the same set of l1ToL2Messages as the proposal - // Q: Same as above, should this be part of p2p validation? - const l1ToL2Messages = await this.l1ToL2MessageSource.getL1ToL2Messages(blockNumber); - const computedInHash = await computeInHashFromL1ToL2Messages(l1ToL2Messages); - const proposalInHash = proposal.payload.header.contentCommitment.inHash; - if (!computedInHash.equals(proposalInHash)) { - this.log.warn(`L1 to L2 messages in hash mismatch, skipping attestation`, { - proposalInHash: proposalInHash.toString(), - computedInHash: computedInHash.toString(), - ...proposalInfo, - }); - if (partOfCommittee) { - this.metrics.incFailedAttestations(1, 'in_hash_mismatch'); - } - return undefined; - } + if (!validationResult.isValid) { + this.log.warn(`Proposal validation failed: ${validationResult.reason}`, proposalInfo); + incFailedAttestation(validationResult.reason || 'unknown'); - // Check that all of the transactions in the proposal are available in the tx pool before attesting - if (missingTxs.length > 0) { - this.log.warn(`Missing ${missingTxs.length} txs to attest to proposal`, { ...proposalInfo, missingTxs }); - if (partOfCommittee) { - this.metrics.incFailedAttestations(1, 'TransactionsNotAvailableError'); + // Slash invalid block proposals + if ( + validationResult.reason && + SLASHABLE_BLOCK_PROPOSAL_VALIDATION_RESULT.includes(validationResult.reason) && + slashBroadcastedInvalidBlockPenalty > 0n + ) { + this.log.warn(`Slashing proposer for invalid block proposal`, proposalInfo); + this.slashInvalidBlock(proposal); 
} return undefined; } - // Try re-executing the transactions in the proposal - try { - this.log.verbose(`Processing attestation for slot ${slotNumber}`, proposalInfo); - if (this.config.validatorReexecute) { - this.log.verbose(`Re-executing transactions in the proposal before attesting`); - await this.reExecuteTransactions(proposal, txs, l1ToL2Messages); - } - } catch (error: any) { - this.metrics.incFailedAttestations(1, error instanceof Error ? error.name : 'unknown'); - this.log.error(`Error reexecuting txs while processing block proposal`, error, proposalInfo); - if (error instanceof ReExStateMismatchError && this.config.slashBroadcastedInvalidBlockPenalty > 0n) { - this.log.warn(`Slashing proposer for invalid block proposal`, proposalInfo); - this.slashInvalidBlock(proposal); - } + // Check that I have any address in current committee before attesting + if (!partOfCommittee) { + this.log.verbose(`No validator in the current committee, skipping attestation`, proposalInfo); return undefined; } @@ -369,73 +307,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) this.metrics.incAttestations(inCommittee.length); // If the above function does not throw an error, then we can attest to the proposal - return this.doAttestToProposal(proposal, inCommittee); - } - - private getReexecutionDeadline( - proposal: BlockProposal, - config: { l1GenesisTime: bigint; slotDuration: number }, - ): Date { - const nextSlotTimestampSeconds = Number(getTimestampForSlot(proposal.slotNumber.toBigInt() + 1n, config)); - const msNeededForPropagationAndPublishing = this.config.validatorReexecuteDeadlineMs; - return new Date(nextSlotTimestampSeconds * 1000 - msNeededForPropagationAndPublishing); - } - - /** - * Re-execute the transactions in the proposal and check that the state updates match the header state - * @param proposal - The proposal to re-execute - */ - async reExecuteTransactions(proposal: BlockProposal, txs: Tx[], l1ToL2Messages: Fr[]): Promise { 
- const { header } = proposal.payload; - const { txHashes } = proposal; - - // If we do not have all of the transactions, then we should fail - if (txs.length !== txHashes.length) { - const foundTxHashes = txs.map(tx => tx.getTxHash()); - const missingTxHashes = txHashes.filter(txHash => !foundTxHashes.includes(txHash)); - throw new TransactionsNotAvailableError(missingTxHashes); - } - - // Use the sequencer's block building logic to re-execute the transactions - const timer = new Timer(); - const config = this.blockBuilder.getConfig(); - const globalVariables = GlobalVariables.from({ - ...proposal.payload.header, - blockNumber: proposal.blockNumber, - timestamp: header.timestamp, - chainId: new Fr(config.l1ChainId), - version: new Fr(config.rollupVersion), - }); - - const { block, failedTxs } = await this.blockBuilder.buildBlock(txs, l1ToL2Messages, globalVariables, { - deadline: this.getReexecutionDeadline(proposal, config), - }); - - this.log.verbose(`Transaction re-execution complete`); - const numFailedTxs = failedTxs.length; - - if (numFailedTxs > 0) { - this.metrics.recordFailedReexecution(proposal); - throw new ReExFailedTxsError(numFailedTxs); - } - - if (block.body.txEffects.length !== txHashes.length) { - this.metrics.recordFailedReexecution(proposal); - throw new ReExTimeoutError(); - } - - // This function will throw an error if state updates do not match - if (!block.archive.root.equals(proposal.archive)) { - this.metrics.recordFailedReexecution(proposal); - throw new ReExStateMismatchError( - proposal.archive, - block.archive.root, - proposal.payload.stateReference, - block.header.state, - ); - } - - this.metrics.recordReex(timer.ms(), txs.length, block.header.totalManaUsed.toNumber() / 1e6); + return this.createBlockAttestationsFromProposal(proposal, inCommittee); } private slashInvalidBlock(proposal: BlockProposal) { @@ -494,7 +366,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) const slot = 
proposal.payload.header.slotNumber.toBigInt(); const inCommittee = await this.epochCache.filterInCommittee(slot, this.getValidatorAddresses()); this.log.debug(`Collecting ${inCommittee.length} self-attestations for slot ${slot}`, { inCommittee }); - return this.doAttestToProposal(proposal, inCommittee); + return this.createBlockAttestationsFromProposal(proposal, inCommittee); } async collectAttestations(proposal: BlockProposal, required: number, deadline: Date): Promise { @@ -544,7 +416,10 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) } } - private async doAttestToProposal(proposal: BlockProposal, attestors: EthAddress[] = []): Promise { + private async createBlockAttestationsFromProposal( + proposal: BlockProposal, + attestors: EthAddress[] = [], + ): Promise { const attestations = await this.validationService.attestToProposal(proposal, attestors); await this.p2pClient.addAttestations(attestations); return attestations; @@ -573,5 +448,3 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) return authResponse.toBuffer(); } } - -// Conversion helpers moved into NodeKeystoreAdapter. 
diff --git a/yarn-project/world-state/src/native/native_world_state.test.ts b/yarn-project/world-state/src/native/native_world_state.test.ts index 5ba9deae7721..62495d820e2f 100644 --- a/yarn-project/world-state/src/native/native_world_state.test.ts +++ b/yarn-project/world-state/src/native/native_world_state.test.ts @@ -15,7 +15,7 @@ import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; import type { SiblingPath } from '@aztec/foundation/trees'; import { PublicDataWrite } from '@aztec/stdlib/avm'; -import type { L2Block } from '@aztec/stdlib/block'; +import { L2Block } from '@aztec/stdlib/block'; import { DatabaseVersion, DatabaseVersionManager } from '@aztec/stdlib/database-version'; import type { MerkleTreeLeafType, MerkleTreeWriteOperations } from '@aztec/stdlib/interfaces/server'; import { makeContentCommitment, makeGlobalVariables } from '@aztec/stdlib/testing'; @@ -832,6 +832,80 @@ describe('NativeWorldState', () => { }); }); + describe('Invalid Blocks', () => { + let ws: NativeWorldStateService; + let rollupAddress!: EthAddress; + + beforeEach(async () => { + rollupAddress = EthAddress.random(); + ws = await NativeWorldStateService.new(rollupAddress, dataDir, wsTreeMapSizes); + }); + + afterEach(async () => { + await ws.close(); + }); + + it('handles invalid blocks', async () => { + const fork = await ws.fork(); + + // Insert a few blocks + for (let i = 0; i < 4; i++) { + const blockNumber = i + 1; + const provenBlock = blockNumber - 2; + const { block, messages } = await mockBlock(blockNumber, 1, fork); + const status = await ws.handleL2BlockAndMessages(block, messages); + + expect(status.summary.unfinalizedBlockNumber).toBe(BigInt(blockNumber)); + expect(status.summary.oldestHistoricalBlock).toBe(1n); + + if (provenBlock > 0) { + const provenStatus = await ws.setFinalized(BigInt(provenBlock)); + expect(provenStatus.unfinalizedBlockNumber).toBe(BigInt(blockNumber)); + 
expect(provenStatus.finalizedBlockNumber).toBe(BigInt(provenBlock)); + expect(provenStatus.oldestHistoricalBlock).toBe(1n); + } else { + expect(status.summary.finalizedBlockNumber).toBe(0n); + } + } + + // Now build an invalid block, see that it is rejected and that we can then insert the correct block + { + const { block: block, messages } = await mockBlock(5, 1, fork); + const invalidBlock = L2Block.fromBuffer(block.toBuffer()); + invalidBlock.header.state.partial.nullifierTree.root = Fr.random(); + + await expect(ws.handleL2BlockAndMessages(invalidBlock, messages)).rejects.toThrow( + "Can't synch block: block state does not match world state", + ); + + // Accepts the correct block + await expect(ws.handleL2BlockAndMessages(block, messages)).resolves.toBeDefined(); + + const summary = await ws.getStatusSummary(); + expect(summary.unfinalizedBlockNumber).toBe(5n); + expect(summary.finalizedBlockNumber).toBe(2n); + expect(summary.oldestHistoricalBlock).toBe(1n); + } + + // Now we push another invalid block, see that it is rejected and check we can unwind to the last proven block + { + const { block: block, messages } = await mockBlock(6, 1, fork); + const invalidBlock = L2Block.fromBuffer(block.toBuffer()); + invalidBlock.header.state.partial.nullifierTree.root = Fr.random(); + + await expect(ws.handleL2BlockAndMessages(invalidBlock, messages)).rejects.toThrow( + "Can't synch block: block state does not match world state", + ); + + // Now we want to unwind to the last proven block + const unwindStatus = await ws.unwindBlocks(2n); + expect(unwindStatus.summary.unfinalizedBlockNumber).toBe(2n); + expect(unwindStatus.summary.finalizedBlockNumber).toBe(2n); + expect(unwindStatus.summary.oldestHistoricalBlock).toBe(1n); + } + }); + }); + describe('Finding leaves', () => { let block: L2Block; let messages: Fr[];