diff --git a/yarn-project/archiver/src/archiver-store.test.ts b/yarn-project/archiver/src/archiver-store.test.ts index 0a4dca6963c6..d5cd45f3a3fa 100644 --- a/yarn-project/archiver/src/archiver-store.test.ts +++ b/yarn-project/archiver/src/archiver-store.test.ts @@ -71,6 +71,7 @@ describe('Archiver Store', () => { slotDuration: 24, ethereumSlotDuration: 12, proofSubmissionEpochs: 1, + targetCommitteeSize: 48, genesisArchiveRoot: new Fr(GENESIS_ARCHIVE_ROOT), }; diff --git a/yarn-project/archiver/src/archiver-sync.test.ts b/yarn-project/archiver/src/archiver-sync.test.ts index b2ffa92029dc..ab5ae6f9d2bd 100644 --- a/yarn-project/archiver/src/archiver-sync.test.ts +++ b/yarn-project/archiver/src/archiver-sync.test.ts @@ -73,6 +73,7 @@ describe('Archiver Sync', () => { slotDuration: 24, ethereumSlotDuration: DefaultL1ContractsConfig.ethereumSlotDuration, proofSubmissionEpochs: 1, + targetCommitteeSize: 48, genesisArchiveRoot: GENESIS_ROOT, }; diff --git a/yarn-project/archiver/src/factory.ts b/yarn-project/archiver/src/factory.ts index 111b10d54fce..eeb5090c406e 100644 --- a/yarn-project/archiver/src/factory.ts +++ b/yarn-project/archiver/src/factory.ts @@ -77,14 +77,21 @@ export async function createArchiver( const inbox = new InboxContract(publicClient, config.l1Contracts.inboxAddress); // Fetch L1 constants from rollup contract - const [l1StartBlock, l1GenesisTime, proofSubmissionEpochs, genesisArchiveRoot, slashingProposerAddress] = - await Promise.all([ - rollup.getL1StartBlock(), - rollup.getL1GenesisTime(), - rollup.getProofSubmissionEpochs(), - rollup.getGenesisArchiveTreeRoot(), - rollup.getSlashingProposerAddress(), - ] as const); + const [ + l1StartBlock, + l1GenesisTime, + proofSubmissionEpochs, + genesisArchiveRoot, + slashingProposerAddress, + targetCommitteeSize, + ] = await Promise.all([ + rollup.getL1StartBlock(), + rollup.getL1GenesisTime(), + rollup.getProofSubmissionEpochs(), + rollup.getGenesisArchiveTreeRoot(), + 
rollup.getSlashingProposerAddress(), + rollup.getTargetCommitteeSize(), + ] as const); const l1StartBlockHash = await publicClient .getBlock({ blockNumber: l1StartBlock, includeTransactions: false }) @@ -100,6 +107,7 @@ export async function createArchiver( slotDuration, ethereumSlotDuration, proofSubmissionEpochs: Number(proofSubmissionEpochs), + targetCommitteeSize, genesisArchiveRoot: Fr.fromString(genesisArchiveRoot.toString()), }; diff --git a/yarn-project/aztec-node/src/sentinel/sentinel.test.ts b/yarn-project/aztec-node/src/sentinel/sentinel.test.ts index 117e71bfddc0..9a939a74b2b8 100644 --- a/yarn-project/aztec-node/src/sentinel/sentinel.test.ts +++ b/yarn-project/aztec-node/src/sentinel/sentinel.test.ts @@ -75,6 +75,7 @@ describe('sentinel', () => { epochDuration: 8, ethereumSlotDuration: 12, proofSubmissionEpochs: 1, + targetCommitteeSize: 48, }; epochCache.getEpochAndSlotNow.mockReturnValue({ epoch, slot, ts, nowMs: ts * 1000n }); diff --git a/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts b/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts index 9860e443c417..894e488e0802 100644 --- a/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts +++ b/yarn-project/end-to-end/src/e2e_epochs/epochs_test.ts @@ -194,6 +194,7 @@ export class EpochsTestContext { l1GenesisTime: await this.rollup.getL1GenesisTime(), ethereumSlotDuration, proofSubmissionEpochs: Number(await this.rollup.getProofSubmissionEpochs()), + targetCommitteeSize: await this.rollup.getTargetCommitteeSize(), }; this.logger.info( diff --git a/yarn-project/epoch-cache/src/epoch_cache.test.ts b/yarn-project/epoch-cache/src/epoch_cache.test.ts index 946dc1cc9b93..555cf707f2bf 100644 --- a/yarn-project/epoch-cache/src/epoch_cache.test.ts +++ b/yarn-project/epoch-cache/src/epoch_cache.test.ts @@ -72,6 +72,7 @@ describe('EpochCache', () => { ethereumSlotDuration: SLOT_DURATION, epochDuration: EPOCH_DURATION, proofSubmissionEpochs: 1, + targetCommitteeSize: 48, lagInEpochsForValidatorSet: 2, 
lagInEpochsForRandao: 2, }; diff --git a/yarn-project/epoch-cache/src/epoch_cache.ts b/yarn-project/epoch-cache/src/epoch_cache.ts index 919f8b529d62..ec197341406a 100644 --- a/yarn-project/epoch-cache/src/epoch_cache.ts +++ b/yarn-project/epoch-cache/src/epoch_cache.ts @@ -45,6 +45,7 @@ export interface EpochCacheInterface { getRegisteredValidators(): Promise; isInCommittee(slot: SlotTag, validator: EthAddress): Promise; filterInCommittee(slot: SlotTag, validators: EthAddress[]): Promise; + getL1Constants(): L1RollupConstants; } /** @@ -106,6 +107,7 @@ export class EpochCache implements EpochCacheInterface { epochDuration, lagInEpochsForValidatorSet, lagInEpochsForRandao, + targetCommitteeSize, ] = await Promise.all([ rollup.getL1StartBlock(), rollup.getL1GenesisTime(), @@ -114,6 +116,7 @@ export class EpochCache implements EpochCacheInterface { rollup.getEpochDuration(), rollup.getLagInEpochsForValidatorSet(), rollup.getLagInEpochsForRandao(), + rollup.getTargetCommitteeSize(), ] as const); const l1RollupConstants = { @@ -125,6 +128,7 @@ export class EpochCache implements EpochCacheInterface { ethereumSlotDuration: config.ethereumSlotDuration, lagInEpochsForValidatorSet: Number(lagInEpochsForValidatorSet), lagInEpochsForRandao: Number(lagInEpochsForRandao), + targetCommitteeSize: Number(targetCommitteeSize), }; return new EpochCache(rollup, l1RollupConstants, deps.dateProvider); diff --git a/yarn-project/epoch-cache/src/test/test_epoch_cache.ts b/yarn-project/epoch-cache/src/test/test_epoch_cache.ts index a3c184e00e89..04e8c0afdd72 100644 --- a/yarn-project/epoch-cache/src/test/test_epoch_cache.ts +++ b/yarn-project/epoch-cache/src/test/test_epoch_cache.ts @@ -13,6 +13,7 @@ const DEFAULT_L1_CONSTANTS: L1RollupConstants = { epochDuration: 16, ethereumSlotDuration: 12, proofSubmissionEpochs: 2, + targetCommitteeSize: 48, }; /** diff --git a/yarn-project/ethereum/src/contracts/rollup.ts b/yarn-project/ethereum/src/contracts/rollup.ts index 789ea303cef6..30a1ba33ef84 
100644 --- a/yarn-project/ethereum/src/contracts/rollup.ts +++ b/yarn-project/ethereum/src/contracts/rollup.ts @@ -391,20 +391,24 @@ export class RollupContract { slotDuration: number; epochDuration: number; proofSubmissionEpochs: number; + targetCommitteeSize: number; }> { - const [l1StartBlock, l1GenesisTime, slotDuration, epochDuration, proofSubmissionEpochs] = await Promise.all([ - this.getL1StartBlock(), - this.getL1GenesisTime(), - this.getSlotDuration(), - this.getEpochDuration(), - this.getProofSubmissionEpochs(), - ]); + const [l1StartBlock, l1GenesisTime, slotDuration, epochDuration, proofSubmissionEpochs, targetCommitteeSize] = + await Promise.all([ + this.getL1StartBlock(), + this.getL1GenesisTime(), + this.getSlotDuration(), + this.getEpochDuration(), + this.getProofSubmissionEpochs(), + this.getTargetCommitteeSize(), + ]); return { l1StartBlock, l1GenesisTime, slotDuration, epochDuration: Number(epochDuration), proofSubmissionEpochs: Number(proofSubmissionEpochs), + targetCommitteeSize, }; } diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts index a2d03ca7bd89..a8e926c6c956 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_batch_txs.test.ts @@ -64,6 +64,15 @@ describe('p2p client integration batch txs', () => { //@ts-expect-error - we want to mock the getEpochAndSlotInNextL1Slot method, mocking ts is enough epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) }); epochCache.getRegisteredValidators.mockResolvedValue([]); + epochCache.getL1Constants.mockReturnValue({ + l1StartBlock: 0n, + l1GenesisTime: 0n, + slotDuration: 24, + epochDuration: 16, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 2, + targetCommitteeSize: 48, + }); txPool.hasTxs.mockResolvedValue([]); txPool.getAllTxs.mockImplementation(() => { diff --git 
a/yarn-project/p2p/src/client/test/p2p_client.integration_block_txs.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_block_txs.test.ts index acb4945a5258..b83eccda403f 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_block_txs.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_block_txs.test.ts @@ -56,6 +56,15 @@ describe('p2p client integration block txs protocol ', () => { //@ts-expect-error - we want to mock the getEpochAndSlotInNextL1Slot method, mocking ts is enough epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) }); epochCache.getRegisteredValidators.mockResolvedValue([]); + epochCache.getL1Constants.mockReturnValue({ + l1StartBlock: 0n, + l1GenesisTime: 0n, + slotDuration: 24, + epochDuration: 16, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 2, + targetCommitteeSize: 48, + }); txPool.isEmpty.mockResolvedValue(true); txPool.hasTxs.mockResolvedValue([]); diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts index d1a46b5dc807..b1c8c22a3d83 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_message_propagation.test.ts @@ -59,6 +59,15 @@ describe('p2p client integration message propagation', () => { //@ts-expect-error - we want to mock the getEpochAndSlotInNextL1Slot method, mocking ts is enough epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) }); epochCache.getRegisteredValidators.mockResolvedValue([]); + epochCache.getL1Constants.mockReturnValue({ + l1StartBlock: 0n, + l1GenesisTime: 0n, + slotDuration: 24, + epochDuration: 16, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 2, + targetCommitteeSize: 48, + }); txPool.isEmpty.mockResolvedValue(true); txPool.hasTxs.mockResolvedValue([]); diff --git 
a/yarn-project/p2p/src/client/test/p2p_client.integration_status_handshake.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_status_handshake.test.ts index e88109d37ffb..91c23abd573d 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_status_handshake.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_status_handshake.test.ts @@ -47,6 +47,15 @@ describe('p2p client integration status handshake', () => { //@ts-expect-error - we want to mock the getEpochAndSlotInNextL1Slot method, mocking ts is enough epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) }); epochCache.getRegisteredValidators.mockResolvedValue([]); + epochCache.getL1Constants.mockReturnValue({ + l1StartBlock: 0n, + l1GenesisTime: 0n, + slotDuration: 24, + epochDuration: 16, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 2, + targetCommitteeSize: 48, + }); txPool.isEmpty.mockResolvedValue(true); attestationPool.isEmpty.mockResolvedValue(true); diff --git a/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts b/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts index d6d628215d7f..4eff7c08ed66 100644 --- a/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts +++ b/yarn-project/p2p/src/client/test/p2p_client.integration_txs.test.ts @@ -49,6 +49,15 @@ describe('p2p client integration', () => { //@ts-expect-error - we want to mock the getEpochAndSlotInNextL1Slot method, mocking ts is enough epochCache.getEpochAndSlotInNextL1Slot.mockReturnValue({ ts: BigInt(0) }); epochCache.getRegisteredValidators.mockResolvedValue([]); + epochCache.getL1Constants.mockReturnValue({ + l1StartBlock: 0n, + l1GenesisTime: 0n, + slotDuration: 24, + epochDuration: 16, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 2, + targetCommitteeSize: 48, + }); txPool.isEmpty.mockResolvedValue(true); txPool.hasTxs.mockResolvedValue([]); diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts 
index e8124b6ad882..e9b44881d873 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -13,7 +13,13 @@ import { Fr } from '@aztec/foundation/curves/bn254'; import { type DataStoreConfig, dataConfigMappings } from '@aztec/kv-store/config'; import { FunctionSelector } from '@aztec/stdlib/abi/function-selector'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; -import { type AllowedElement, type ChainConfig, chainConfigMappings } from '@aztec/stdlib/config'; +import { + type AllowedElement, + type ChainConfig, + type SequencerConfig, + chainConfigMappings, + sharedSequencerConfigMappings, +} from '@aztec/stdlib/config'; import { type BatchTxRequesterConfig, @@ -31,7 +37,8 @@ export interface P2PConfig BatchTxRequesterConfig, ChainConfig, TxCollectionConfig, - TxFileStoreConfig { + TxFileStoreConfig, + Pick { /** A flag dictating whether the P2P subsystem should be enabled. */ p2pEnabled: boolean; @@ -441,6 +448,7 @@ export const p2pConfigMappings: ConfigMappingsType = { 'Whether to run in fisherman mode: validates all proposals and attestations but does not broadcast attestations or participate in consensus.', ...booleanConfigHelper(false), }, + ...sharedSequencerConfigMappings, ...p2pReqRespConfigMappings, ...batchTxRequesterConfigMappings, ...chainConfigMappings, diff --git a/yarn-project/p2p/src/services/gossipsub/README.md b/yarn-project/p2p/src/services/gossipsub/README.md new file mode 100644 index 000000000000..e7d132544b62 --- /dev/null +++ b/yarn-project/p2p/src/services/gossipsub/README.md @@ -0,0 +1,626 @@ +# Gossipsub Peer Scoring + +This module configures gossipsub peer scoring parameters for the Aztec P2P network. Peer scoring helps maintain network health by rewarding well-behaving peers and penalizing misbehaving ones. + +## Overview + +Gossipsub v1.1 introduces peer scoring to defend against various attacks and improve message propagation. 
Each peer accumulates a score based on their behavior, and peers with low scores may be pruned from the mesh or even disconnected. + +For the full specification, see: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring + +## Scoring Parameters + +The peer score is computed as a weighted sum of topic-specific and application-specific scores: + +``` +Score = TopicScore + AppSpecificScore + IPColocationPenalty + BehaviorPenalty +``` + +### Topic-Specific Parameters (P1-P4) + +Each topic has its own scoring parameters: + +| Parameter | Type | Description | +|-----------|------|-------------| +| **P1: timeInMesh** | Positive | Rewards peers for time spent in the mesh | +| **P2: firstMessageDeliveries** | Positive | Rewards peers who deliver messages first | +| **P3: meshMessageDeliveries** | Negative | Penalizes peers who under-deliver messages | +| **P3b: meshFailurePenalty** | Negative | Sticky penalty applied when pruned from mesh | +| **P4: invalidMessageDeliveries** | Negative | Penalizes peers who deliver invalid messages | + +### Our Configuration + +We configure all parameters (P1-P4) with values calculated dynamically from network configuration: + +| Parameter | Max Score | Configuration | +|-----------|-----------|---------------| +| P1: timeInMesh | +8 per topic | Slot-based, caps at 1 hour | +| P2: firstMessageDeliveries | +25 per topic | Convergence-based, fast decay | +| P3: meshMessageDeliveries | -34 per topic | Must exceed P1+P2 for pruning | +| P3b: meshFailurePenalty | -34 per topic | Sticky penalty after pruning | +| P4: invalidMessageDeliveries | -20 per message | Attack detection | + +**Important:** P1 and P2 are only enabled on topics with P3 enabled (block_proposal, checkpoint_proposal, checkpoint_attestation). The tx topic has all scoring disabled except P4, to prevent free positive score accumulation that would offset penalties from other topics. 
+ +## Exponential Decay + +All counters in gossipsub use exponential decay. Each heartbeat (default: 700ms), counters are multiplied by a decay factor: + +``` +counter = counter * decay +``` + +### Multi-Slot Decay Windows + +For low-frequency topics (like 1 message per 72-second slot), naive decay would cause counters to drop to near-zero before the next message arrives. Instead, we use **multi-slot decay windows**: + +| Frequency | Decay Window | +|-----------|--------------| +| <= 1 msg/slot | 5 slots | +| 2-10 msg/slot | 3 slots | +| > 10 msg/slot | 2 slots | + +### Decay Factor Calculation + +To decay to 1% of the original value over the decay window: + +```typescript +heartbeatsPerSlot = slotDurationMs / heartbeatIntervalMs +heartbeatsInWindow = heartbeatsPerSlot * decayWindowSlots +decay = 0.01 ^ (1 / heartbeatsInWindow) +``` + +**Example** (72s slot, 700ms heartbeat, 5-slot decay window): +``` +heartbeatsPerSlot = 72000 / 700 ≈ 103 +heartbeatsInWindow = 103 * 5 = 515 +decay = 0.01^(1/515) ≈ 0.991 +``` + +## Convergence and Thresholds + +### Convergence (Steady-State Value) + +If messages arrive at a constant rate, the decaying counter converges to: + +```typescript +messagesPerHeartbeat = expectedPerSlot * (heartbeatMs / slotDurationMs) +convergence = messagesPerHeartbeat / (1 - decay) +``` + +### Threshold Calculation + +The P3 threshold determines when penalties apply. We use a conservative threshold at 30% of convergence to avoid penalizing honest peers experiencing normal variance: + +```typescript +threshold = convergence * 0.3 +``` + +## meshMessageDeliveriesWindow + +This parameter determines how long after validating a message other peers can still receive credit for delivering it. + +**How it works:** +1. Peer A delivers a message first +2. We validate the message +3. Timer starts for `meshMessageDeliveriesWindow` duration (5 seconds) +4. 
Any mesh peer delivering within this window gets credit + +**Why 5 seconds?** + +The [gossipsub v1.1 spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) recommends this window be "small (in the order of milliseconds)" to prevent peers from gaming P3 scores by simply replaying messages back. A peer can echo a message within ~100ms, so a large window allows score inflation. + +However, real-world implementations use significantly larger values due to practical constraints: +- **Prysm** (Go): 2 seconds - the go-libp2p default for low-latency Go runtime +- **Lodestar** (TypeScript): 12 seconds - accounts for JavaScript I/O lag +- **Ethereum spec proposal**: 400ms was proposed but rejected as "too tight" + +We use **5 seconds** as a balanced middle ground because: +1. **Runtime considerations**: Our implementation is TypeScript (like Lodestar), not Go (like Prysm). JavaScript has higher I/O latency due to single-threaded event loop and garbage collection pauses. +2. **Network variance**: Even on healthy networks, message propagation can vary due to: + - Concurrent validation of multiple messages + - CPU-intensive proof verification + - Network congestion during high transaction volume + - Geographic distribution of validators +3. **Conservative but not excessive**: 5s is 2.5× the Go default (allowing for JS overhead) but still well below Lodestar's 12s, maintaining reasonable protection against replay attacks. +4. **Attack mitigation**: A 5s window still prevents score gaming - peers would need to consistently echo messages within 5s to maintain positive P3 scores, which requires them to stay connected and somewhat functional. + +## meshMessageDeliveriesActivation + +This is the grace period before P3 penalties can be applied to a peer. During this time, the message delivery counter accumulates without any penalty. 
+ +**Why activation is 5× the decay window:** + +We set activation time to **5× the decay window** (10-25 slots depending on topic frequency) because: + +1. **Timer starts at mesh join, not first message**: The activation countdown begins when a peer joins the mesh, not when they receive their first message. During network bootstrap, peers may join before any messages are flowing. + +2. **Bootstrap grace period**: When the network is starting up, message flow may be delayed. Peers need time for the network to stabilize and messages to start propagating. + +3. **Counter convergence**: The threshold is set at 30% of the *converged* counter value. If activation is too short, the counter hasn't approached convergence yet, and honest peers could be penalized unfairly. + +4. **Join timing variance**: Peers may join at any point during a slot. With longer activation time, even peers joining at an unlucky time will have accumulated enough messages before penalties start. + +5. **Ethereum precedent**: Ethereum's Lodestar implementation uses very long activation times (1-2 epochs ≈ 16-32 slots) for similar reasons. + +| Topic | Decay Window | Activation Time (5×) | +|-------|--------------|----------------------| +| checkpoint_proposal | 5 slots (360s) | 25 slots (1800s / 30min) | +| block_proposal | 3 slots (216s) | 15 slots (1080s / 18min) | +| checkpoint_attestation | 2 slots (144s) | 10 slots (720s / 12min) | + +## P1: Time in Mesh (Positive Score) + +P1 rewards peers for time spent in the mesh. 
We use Lodestar-style slot-based normalization: + +```typescript +timeInMeshQuantum = slotDurationMs // Score increases by ~1 per slot +timeInMeshCap = 3600 / slotDurationSeconds // Cap at 1 hour (50 slots for 72s slots) +timeInMeshWeight = MAX_P1_SCORE / cap // Normalized so max P1 = 8 +``` + +**Key properties:** +- Score increases gradually: ~1 per slot of mesh membership +- Caps at 1 hour: prevents runaway positive scores +- Resets on mesh leave: no credit carried after pruning + +**Example (72s slots):** +- After 10 minutes in mesh: P1 ≈ 1.3 +- After 30 minutes in mesh: P1 ≈ 4 +- After 1 hour in mesh: P1 = 8 (max) + +## P2: First Message Deliveries (Positive Score) + +P2 rewards peers who deliver messages first to us. We use convergence-based normalization: + +```typescript +firstMessageDeliveriesDecay = computeDecay(2 slots) // Fast decay +firstMessageDeliveriesCap = convergence(1 msg/heartbeat) +firstMessageDeliveriesWeight = MAX_P2_SCORE / cap // Normalized so max P2 = 25 +``` + +**Key properties:** +- Fast decay (2 slots): rewards recent behavior, not historical +- Caps at convergence: prevents score inflation from bursts +- Resets quickly after mesh leave: decays to near-zero over ~2 slots (e.g., ~144s with 72s slots) + +## P3 Weight Formula + +The P3 weight is calculated to ensure the max penalty equals `MAX_P3_PENALTY_PER_TOPIC` (-34): + +```typescript +// Weight formula: max_penalty / threshold² +meshMessageDeliveriesWeight = MAX_P3_PENALTY_PER_TOPIC / (threshold * threshold) + +// When peer delivers nothing (deficit = threshold): +// penalty = deficit² × weight = threshold² × (-34 / threshold²) = -34 +``` + +This ensures P3 max penalty (-34) exceeds P1 + P2 max (+33), causing mesh pruning. 
+ +## Per-Topic Configuration + +### Topic Types and Expected Rates + +| Topic | Expected/Slot | Decay Window | Notes | +|-------|--------------|--------------|-------| +| `tx` | Unpredictable | N/A | P3/P3b disabled | +| `block_proposal` | N-1 | 3 slots | N = blocks per slot (MBPS mode) | +| `checkpoint_proposal` | 1 | 5 slots | One per slot | +| `checkpoint_attestation` | C (~48) | 2 slots | C = committee size | + +### Transactions (tx) + +Transactions are submitted unpredictably by users, so we cannot set meaningful delivery thresholds. **All scoring (P1, P2, P3, P3b) is disabled** for this topic except P4 (invalid message detection). + +**Rationale:** If P1/P2 were enabled without P3, the tx topic would contribute free positive scores that could offset penalties from other topics, preventing proper mesh pruning of non-contributing peers. + +### Block Proposals (block_proposal) + +In Multi-Block-Per-Slot (MBPS) mode, N-1 block proposals are gossiped per slot (the last block is bundled with the checkpoint). In single-block mode, this is 0. + +### Checkpoint Proposals (checkpoint_proposal) + +Exactly one checkpoint proposal per slot, containing the final block and proof commitments. + +### Checkpoint Attestations (checkpoint_attestation) + +Each committee member sends one attestation per slot. With a target committee size of 48, we expect ~48 attestations per slot. + +### Topic Weights + +All topics use equal weight (1). Block proposals contain transaction hashes, so transactions must propagate for block proposals to validate - making all message types equally important for network health. 
+ +## Configuration Dependencies + +The scoring parameters depend on: + +| Parameter | Source | Default | +|-----------|--------|---------| +| `slotDuration` | L1RollupConstants | 72s | +| `targetCommitteeSize` | L1RollupConstants | 48 | +| `heartbeatInterval` | P2PConfig.gossipsubInterval | 700ms | +| `blockDurationMs` | P2PConfig.blockDurationMs | undefined (single block) | + +## Invalid Message Handling (P4) + +P4 penalizes peers who deliver invalid messages. All topics have this enabled with: +- Weight: -20 +- Decay: Over 4 slots + +Invalid messages include malformed data, invalid signatures, or messages failing validation. + +## Tuning Guidelines + +### Signs of Too-Strict Scoring + +- Honest peers frequently pruned from mesh +- High peer churn +- Slow message propagation despite good network + +**Solution:** Increase thresholds, use longer decay windows + +### Signs of Too-Lenient Scoring + +- Slow or stalled message propagation +- Bad peers remaining in mesh too long +- Network vulnerable to eclipse attacks + +**Solution:** Decrease thresholds, use shorter decay windows + +### Monitoring + +Key metrics to monitor: +- Peer scores distribution +- P3 penalty frequency per topic +- Invalid message rate per peer +- Mesh membership stability + +## Code Structure + +- `scoring.ts` - Global peer score thresholds +- `topic_score_params.ts` - Per-topic parameter calculation +- `index.ts` - Module exports + +## Global Score Thresholds + +Gossipsub uses global thresholds to determine peer behavior based on total score: + +| Threshold | Value | Effect | +|-----------|-------|--------| +| gossipThreshold | -500 | Below this, peer doesn't receive gossip | +| publishThreshold | -1000 | Below this, peer's messages aren't relayed | +| graylistThreshold | -2000 | Below this, all RPCs from peer are ignored | + +### Alignment with Application-Level Scoring + +The thresholds are designed to align with Aztec's application-level peer scoring: + +``` +Total Gossipsub Score = 
TopicScore + (AppScore × AppSpecificWeight) +``` + +With `appSpecificWeight = 10` (topic score assumed ~0): + +| App Score State | App Score | Gossipsub Contribution | Threshold Triggered | +|-----------------|-----------|------------------------|---------------------| +| Healthy | 0 to -49 | 0 to -490 | None | +| Disconnect | -50 | -500 | gossipThreshold | +| Ban | -100 | -1000 | publishThreshold | + +This means (best-effort alignment): +- When a peer reaches **Disconnect** state, they generally stop receiving gossip +- When a peer reaches **Ban** state, their messages are generally not relayed +- **Graylist** requires ban-level score PLUS significant topic penalties (attacks) + +**Important:** Positive topic scores (P1/P2) can temporarily offset app penalties, so alignment is not strict. +Conversely, if topic scores are low, a peer slightly above the disconnect threshold may still dip below `gossipThreshold`. This is acceptable and tends to recover quickly as topic scores accumulate. + +### Topic Score Contribution + +Topic scores provide **burst response** to attacks, while app score provides **stable baseline**: + +- P1 (time in mesh): Max +8 per topic (+24 across 3 topics) +- P2 (first deliveries): Max +25 per topic (+75 across 3 topics, but decays fast) +- P3 (under-delivery): Max -34 per topic (-102 across 3 topics in MBPS; -68 in single-block mode) +- P4 (invalid messages): -20 per invalid message, can spike to -2000+ during attacks + +Example attack scenario: +- App score: -100 (banned) → -1000 gossipsub +- P4 burst (10 invalid messages): -2000 per topic +- **Total: -3000+** → Triggers graylistThreshold + +The P4 penalty decays to 1% over 4 slots (~5 minutes), allowing recovery if the attack stops. + +## Non-Contributing Peers + +### How P3 Handles Under-Delivery + +The P3 (meshMessageDeliveries) penalty applies when a peer's message delivery counter falls below the threshold. 
The penalty formula is: + +``` +deficit = max(0, threshold - counter) +penalty = deficit² × weight +``` + +Where `weight = MAX_P3_PENALTY_PER_TOPIC / (threshold × threshold)`. This design ensures: + +``` +If counter = 0 (delivers nothing): + deficit = threshold + penalty = threshold² × (-34/threshold²) = -34 per topic +``` + +### Score Balance for Mesh Pruning + +For a peer to be pruned from the mesh, their **topic score** must be negative. We balance P1/P2/P3 so that non-contributors get pruned: + +| Scenario | P1 | P2 | P3 | Topic Score | Result | +|----------|----|----|-----|-------------|--------| +| Healthy peer (delivering) | +8 | +25 | 0 | +33 | In mesh | +| New peer (just joined) | +1 | +5 | 0 | +6 | In mesh | +| Non-contributor (1 hour in mesh) | +8 | 0 | -34 | **-26** | **Pruned** | +| Non-contributor (new) | +1 | 0 | -34 | **-33** | **Pruned** | + +The key insight: **P3 max (-34) exceeds P1 + P2 max (+33)**, so even a peer that has been in the mesh for 1 hour will still be pruned if they stop delivering messages. + +### What Happens After Pruning + +When a peer is pruned from the mesh: + +1. **P1 resets to 0**: The timeInMesh counter is cleared +2. **P2 decays to 0**: Fast decay (2-slot window) makes it negligible over minutes +3. **P3b captures the penalty**: The P3 deficit at prune time becomes P3b, which decays slowly + +After pruning, the peer's score consists mainly of P3b: +- **Total P3b across 3 topics: -102** (max) +- **Recovery time**: P3b decays to ~1% over one decay window (2-5 slots = 2-6 minutes) +- **Grafting eligibility**: Peer can be grafted when score ≥ 0, but asymptotic decay means recovery is slow + +### Why Non-Contributors Aren't Disconnected + +After pruning, P3b is capped at -102 total (MBPS mode); in single-block mode, the cap is -68: + +| Threshold | Value | P3b Score | Triggered? 
| +|-----------|-------|-----------|------------| +| gossipThreshold | -500 | -102 (MBPS) / -68 (single) | No | +| publishThreshold | -1000 | -102 (MBPS) / -68 (single) | No | +| graylistThreshold | -2000 | -102 (MBPS) / -68 (single) | No | + +**A score of -102 (MBPS) or -68 (single-block) is well above -500**, so non-contributing peers: +- Are pruned from mesh (good - stops them slowing propagation) +- Still receive gossip (can recover by reconnecting/restarting) +- Are NOT disconnected unless they also have application-level penalties + +### Design Philosophy + +The system distinguishes between: + +| Peer Type | Score Range | Effect | +|-----------|-------------|--------| +| **Productive** | ≥ 0 | Full mesh participation | +| **Unproductive** | -1 to -499 | Pruned from mesh, still receives gossip | +| **Misbehaving** | -500 to -999 | Stops receiving gossip (app: Disconnect) | +| **Malicious** | -1000 to -1999 | Cannot publish (app: Banned) | +| **Attacking** | ≤ -2000 | Graylisted, all RPCs ignored | + +Note: These ranges are approximate; positive topic scores can shift a peer upward temporarily. + +This is similar to Ethereum's approach: non-contributing peers are removed from the mesh (preventing them from slowing propagation) but not disconnected, as they might be starting up or experiencing temporary connectivity issues. + +### When Non-Contributors ARE Penalized + +Non-contributors will trigger thresholds if they also: +1. **Send invalid messages**: P4 penalty of -20 per invalid message accumulates quickly +2. **Fail protocol validation**: Application penalties for deserialization errors, manipulation attempts +3. 
**Violate rate limits**: Repeated per-peer limit hits accumulate application penalties + +## Application-Level Penalties + +Beyond gossipsub's topic scoring, Aztec has application-level penalties for protocol violations: + +### Penalty Severities + +| Severity | Points | Errors to Disconnect | Errors to Ban | +|----------|--------|----------------------|---------------| +| **HighToleranceError** | 2 | 25 | 50 | +| **MidToleranceError** | 10 | 5 | 10 | +| **LowToleranceError** | 50 | 1 | 2 | + +### What Triggers Each Severity + +**HighToleranceError (2 points)** - Transient issues: +- Rate limit exceeded +- Failed responses (FAILURE/UNKNOWN status) +- Recent double spend attempts (within penalty window) + +**MidToleranceError (10 points)** - Protocol violations: +- Block/checkpoint exceeds per-slot cap +- Response hash mismatches +- Duplicate transactions in response +- Unrequested transactions in response + +**LowToleranceError (50 points)** - Serious violations: +- Message deserialization errors +- Invalid message manipulation attempts +- Block number/order mismatches +- Invalid transactions +- Badly formed requests +- Confirmed double spends + +### Score Decay + +Application scores decay by 10% per minute (`decayFactor = 0.9`): +- Score -100 → -90 after 1 minute +- Score -100 → -35 after 10 minutes +- Score -100 → -12 after 20 minutes + +This allows honest peers to recover from temporary issues. 
+ +## Score Calculation Examples + +### Example 1: Honest Peer + +``` +App score: 0 +Topic P3: 0 (delivering messages) +Topic P4: 0 (no invalid messages) +───────────────────────────────── +Total: 0 → Full participation ✓ +``` + +### Example 2: Peer with Rate Limit Issues + +``` +App score: -20 (10 HighToleranceErrors) + → Gossipsub contribution: -200 +Topic P3: -1 (slightly under-delivering) +Topic P4: 0 +───────────────────────────────── +Total: -201 → Still receives gossip ✓ +``` + +### Example 3: Validation Failure + +``` +App score: -50 (1 LowToleranceError for invalid message) + → Gossipsub contribution: -500 +Topic P3: 0 +Topic P4: -20 (the invalid message) +───────────────────────────────── +Total: -520 → Stops receiving gossip (gossipThreshold = -500) + → Application disconnects peer +``` + +### Example 4: Banned Peer + +``` +App score: -100 (2 LowToleranceErrors) + → Gossipsub contribution: -1000 +Topic P3: -2 +Topic P4: -40 (2 invalid messages) +───────────────────────────────── +Total: -1042 → Cannot publish (publishThreshold = -1000) + → Application bans peer +``` + +### Example 5: Active Attack (Burst of Invalid Messages) + +``` +App score: -100 (banned) + → Gossipsub contribution: -1000 +Topic P3: -3 +Topic P4: -200 (10 invalid messages: 10 × -20) +───────────────────────────────── +Total: -1203 → Cannot publish (publishThreshold = -1000) + +If the attacker sends 100 invalid messages quickly: + +Topic P4: -2000 (100 invalid messages: 100 × -20) +───────────────────────────────── +Total: -3003 → Graylisted (graylistThreshold = -2000) + → All RPCs ignored +``` + +### Example 6: Recovery After Attack + +``` +Initial state: Total score -3003 + +After 4 slots (~5 min): + P4 decays to 1%: -2000 → -20 + App score unchanged: -1000 + Total: -1023 → Still banned, but no longer graylisted + +After 10 min: + App score decays: -100 → -35 → -350 contribution + P4 further decayed: ~-5 + Total: -358 → Above gossipThreshold, starting to recover +``` + +## Network 
Outage Analysis
+
+What happens when a peer experiences a network outage and stops delivering messages?
+
+### During the Outage
+
+While the peer is disconnected:
+
+1. **P3 penalty accumulates**: The message delivery counter decays toward 0, causing increasing P3 penalty
+2. **Max P3 penalty reached**: Once counter drops below threshold, penalty hits -34 per topic (-102 total in MBPS; -68 single-block)
+3. **Mesh pruning**: Topic score goes negative → peer is pruned from mesh
+4. **P3b captures penalty**: The P3 deficit at prune time becomes P3b (sticky penalty)
+
+### Outage Timeline
+
+| Time | Event | Score Impact |
+|------|-------|--------------|
+| 0s | Outage begins | P3 = 0 |
+| ~1 decay window (2-5 slots) | Counter decays below threshold | P3 starts decreasing |
+| ~1-2 decay windows | Counter approaches 0 | P3 ≈ -34 per topic |
+| ~1-2 decay windows | Peer pruned from mesh | P3b ≈ -34 per topic |
+| Thereafter | P3b decays slowly | Recovery begins |
+
+Note: If the peer just joined the mesh, P3 penalties only start after
+`meshMessageDeliveriesActivation` (10-25 slots depending on topic frequency).
+
+### Key Insight: No Application Penalties
+
+During a network outage, the peer:
+- **Does NOT send invalid messages** → No P4 penalty
+- **Does NOT violate protocols** → No application-level penalty
+- **Only accumulates topic-level penalties** → Max -102 (P3b, MBPS) or -68 (single-block)
+
+This is the crucial difference from malicious behavior:
+
+| Scenario | App Score | Topic Score | Total | Threshold Hit |
+|----------|-----------|-------------|-------|---------------|
+| Network outage | 0 | -102 (MBPS) / -68 (single) | -102 / -68 | None |
+| Validation failure | -50 | -20 | -520 | gossipThreshold |
+| Malicious peer | -100 | -2000+ | -3000+ | graylistThreshold |
+
+### Recovery After Outage
+
+When the peer reconnects:
+
+1. **Peer re-joins mesh**: Can request graft (topic score must be > 0 for acceptance)
+2. 
**P3b decays**: To ~1% over decay window (2-5 slots depending on topic) +3. **P1 restarts from 0**: timeInMesh counter begins accumulating +4. **P2 restarts from 0**: firstMessageDeliveries counter begins accumulating + +**Recovery timeline:** +- Immediate: Peer can attempt to re-graft +- ~3-5 minutes: P3b decays to near-zero +- ~10+ minutes: P1 builds up again (if staying in mesh) + +### Why This Design Works + +The system correctly distinguishes between: + +| Behavior | Treatment | +|----------|-----------| +| **Network issues** | Pruned from mesh (stops slowing propagation), can recover quickly | +| **Protocol violations** | Disconnected (gossipThreshold), must wait for app score decay | +| **Malicious activity** | Banned/graylisted, requires both app and topic score decay | + +A peer experiencing network problems will: +- Be temporarily removed from mesh propagation (good for network health) +- NOT be disconnected or banned (they haven't misbehaved) +- Recover automatically when connectivity returns +- Retain their connections for recovery + +This matches Ethereum's approach: **honest peers with temporary issues are inconvenienced but not punished**. + +### Rate Limiting During Outages + +Note: Simply not sending messages does NOT trigger rate limit penalties. Rate limits apply to: +- **Per-peer rate limit exceeded** → HighToleranceError (2 points) +- **Other protocol violations** → MidToleranceError or LowToleranceError depending on severity + +A peer that sends nothing receives no rate limit penalties. The only penalty for not delivering messages is P3, which is explicitly designed to be recoverable. 
+ +## References + +- [Gossipsub v1.1 Specification](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) +- [Lighthouse Scoring Implementation](https://github.com/sigp/lighthouse/blob/stable/beacon_node/lighthouse_network/src/peer_manager/score.rs) +- [Lodestar Scoring Implementation](https://github.com/ChainSafe/lodestar/tree/unstable/packages/beacon-node/src/network/gossip) diff --git a/yarn-project/p2p/src/services/gossipsub/index.ts b/yarn-project/p2p/src/services/gossipsub/index.ts new file mode 100644 index 000000000000..95c85d269336 --- /dev/null +++ b/yarn-project/p2p/src/services/gossipsub/index.ts @@ -0,0 +1,2 @@ +export * from './scoring.js'; +export * from './topic_score_params.js'; diff --git a/yarn-project/p2p/src/services/gossipsub/scoring.ts b/yarn-project/p2p/src/services/gossipsub/scoring.ts index 3f118a2db141..7acbb94efb95 100644 --- a/yarn-project/p2p/src/services/gossipsub/scoring.ts +++ b/yarn-project/p2p/src/services/gossipsub/scoring.ts @@ -1,13 +1,37 @@ import type { PeerScoreThresholds } from '@chainsafe/libp2p-gossipsub/score'; /** - * The following params is implemented by Lighthouse at - * https://github.com/sigp/lighthouse/blob/b0ac3464ca5fb1e9d75060b56c83bfaf990a3d25/beacon_node/eth2_libp2p/src/behaviour/gossipsub_scoring_parameters.rs#L83 + * Weight applied to application-level peer scores before contributing to gossipsub score. + * + * Note: positive topic scores can partially offset app penalties, so alignment with + * app-level thresholds is best-effort rather than strict. + */ +export const APP_SPECIFIC_WEIGHT = 10; + +/** + * Gossipsub peer score thresholds aligned with application-level scoring. + * + * These thresholds work with appSpecificWeight=10 to align gossipsub behavior + * with application-level peer states (Healthy → Disconnect → Banned). 
+ * + * Alignment: + * - gossipThreshold (-500): Matches Disconnect state (app score -50 × weight 10) + * - publishThreshold (-1000): Matches Ban state (app score -100 × weight 10) + * - graylistThreshold (-2000): For severe attacks (ban + topic penalties) + * + * The 1:2:4 ratio follows Lodestar's approach and gossipsub spec recommendations. + * + * @see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring */ export const gossipScoreThresholds: PeerScoreThresholds = { - gossipThreshold: -4000, - publishThreshold: -8000, - graylistThreshold: -16000, + /** Below this, peer is not gossiped to (matches Disconnect state) */ + gossipThreshold: -500, + /** Below this, self-published messages are not propagated to peer (matches Ban state) */ + publishThreshold: -1000, + /** Below this, all RPCs from peer are ignored (severe attack scenario) */ + graylistThreshold: -2000, + /** Above this, peer can offer peer exchange (PX) */ acceptPXThreshold: 100, + /** Above this, peer can be grafted to mesh opportunistically */ opportunisticGraftThreshold: 5, }; diff --git a/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts b/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts new file mode 100644 index 000000000000..fed55b4535ff --- /dev/null +++ b/yarn-project/p2p/src/services/gossipsub/topic_score_params.test.ts @@ -0,0 +1,492 @@ +import { TopicType, createTopicString } from '@aztec/stdlib/p2p'; + +import { describe, expect, it } from '@jest/globals'; + +import { + MAX_P3_PENALTY_PER_TOPIC, + TopicScoreParamsFactory, + calculateBlocksPerSlot, + computeConvergence, + computeDecay, + computeThreshold, + createAllTopicScoreParams, + createTopicScoreParamsForTopic, + getDecayWindowSlots, + getExpectedMessagesPerSlot, +} from './topic_score_params.js'; + +describe('Topic Score Params', () => { + // Standard network parameters for testing (matching production values) + const standardParams = { + slotDurationMs: 72000, 
// 72 seconds + heartbeatIntervalMs: 700, // 700ms gossipsub heartbeat + targetCommitteeSize: 48, + }; + + describe('calculateBlocksPerSlot', () => { + it('returns 1 when blockDurationMs is undefined (single block mode)', () => { + expect(calculateBlocksPerSlot(72000, undefined)).toBe(1); + }); + + it('returns 1 when blockDurationMs is 0', () => { + // Edge case - should treat 0 as undefined + expect(calculateBlocksPerSlot(72000, 0)).toBe(1); + }); + + it('calculates correct blocks per slot for MBPS mode', () => { + // With 72s slot and 10s block duration + // Using timetable formula: floor((72 - 1 - 10 - (1 + 2*2 + 12)) / 10) + // = floor((72 - 1 - 10 - 17) / 10) = floor(44 / 10) = 4 + const result = calculateBlocksPerSlot(72000, 10000); + expect(result).toBeGreaterThanOrEqual(1); + }); + + it('returns at least 1 block per slot', () => { + // Even with very long block duration, should return at least 1 + const result = calculateBlocksPerSlot(72000, 60000); + expect(result).toBeGreaterThanOrEqual(1); + }); + }); + + describe('getDecayWindowSlots', () => { + it('returns 5 slots for low frequency topics (<=1 msg/slot)', () => { + expect(getDecayWindowSlots(0)).toBe(5); + expect(getDecayWindowSlots(0.5)).toBe(5); + expect(getDecayWindowSlots(1)).toBe(5); + }); + + it('returns 3 slots for medium frequency topics (2-10 msg/slot)', () => { + expect(getDecayWindowSlots(2)).toBe(3); + expect(getDecayWindowSlots(5)).toBe(3); + expect(getDecayWindowSlots(10)).toBe(3); + }); + + it('returns 2 slots for high frequency topics (>10 msg/slot)', () => { + expect(getDecayWindowSlots(11)).toBe(2); + expect(getDecayWindowSlots(48)).toBe(2); + expect(getDecayWindowSlots(100)).toBe(2); + }); + }); + + describe('computeDecay', () => { + it('returns a value between 0 and 1', () => { + const decay = computeDecay(700, 72000, 5); + expect(decay).toBeGreaterThan(0); + expect(decay).toBeLessThan(1); + }); + + it('produces ~1% after heartbeatsInWindow iterations', () => { + const heartbeatMs = 
700; + const slotMs = 72000; + const decayWindowSlots = 5; + + const decay = computeDecay(heartbeatMs, slotMs, decayWindowSlots); + + // Verify: decay^heartbeatsInWindow should be approximately 0.01 + const heartbeatsPerSlot = slotMs / heartbeatMs; + const heartbeatsInWindow = heartbeatsPerSlot * decayWindowSlots; + const result = Math.pow(decay, heartbeatsInWindow); + + expect(result).toBeCloseTo(0.01, 5); + }); + + it('returns higher decay factor for longer windows', () => { + // Longer window = slower decay = higher decay factor (closer to 1) + const shortWindow = computeDecay(700, 72000, 2); + const longWindow = computeDecay(700, 72000, 5); + + expect(longWindow).toBeGreaterThan(shortWindow); + }); + + it('returns higher decay factor for shorter heartbeat intervals', () => { + // More heartbeats = need slower decay per heartbeat + const longHeartbeat = computeDecay(1000, 72000, 5); + const shortHeartbeat = computeDecay(500, 72000, 5); + + expect(shortHeartbeat).toBeGreaterThan(longHeartbeat); + }); + }); + + describe('computeConvergence', () => { + it('returns rate / (1 - decay) for geometric series', () => { + const messagesPerHeartbeat = 0.1; + const decay = 0.9; + + const convergence = computeConvergence(messagesPerHeartbeat, decay); + + // Expected: 0.1 / (1 - 0.9) = 0.1 / 0.1 = 1 + expect(convergence).toBeCloseTo(1, 10); + }); + + it('returns higher convergence for higher message rates', () => { + const decay = 0.9; + const lowRate = computeConvergence(0.1, decay); + const highRate = computeConvergence(1.0, decay); + + expect(highRate).toBeGreaterThan(lowRate); + }); + + it('returns higher convergence for higher decay (slower decay)', () => { + const rate = 0.1; + const fastDecay = computeConvergence(rate, 0.8); + const slowDecay = computeConvergence(rate, 0.95); + + expect(slowDecay).toBeGreaterThan(fastDecay); + }); + }); + + describe('computeThreshold', () => { + it('returns convergence * conservativeFactor', () => { + const convergence = 10; + const 
factor = 0.3; + + expect(computeThreshold(convergence, factor)).toBe(3); + }); + + it('returns 0 when convergence is 0', () => { + expect(computeThreshold(0, 0.3)).toBe(0); + }); + }); + + describe('getExpectedMessagesPerSlot', () => { + it('returns undefined for tx topic (unpredictable)', () => { + expect(getExpectedMessagesPerSlot(TopicType.tx, 48, 5)).toBeUndefined(); + }); + + it('returns N-1 for block_proposal in MBPS mode', () => { + expect(getExpectedMessagesPerSlot(TopicType.block_proposal, 48, 5)).toBe(4); + expect(getExpectedMessagesPerSlot(TopicType.block_proposal, 48, 3)).toBe(2); + }); + + it('returns 0 for block_proposal in single block mode', () => { + expect(getExpectedMessagesPerSlot(TopicType.block_proposal, 48, 1)).toBe(0); + }); + + it('returns 1 for checkpoint_proposal', () => { + expect(getExpectedMessagesPerSlot(TopicType.checkpoint_proposal, 48, 5)).toBe(1); + expect(getExpectedMessagesPerSlot(TopicType.checkpoint_proposal, 48, 1)).toBe(1); + }); + + it('returns committee size for checkpoint_attestation', () => { + expect(getExpectedMessagesPerSlot(TopicType.checkpoint_attestation, 48, 5)).toBe(48); + expect(getExpectedMessagesPerSlot(TopicType.checkpoint_attestation, 100, 5)).toBe(100); + }); + }); + + describe('TopicScoreParamsFactory', () => { + it('computes shared values once', () => { + const factory = new TopicScoreParamsFactory(standardParams); + + expect(factory.blocksPerSlot).toBe(1); // undefined blockDuration = single block + expect(factory.heartbeatsPerSlot).toBeCloseTo(72000 / 700); + expect(factory.invalidDecay).toBeGreaterThan(0); + expect(factory.invalidDecay).toBeLessThan(1); + }); + + it('uses provided blockDurationMs', () => { + const factory = new TopicScoreParamsFactory({ ...standardParams, blockDurationMs: 10000 }); + + expect(factory.blocksPerSlot).toBeGreaterThan(1); + }); + + describe('createForTopic', () => { + it('disables P3/P3b for tx topic', () => { + const factory = new TopicScoreParamsFactory(standardParams); 
+ const params = factory.createForTopic(TopicType.tx); + + expect(params.meshMessageDeliveriesWeight).toBe(0); + expect(params.meshFailurePenaltyWeight).toBe(0); + }); + + it('disables P3/P3b for block_proposal in single block mode', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.block_proposal); + + // Single block mode = 0 block proposals = disabled + expect(params.meshMessageDeliveriesWeight).toBe(0); + expect(params.meshFailurePenaltyWeight).toBe(0); + }); + + it('enables P3/P3b for block_proposal in MBPS mode', () => { + const factory = new TopicScoreParamsFactory({ ...standardParams, blockDurationMs: 10000 }); + const params = factory.createForTopic(TopicType.block_proposal); + + expect(params.meshMessageDeliveriesWeight).toBeLessThan(0); + expect(params.meshFailurePenaltyWeight).toBeLessThan(0); + }); + + it('enables P3/P3b for checkpoint_proposal', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_proposal); + + expect(params.meshMessageDeliveriesWeight).toBeLessThan(0); + expect(params.meshFailurePenaltyWeight).toBeLessThan(0); + expect(params.meshMessageDeliveriesThreshold).toBeGreaterThan(0); + }); + + it('enables P3/P3b for checkpoint_attestation', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_attestation); + + expect(params.meshMessageDeliveriesWeight).toBeLessThan(0); + expect(params.meshFailurePenaltyWeight).toBeLessThan(0); + expect(params.meshMessageDeliveriesThreshold).toBeGreaterThan(0); + }); + + it('sets higher threshold for attestation topic than checkpoint topic', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const checkpointParams = factory.createForTopic(TopicType.checkpoint_proposal); + const attestationParams = factory.createForTopic(TopicType.checkpoint_attestation); + + // 
Attestation has ~48 messages vs 1 for checkpoint, so higher threshold + expect(attestationParams.meshMessageDeliveriesThreshold).toBeGreaterThan( + checkpointParams.meshMessageDeliveriesThreshold, + ); + }); + + it('all topics have same base params (topicWeight, invalidMessageDeliveries)', () => { + const factory = new TopicScoreParamsFactory(standardParams); + + const txParams = factory.createForTopic(TopicType.tx); + const checkpointParams = factory.createForTopic(TopicType.checkpoint_proposal); + const attestationParams = factory.createForTopic(TopicType.checkpoint_attestation); + + // All should have same topicWeight + expect(txParams.topicWeight).toBe(1); + expect(checkpointParams.topicWeight).toBe(1); + expect(attestationParams.topicWeight).toBe(1); + + // All should have same invalidMessageDeliveries params + expect(txParams.invalidMessageDeliveriesWeight).toBe(-20); + expect(checkpointParams.invalidMessageDeliveriesWeight).toBe(-20); + expect(attestationParams.invalidMessageDeliveriesWeight).toBe(-20); + + expect(txParams.invalidMessageDeliveriesDecay).toBe(checkpointParams.invalidMessageDeliveriesDecay); + expect(checkpointParams.invalidMessageDeliveriesDecay).toBe(attestationParams.invalidMessageDeliveriesDecay); + }); + }); + + describe('createAll', () => { + it('creates params for all topic types', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const allParams = factory.createAll('0.1.0'); + + const topicTypes = Object.values(TopicType); + expect(Object.keys(allParams).length).toBe(topicTypes.length); + + for (const topicType of topicTypes) { + const topicString = createTopicString(topicType, '0.1.0'); + expect(allParams[topicString]).toBeDefined(); + } + }); + }); + }); + + describe('createTopicScoreParamsForTopic (convenience function)', () => { + it('creates params for tx topic', () => { + const params = createTopicScoreParamsForTopic(TopicType.tx, standardParams); + + expect(params.topicWeight).toBe(1); + 
expect(params.meshMessageDeliveriesWeight).toBe(0); + }); + + it('creates params for checkpoint_attestation topic', () => { + const params = createTopicScoreParamsForTopic(TopicType.checkpoint_attestation, standardParams); + + expect(params.topicWeight).toBe(1); + expect(params.meshMessageDeliveriesWeight).toBeLessThan(0); + }); + }); + + describe('createAllTopicScoreParams (convenience function)', () => { + it('creates params for all topics', () => { + const allParams = createAllTopicScoreParams('0.1.0', standardParams); + + expect(Object.keys(allParams).length).toBe(Object.values(TopicType).length); + }); + + it('uses correct topic string format', () => { + const allParams = createAllTopicScoreParams('0.1.0', standardParams); + const expectedTopicString = createTopicString(TopicType.tx, '0.1.0'); + + expect(allParams[expectedTopicString]).toBeDefined(); + }); + }); + + describe('mathematical properties', () => { + it('decay factor produces decreasing counter values', () => { + const decay = computeDecay(700, 72000, 5); + let counter = 100; + + // Simulate several heartbeats + for (let i = 0; i < 10; i++) { + const newCounter = counter * decay; + expect(newCounter).toBeLessThan(counter); + counter = newCounter; + } + }); + + it('counter with constant input converges to expected value', () => { + const messagesPerHeartbeat = 0.5; + const decay = computeDecay(700, 72000, 5); + const expectedConvergence = computeConvergence(messagesPerHeartbeat, decay); + + // Simulate many heartbeats with constant message arrival + let counter = 0; + for (let i = 0; i < 1000; i++) { + counter = counter * decay + messagesPerHeartbeat; + } + + // Counter should converge close to expected value + expect(counter).toBeCloseTo(expectedConvergence, 1); + }); + + it('weight produces meaningful penalty when below threshold', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_proposal); + + const threshold = 
params.meshMessageDeliveriesThreshold; + const weight = params.meshMessageDeliveriesWeight; + + // If counter is 0 (way below threshold), penalty should be threshold^2 * |weight| = MAX_P3_PENALTY_PER_TOPIC + // deficit = max(0, threshold - counter)^2 = threshold^2 + // penalty = deficit * weight (negative) + const penalty = threshold * threshold * weight; + + // Should produce max penalty of MAX_P3_PENALTY_PER_TOPIC (-34) + // This exceeds P1 + P2 (33) to ensure non-contributing peers get pruned + expect(penalty).toBeLessThan(0); + expect(penalty).toBeCloseTo(MAX_P3_PENALTY_PER_TOPIC, 5); + }); + }); + + describe('realistic network scenarios', () => { + it('configures checkpoint_proposal for 1 msg/slot', () => { + const params = createTopicScoreParamsForTopic(TopicType.checkpoint_proposal, standardParams); + + // Should use 5-slot decay window for 1 msg/slot + // Threshold should be ~30% of convergence + expect(params.meshMessageDeliveriesThreshold).toBeGreaterThan(0); + expect(params.meshMessageDeliveriesThreshold).toBeLessThan(1); // Below 1 msg due to 30% factor + + // Activation should be 5x the decay window (5 slots × 5) for bootstrap grace period + expect(params.meshMessageDeliveriesActivation).toBe(72000 * 5 * 5); + + // Window should be 5 seconds (balanced for TypeScript runtime) + expect(params.meshMessageDeliveriesWindow).toBe(5000); + }); + + it('configures checkpoint_attestation for 48 msg/slot', () => { + const params = createTopicScoreParamsForTopic(TopicType.checkpoint_attestation, standardParams); + + // Should use 2-slot decay window for high volume + // Threshold should be ~30% of convergence for 48 msgs/slot + expect(params.meshMessageDeliveriesThreshold).toBeGreaterThan(1); + + // Activation should be 5x the decay window (2 slots × 5) for bootstrap grace period + expect(params.meshMessageDeliveriesActivation).toBe(72000 * 2 * 5); + + // Cap should use 8x factor for high volume topics + 
expect(params.meshMessageDeliveriesCap).toBeGreaterThanOrEqual(params.meshMessageDeliveriesThreshold * 8); + }); + }); + + describe('P1/P2/P3 score balance', () => { + it('P1 is configured with slot-based quantum for topics with P3 enabled', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_proposal); + + // P1 quantum should be slot duration (score increases by ~1 per slot) + expect(params.timeInMeshQuantum).toBe(standardParams.slotDurationMs); + + // P1 cap should be number of slots in 1 hour + const expectedCap = 3600 / (standardParams.slotDurationMs / 1000); + expect(params.timeInMeshCap).toBe(expectedCap); + + // P1 weight should give max score of MAX_P1_SCORE (8) + const maxP1 = params.timeInMeshCap * params.timeInMeshWeight; + expect(maxP1).toBeCloseTo(8, 5); + }); + + it('P2 is configured with convergence-based cap for topics with P3 enabled', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_proposal); + + // P2 cap and weight should give max score of MAX_P2_SCORE (25) + const maxP2 = params.firstMessageDeliveriesCap * params.firstMessageDeliveriesWeight; + expect(maxP2).toBeCloseTo(25, 5); + }); + + it('P1 and P2 are disabled for tx topic (no free positive scores)', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.tx); + + // P1 should be disabled (weight = 0 or cap = 0) + expect(params.timeInMeshWeight).toBe(0); + expect(params.timeInMeshCap).toBe(0); + + // P2 should be disabled + expect(params.firstMessageDeliveriesWeight).toBe(0); + expect(params.firstMessageDeliveriesCap).toBe(0); + }); + + it('P3 max penalty exceeds P1 + P2 to ensure pruning', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_proposal); + + // Calculate max scores + 
const maxP1 = params.timeInMeshCap * params.timeInMeshWeight; + const maxP2 = params.firstMessageDeliveriesCap * params.firstMessageDeliveriesWeight; + const maxP3 = + params.meshMessageDeliveriesThreshold * + params.meshMessageDeliveriesThreshold * + params.meshMessageDeliveriesWeight; + + // P3 (negative) must exceed P1 + P2 (positive) for pruning to occur + // |P3| > P1 + P2 + expect(Math.abs(maxP3)).toBeGreaterThan(maxP1 + maxP2); + }); + + it('total P3b across all topics is approximately -102', () => { + const factory = new TopicScoreParamsFactory(standardParams); + + // Topics with P3 enabled: checkpoint_proposal, checkpoint_attestation, block_proposal (in MBPS) + const mbpsParams = { ...standardParams, blockDurationMs: 4000 }; + const mbpsFactory = new TopicScoreParamsFactory(mbpsParams); + + const checkpointParams = factory.createForTopic(TopicType.checkpoint_proposal); + const attestationParams = factory.createForTopic(TopicType.checkpoint_attestation); + const blockParams = mbpsFactory.createForTopic(TopicType.block_proposal); + + // Calculate max P3 for each topic + const p3Checkpoint = + checkpointParams.meshMessageDeliveriesThreshold ** 2 * checkpointParams.meshMessageDeliveriesWeight; + const p3Attestation = + attestationParams.meshMessageDeliveriesThreshold ** 2 * attestationParams.meshMessageDeliveriesWeight; + const p3Block = blockParams.meshMessageDeliveriesThreshold ** 2 * blockParams.meshMessageDeliveriesWeight; + + // Each should be approximately -34 + expect(p3Checkpoint).toBeCloseTo(-34, 0); + expect(p3Attestation).toBeCloseTo(-34, 0); + expect(p3Block).toBeCloseTo(-34, 0); + + // Total should be approximately -102 + expect(p3Checkpoint + p3Attestation + p3Block).toBeCloseTo(-102, 0); + }); + + it('non-contributing peer has negative topic score and gets pruned', () => { + const factory = new TopicScoreParamsFactory(standardParams); + const params = factory.createForTopic(TopicType.checkpoint_proposal); + + // Simulate a peer that has been in 
mesh for 1 hour (max P1) but delivers nothing + const maxP1 = params.timeInMeshCap * params.timeInMeshWeight; // ~8 + const p2Score = 0; // No first deliveries + const maxP3Penalty = params.meshMessageDeliveriesThreshold ** 2 * params.meshMessageDeliveriesWeight; // ~-34 + + const topicScore = maxP1 + p2Score + maxP3Penalty; + + // Topic score should be negative, causing mesh pruning + expect(topicScore).toBeLessThan(0); + }); + }); +}); diff --git a/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts b/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts new file mode 100644 index 000000000000..aca68a536843 --- /dev/null +++ b/yarn-project/p2p/src/services/gossipsub/topic_score_params.ts @@ -0,0 +1,451 @@ +import { TopicType, createTopicString } from '@aztec/stdlib/p2p'; +import { calculateMaxBlocksPerSlot } from '@aztec/stdlib/timetable'; + +import { createTopicScoreParams } from '@chainsafe/libp2p-gossipsub/score'; + +/** + * Network parameters needed to compute topic-specific gossipsub scoring parameters. + */ +export type TopicScoringNetworkParams = { + /** L2 slot duration in milliseconds */ + slotDurationMs: number; + /** Gossipsub heartbeat interval in milliseconds */ + heartbeatIntervalMs: number; + /** Target committee size (number of validators expected to attest per slot) */ + targetCommitteeSize: number; + /** Duration per block in milliseconds when building multiple blocks per slot. If undefined, single block mode. */ + blockDurationMs?: number; +}; + +/** + * Calculates the number of blocks per slot based on timing parameters. + * Uses the shared calculation from @aztec/stdlib/timetable. 
+ * + * @param slotDurationMs - L2 slot duration in milliseconds + * @param blockDurationMs - Duration per block in milliseconds (undefined = single block mode) + * @returns Number of blocks per slot + */ +export function calculateBlocksPerSlot(slotDurationMs: number, blockDurationMs: number | undefined): number { + return calculateMaxBlocksPerSlot(slotDurationMs / 1000, blockDurationMs ? blockDurationMs / 1000 : undefined); +} + +/** + * Determines the decay window in slots based on expected message frequency. + * Low-frequency topics need longer decay windows to accumulate meaningful counter values. + * + * @param expectedMessagesPerSlot - Expected messages per slot for this topic + * @returns Number of slots over which the counter should decay to ~1% + */ +export function getDecayWindowSlots(expectedMessagesPerSlot: number): number { + if (expectedMessagesPerSlot <= 1) { + return 5; // Low frequency: decay over 5 slots + } else if (expectedMessagesPerSlot <= 10) { + return 3; // Medium frequency: decay over 3 slots + } else { + return 2; // High frequency: decay over 2 slots + } +} + +/** + * Computes the decay factor for exponential decay over a given window. + * After `heartbeatsInWindow` heartbeats, the counter decays to ~1% of its original value. 
+ * + * @param heartbeatIntervalMs - Gossipsub heartbeat interval in milliseconds + * @param slotDurationMs - L2 slot duration in milliseconds + * @param decayWindowSlots - Number of slots over which to decay + * @returns Decay factor (0 < decay < 1), applied each heartbeat + */ +export function computeDecay(heartbeatIntervalMs: number, slotDurationMs: number, decayWindowSlots: number): number { + const heartbeatsPerSlot = slotDurationMs / heartbeatIntervalMs; + const heartbeatsInWindow = heartbeatsPerSlot * decayWindowSlots; + + // Decay to 1% over the window: decay^heartbeatsInWindow = 0.01 + // decay = 0.01^(1/heartbeatsInWindow) + return Math.pow(0.01, 1 / heartbeatsInWindow); +} + +/** + * Computes the steady-state convergence value for a decaying counter. + * If messages arrive at a constant rate and decay is applied each heartbeat, + * the counter converges to: rate / (1 - decay) + * + * @param messagesPerHeartbeat - Expected messages per heartbeat + * @param decay - Decay factor applied each heartbeat + * @returns Convergence value (steady-state counter value) + */ +export function computeConvergence(messagesPerHeartbeat: number, decay: number): number { + return messagesPerHeartbeat / (1 - decay); +} + +/** + * Computes a conservative threshold for mesh message deliveries. + * The threshold should be low enough to avoid penalizing honest peers with normal variance. + * + * @param convergence - Steady-state counter value + * @param conservativeFactor - Fraction of convergence to use as threshold (e.g., 0.3) + * @returns Threshold value + */ +export function computeThreshold(convergence: number, conservativeFactor: number): number { + return convergence * conservativeFactor; +} + +/** + * Gets the expected messages per slot for a given topic type. 
+ * + * @param topicType - The topic type + * @param targetCommitteeSize - Target committee size + * @param blocksPerSlot - Number of blocks per slot + * @returns Expected messages per slot, or undefined if unpredictable + */ +export function getExpectedMessagesPerSlot( + topicType: TopicType, + targetCommitteeSize: number, + blocksPerSlot: number, +): number | undefined { + switch (topicType) { + case TopicType.tx: + // Transactions are unpredictable - disable mesh message delivery scoring + return undefined; + + case TopicType.block_proposal: + // In MBPS mode, N-1 block proposals per slot (last one bundled with checkpoint) + // In single block mode (blocksPerSlot=1), this is 0 + return Math.max(0, blocksPerSlot - 1); + + case TopicType.checkpoint_proposal: + // Exactly 1 checkpoint proposal per slot + return 1; + + case TopicType.checkpoint_attestation: + // Each committee member sends one attestation per slot + return targetCommitteeSize; + + default: + return undefined; + } +} + +/** Conservative factor for threshold calculation (30% of convergence) */ +const CONSERVATIVE_FACTOR = 0.3; + +/** Number of slots over which invalid message penalty decays */ +const INVALID_DECAY_WINDOW_SLOTS = 4; + +/** Weight for invalid message deliveries penalty */ +const INVALID_MESSAGE_WEIGHT = -20; + +/** Mesh message deliveries window in milliseconds (5 seconds - balanced for TypeScript runtime) */ +const MESH_DELIVERIES_WINDOW_MS = 5000; + +/** + * Multiplier for activation time to provide extra grace period during network bootstrap. + * The activation timer starts from mesh join time, not from first message received. + * During bootstrap, peers may join before messages start flowing, so we need extra time. 
+ */ +const ACTIVATION_MULTIPLIER = 5; + +// ============================================================================ +// P1 (timeInMesh) Configuration +// ============================================================================ +// P1 rewards peers for time spent in the mesh. Following Lodestar's approach, +// we normalize the score so that max P1 = MAX_P1_SCORE after P1_CAP_TIME_SECONDS. +// +// Formula: P1 = min(timeInMesh / quantum, cap) * weight +// - quantum = slotDurationMs (one increment per slot worth of time) +// - cap = P1_CAP_TIME_SECONDS / slotSeconds (number of slots to reach cap) +// - weight = MAX_P1_SCORE / cap +// +// This ensures: max P1 = cap * weight = MAX_P1_SCORE + +/** Maximum P1 score contribution per topic */ +export const MAX_P1_SCORE = 8; + +/** Time in seconds to reach P1 cap (1 hour) */ +const P1_CAP_TIME_SECONDS = 3600; + +// ============================================================================ +// P2 (firstMessageDeliveries) Configuration +// ============================================================================ +// P2 rewards peers who deliver messages first. We normalize so max P2 = MAX_P2_SCORE. +// P2 uses a decaying counter, so we set the cap based on convergence and scale the weight. +// +// Formula: P2 = min(firstMessageDeliveries, cap) * weight +// - cap = convergence value for first deliveries +// - weight = MAX_P2_SCORE / cap + +/** Maximum P2 score contribution per topic */ +export const MAX_P2_SCORE = 25; + +/** Decay window for first message deliveries in slots (fast decay) */ +const P2_DECAY_WINDOW_SLOTS = 2; + +// ============================================================================ +// P3 (meshMessageDeliveries) Configuration +// ============================================================================ +// P3 penalizes peers who under-deliver messages. 
For a peer to be pruned from +// the mesh, their topic score must be negative: P1 + P2 + P3 < 0 +// +// Therefore, P3 max penalty must exceed (P1 + P2) to cause pruning: +// |P3| > P1 + P2 +// |P3| > 8 + 25 = 33 +// +// We set P3 max = -34 per topic (slightly more than P1+P2) to ensure pruning. +// With 3 topics having P3 enabled, total P3b after pruning = -102. +// +// With appSpecificWeight=10, ~20 HighTolerance errors (-40 app score) plus max P3b (-102) +// would cross gossipThreshold (-500). This keeps non-contributors from being disconnected +// unless they also accrue app-level penalties. +// +// The weight formula ensures max penalty equals MAX_P3_PENALTY_PER_TOPIC: +// weight = MAX_P3_PENALTY_PER_TOPIC / threshold² +// When deficit = threshold: penalty = threshold² * weight = MAX_P3_PENALTY_PER_TOPIC + +/** Maximum P3 penalty per topic (must exceed P1 + P2 to cause pruning) */ +export const MAX_P3_PENALTY_PER_TOPIC = -(MAX_P1_SCORE + MAX_P2_SCORE + 1); // -34 + +/** Number of topics with P3 enabled in MBPS mode (block_proposal + checkpoint_proposal + checkpoint_attestation) */ +export const NUM_P3_ENABLED_TOPICS = 3; + +/** Total maximum P3b penalty across all topics after pruning in MBPS mode */ +export const TOTAL_MAX_P3B_PENALTY = MAX_P3_PENALTY_PER_TOPIC * NUM_P3_ENABLED_TOPICS; // -102 + +/** + * Factory class for creating gossipsub topic scoring parameters. + * Computes shared values once and reuses them across all topics. 
+ */ +export class TopicScoreParamsFactory { + /** Number of blocks per slot based on timetable configuration */ + public readonly blocksPerSlot: number; + + /** Decay factor for invalid message penalties (P4) */ + public readonly invalidDecay: number; + + /** Number of heartbeats per slot */ + public readonly heartbeatsPerSlot: number; + + /** P1: Time in mesh quantum (slot duration in ms - score increases by ~1 per slot) */ + public readonly timeInMeshQuantum: number; + + /** P1: Time in mesh cap (number of slots to reach max score) */ + public readonly timeInMeshCap: number; + + /** P1: Time in mesh weight (normalized so max P1 = MAX_P1_SCORE) */ + public readonly timeInMeshWeight: number; + + /** P2: First message deliveries decay factor */ + public readonly firstMessageDeliveriesDecay: number; + + /** P2: First message deliveries cap (convergence-based) */ + public readonly firstMessageDeliveriesCap: number; + + /** P2: First message deliveries weight (normalized so max P2 = MAX_P2_SCORE) */ + public readonly firstMessageDeliveriesWeight: number; + + /** Base parameters common to all topics */ + private readonly baseParams: { + topicWeight: number; + invalidMessageDeliveriesWeight: number; + invalidMessageDeliveriesDecay: number; + // P1: timeInMesh + timeInMeshQuantum: number; + timeInMeshCap: number; + timeInMeshWeight: number; + // P2: firstMessageDeliveries + firstMessageDeliveriesDecay: number; + firstMessageDeliveriesCap: number; + firstMessageDeliveriesWeight: number; + }; + + constructor(private readonly params: TopicScoringNetworkParams) { + const { slotDurationMs, heartbeatIntervalMs, blockDurationMs } = params; + + // Compute values that are the same for all topics + this.blocksPerSlot = calculateBlocksPerSlot(slotDurationMs, blockDurationMs); + this.heartbeatsPerSlot = slotDurationMs / heartbeatIntervalMs; + this.invalidDecay = computeDecay(heartbeatIntervalMs, slotDurationMs, INVALID_DECAY_WINDOW_SLOTS); + + // P1: timeInMesh - Lodestar style 
slot-based normalization + // quantum = slot duration, so score increases by ~1 per slot of mesh membership + // cap = number of slots in P1_CAP_TIME_SECONDS (1 hour) + // weight = MAX_P1_SCORE / cap, so max P1 = cap * weight = MAX_P1_SCORE + const slotDurationSeconds = slotDurationMs / 1000; + this.timeInMeshQuantum = slotDurationMs; + this.timeInMeshCap = P1_CAP_TIME_SECONDS / slotDurationSeconds; + this.timeInMeshWeight = MAX_P1_SCORE / this.timeInMeshCap; + + // P2: firstMessageDeliveries - convergence-based cap with normalized weight + // Uses fast decay (2 slots) so it rewards recent first deliveries + // cap = convergence at 1 first delivery per heartbeat (theoretical max rate) + // weight = MAX_P2_SCORE / cap, so max P2 = cap * weight = MAX_P2_SCORE + this.firstMessageDeliveriesDecay = computeDecay(heartbeatIntervalMs, slotDurationMs, P2_DECAY_WINDOW_SLOTS); + // Convergence for 1 message per heartbeat (generous estimate for first deliveries) + this.firstMessageDeliveriesCap = computeConvergence(1, this.firstMessageDeliveriesDecay); + this.firstMessageDeliveriesWeight = MAX_P2_SCORE / this.firstMessageDeliveriesCap; + + // Base params are identical for all topics + this.baseParams = { + topicWeight: 1, + invalidMessageDeliveriesWeight: INVALID_MESSAGE_WEIGHT, + invalidMessageDeliveriesDecay: this.invalidDecay, + // P1: timeInMesh (same for all topics) + timeInMeshQuantum: this.timeInMeshQuantum, + timeInMeshCap: this.timeInMeshCap, + timeInMeshWeight: this.timeInMeshWeight, + // P2: firstMessageDeliveries (same for all topics) + firstMessageDeliveriesDecay: this.firstMessageDeliveriesDecay, + firstMessageDeliveriesCap: this.firstMessageDeliveriesCap, + firstMessageDeliveriesWeight: this.firstMessageDeliveriesWeight, + }; + } + + /** + * Creates scoring parameters for topics with unpredictable or zero message rates. + * Disables P1, P2, P3, and P3b to avoid unbalanced positive score accumulation. 
+ * + * Rationale: If P1/P2 were enabled without P3, the topic would contribute free + * positive scores that could offset penalties from other topics, preventing + * proper mesh pruning of non-contributing peers. + */ + private createDisabledP3Params(): ReturnType { + return createTopicScoreParams({ + topicWeight: 1, + // P1: timeInMesh - disabled (no free positive scores) + timeInMeshQuantum: 1, + timeInMeshCap: 0, + timeInMeshWeight: 0, + // P2: firstMessageDeliveries - disabled + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: 0, + firstMessageDeliveriesWeight: 0, + // P3: meshMessageDeliveries - disabled + meshMessageDeliveriesWeight: 0, + meshMessageDeliveriesDecay: 0.5, + meshMessageDeliveriesThreshold: 0, + meshMessageDeliveriesWindow: 0, + meshMessageDeliveriesActivation: 0, + meshMessageDeliveriesCap: 0, + // P3b: meshFailurePenalty - disabled + meshFailurePenaltyWeight: 0, + meshFailurePenaltyDecay: 0.5, + // P4: invalidMessageDeliveries - still enabled for attack detection + invalidMessageDeliveriesWeight: INVALID_MESSAGE_WEIGHT, + invalidMessageDeliveriesDecay: this.invalidDecay, + }); + } + + /** + * Creates scoring parameters for topics with predictable message rates. + * Enables P1, P2, P3, and P3b for balanced scoring. 
+ * + * The scoring is designed so that: + * - P1 + P2 max = 8 + 25 = 33 (positive rewards for good behavior) + * - P3 max = -34 (penalty exceeds P1+P2 to ensure pruning of non-contributors) + * - After pruning: P1 resets, P2 decays, P3b persists with slow decay + * + * @param expectedPerSlot - Expected messages per slot + */ + private createEnabledP3Params(expectedPerSlot: number): ReturnType { + const { slotDurationMs, heartbeatIntervalMs } = this.params; + + // Calculate decay based on message frequency + const decayWindowSlots = getDecayWindowSlots(expectedPerSlot); + const decay = computeDecay(heartbeatIntervalMs, slotDurationMs, decayWindowSlots); + + // Calculate convergence and threshold + const messagesPerHeartbeat = expectedPerSlot / this.heartbeatsPerSlot; + const convergence = computeConvergence(messagesPerHeartbeat, decay); + const threshold = computeThreshold(convergence, CONSERVATIVE_FACTOR); + + // Cap factor: higher for high-volume topics + const capFactor = expectedPerSlot > 10 ? 8 : 4; + + // P3 Weight: scaled so max penalty = MAX_P3_PENALTY_PER_TOPIC (-34) + // When deficit = threshold (peer delivers nothing): + // penalty = deficit² × weight = threshold² × (MAX_P3_PENALTY_PER_TOPIC / threshold²) = MAX_P3_PENALTY_PER_TOPIC + const meshDeliveriesWeight = threshold > 0 ? MAX_P3_PENALTY_PER_TOPIC / (threshold * threshold) : 0; + + // Activation time: use the decay window multiplied by ACTIVATION_MULTIPLIER for extra grace + // during network bootstrap. The timer starts from mesh join time, not from first message, + // so peers joining before messages flow need extra time to accumulate counter values. 
+ const activationMs = slotDurationMs * decayWindowSlots * ACTIVATION_MULTIPLIER; + + return createTopicScoreParams({ + ...this.baseParams, + // P3: meshMessageDeliveries + meshMessageDeliveriesWeight: meshDeliveriesWeight, + meshMessageDeliveriesDecay: decay, + meshMessageDeliveriesThreshold: threshold, + meshMessageDeliveriesWindow: MESH_DELIVERIES_WINDOW_MS, + meshMessageDeliveriesActivation: activationMs, + meshMessageDeliveriesCap: Math.max(threshold * capFactor, 2), + // P3b: meshFailurePenalty (same weight and decay as P3) + meshFailurePenaltyWeight: meshDeliveriesWeight, + meshFailurePenaltyDecay: decay, + }); + } + + /** + * Creates topic score parameters for a specific topic type. + * + * @param topicType - The topic type + * @returns TopicScoreParams for the topic + */ + createForTopic(topicType: TopicType): ReturnType { + const expectedPerSlot = getExpectedMessagesPerSlot(topicType, this.params.targetCommitteeSize, this.blocksPerSlot); + + // For unpredictable topics (tx) or topics with 0 expected messages, disable P3/P3b + if (expectedPerSlot === undefined || expectedPerSlot === 0) { + return this.createDisabledP3Params(); + } + + return this.createEnabledP3Params(expectedPerSlot); + } + + /** + * Creates all topic score parameters for gossipsub configuration. + * + * @param protocolVersion - Protocol version string for topic naming + * @returns Record mapping topic strings to their score parameters + */ + createAll(protocolVersion: string): Record> { + const topics: Record> = {}; + + for (const topicType of Object.values(TopicType)) { + const topicString = createTopicString(topicType, protocolVersion); + topics[topicString] = this.createForTopic(topicType); + } + + return topics; + } +} + +/** + * Creates topic score parameters for a specific topic type. + * Convenience function that creates a factory internally. 
+ * + * @param topicType - The topic type + * @param params - Network parameters for scoring calculation + * @returns TopicScoreParams for the topic + */ +export function createTopicScoreParamsForTopic( + topicType: TopicType, + params: TopicScoringNetworkParams, +): ReturnType { + const factory = new TopicScoreParamsFactory(params); + return factory.createForTopic(topicType); +} + +/** + * Creates all topic score parameters for gossipsub configuration. + * + * @param protocolVersion - Protocol version string for topic naming + * @param params - Network parameters for scoring calculation + * @returns Record mapping topic strings to their score parameters + */ +export function createAllTopicScoreParams( + protocolVersion: string, + params: TopicScoringNetworkParams, +): Record> { + const factory = new TopicScoreParamsFactory(params); + return factory.createAll(protocolVersion); +} diff --git a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts index 92b368c645a7..255b5206121e 100644 --- a/yarn-project/p2p/src/services/libp2p/libp2p_service.ts +++ b/yarn-project/p2p/src/services/libp2p/libp2p_service.ts @@ -45,7 +45,7 @@ import { type GossipsubMessage, gossipsub, } from '@chainsafe/libp2p-gossipsub'; -import { createPeerScoreParams, createTopicScoreParams } from '@chainsafe/libp2p-gossipsub/score'; +import { createPeerScoreParams } from '@chainsafe/libp2p-gossipsub/score'; import { SignaturePolicy } from '@chainsafe/libp2p-gossipsub/types'; import { noise } from '@chainsafe/libp2p-noise'; import { yamux } from '@chainsafe/libp2p-yamux'; @@ -80,7 +80,8 @@ import { getVersions } from '../../versioning.js'; import { AztecDatastore } from '../data_store.js'; import { DiscV5Service } from '../discv5/discV5_service.js'; import { SnappyTransform, fastMsgIdFn, getMsgIdFn, msgIdToStrFn } from '../encoding.js'; -import { gossipScoreThresholds } from '../gossipsub/scoring.js'; +import { APP_SPECIFIC_WEIGHT, 
gossipScoreThresholds } from '../gossipsub/scoring.js'; +import { createAllTopicScoreParams } from '../gossipsub/topic_score_params.js'; import type { PeerManagerInterface } from '../peer-manager/interface.js'; import { PeerManager } from '../peer-manager/peer_manager.js'; import { PeerScoring } from '../peer-manager/peer_scoring.js'; @@ -311,11 +312,6 @@ export class LibP2PService extends const versions = getVersions(config); const protocolVersion = compressComponentVersions(versions); - const txTopic = createTopicString(TopicType.tx, protocolVersion); - const blockProposalTopic = createTopicString(TopicType.block_proposal, protocolVersion); - const checkpointProposalTopic = createTopicString(TopicType.checkpoint_proposal, protocolVersion); - const checkpointAttestationTopic = createTopicString(TopicType.checkpoint_attestation, protocolVersion); - const preferredPeersEnrs: ENR[] = config.preferredPeers.map(enr => ENR.decodeTxt(enr)); const directPeers = ( await Promise.all( @@ -335,6 +331,15 @@ export class LibP2PService extends const announceTcpMultiaddr = config.p2pIp ? [convertToMultiaddr(config.p2pIp, p2pPort, 'tcp')] : []; + // Create dynamic topic score params based on network configuration + const l1Constants = epochCache.getL1Constants(); + const topicScoreParams = createAllTopicScoreParams(protocolVersion, { + slotDurationMs: l1Constants.slotDuration * 1000, + heartbeatIntervalMs: config.gossipsubInterval, + targetCommitteeSize: l1Constants.targetCommitteeSize, + blockDurationMs: config.blockDurationMs, + }); + const node = await createLibp2p({ start: false, peerId, @@ -430,28 +435,7 @@ export class LibP2PService extends scoreParams: createPeerScoreParams({ // IPColocation factor can be disabled for local testing - default to -5 IPColocationFactorWeight: config.debugDisableColocationPenalty ? 
0 : -5.0, - topics: { - [txTopic]: createTopicScoreParams({ - topicWeight: 1, - invalidMessageDeliveriesWeight: -20, - invalidMessageDeliveriesDecay: 0.5, - }), - [blockProposalTopic]: createTopicScoreParams({ - topicWeight: 1, - invalidMessageDeliveriesWeight: -20, - invalidMessageDeliveriesDecay: 0.5, - }), - [checkpointProposalTopic]: createTopicScoreParams({ - topicWeight: 1, - invalidMessageDeliveriesWeight: -20, - invalidMessageDeliveriesDecay: 0.5, - }), - [checkpointAttestationTopic]: createTopicScoreParams({ - topicWeight: 1, - invalidMessageDeliveriesWeight: -20, - invalidMessageDeliveriesDecay: 0.5, - }), - }, + topics: topicScoreParams, }), }) as (components: GossipSubComponents) => GossipSub, components: (components: { connectionManager: ConnectionManager }) => ({ @@ -477,8 +461,12 @@ export class LibP2PService extends epochCache, ); - // Update gossipsub score params - node.services.pubsub.score.params.appSpecificWeight = 10; + // Configure application-specific scoring for gossipsub. + // The weight scales app score to align with gossipsub thresholds: + // - Disconnect (-50) × 10 = -500 = gossipThreshold (stops receiving gossip) + // - Ban (-100) × 10 = -1000 = publishThreshold (cannot publish) + // Note: positive topic scores can offset penalties, so alignment is best-effort. + node.services.pubsub.score.params.appSpecificWeight = APP_SPECIFIC_WEIGHT; node.services.pubsub.score.params.appSpecificScore = (peerId: string) => peerManager.shouldDisableP2PGossip(peerId) ? 
-Infinity : peerManager.getPeerScore(peerId); diff --git a/yarn-project/p2p/src/services/peer-manager/peer_scoring.ts b/yarn-project/p2p/src/services/peer-manager/peer_scoring.ts index 9de2305b9ccf..695eaa702318 100644 --- a/yarn-project/p2p/src/services/peer-manager/peer_scoring.ts +++ b/yarn-project/p2p/src/services/peer-manager/peer_scoring.ts @@ -14,6 +14,21 @@ import type { PeerId } from '@libp2p/interface'; import type { P2PConfig } from '../../config.js'; +/** + * Application-level peer penalties. + * + * These scores are multiplied by appSpecificWeight (10) when contributing to gossipsub score. + * The values are designed to align with gossipsub thresholds: + * + * - LowToleranceError (50): 1 error → app score -50 → gossipsub -500 → gossipThreshold + * - MidToleranceError (10): 5 errors → app score -50 → gossipsub -500 → gossipThreshold + * - HighToleranceError (2): 25 errors → app score -50 → gossipsub -500 → gossipThreshold + * + * Examples of each severity: + * - LowToleranceError: Invalid messages, deserialization errors, manipulation attempts + * - MidToleranceError: Hash mismatches, protocol violations + * - HighToleranceError: Rate limit exceeded, failed responses, transient errors + */ const DefaultPeerPenalties = { [PeerErrorSeverity.LowToleranceError]: 50, [PeerErrorSeverity.MidToleranceError]: 10, @@ -26,6 +41,16 @@ export enum PeerScoreState { Healthy, } +/** + * Score thresholds for peer states. + * + * These values align with gossipsub thresholds when multiplied by appSpecificWeight (10): + * - MIN_SCORE_BEFORE_DISCONNECT (-50) × 10 = -500 = gossipThreshold + * - MIN_SCORE_BEFORE_BAN (-100) × 10 = -1000 = publishThreshold + * + * This ensures that when a peer is disconnected at the application level, + * they also stop receiving gossip, and when banned, they cannot publish. 
+ */ // TODO: move into config / constants const MIN_SCORE_BEFORE_BAN = -100; const MIN_SCORE_BEFORE_DISCONNECT = -50; diff --git a/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts b/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts index 85db33293790..a3c0fe5443b9 100644 --- a/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts +++ b/yarn-project/p2p/src/test-helpers/reqresp-nodes.ts @@ -34,6 +34,7 @@ import { BootstrapNode } from '../bootstrap/bootstrap.js'; import type { BootnodeConfig, P2PConfig } from '../config.js'; import type { MemPools } from '../mem_pools/interface.js'; import { DiscV5Service } from '../services/discv5/discV5_service.js'; +import { APP_SPECIFIC_WEIGHT } from '../services/gossipsub/scoring.js'; import { LibP2PService } from '../services/libp2p/libp2p_service.js'; import { PeerManager } from '../services/peer-manager/peer_manager.js'; import { PeerScoring } from '../services/peer-manager/peer_scoring.js'; @@ -154,7 +155,7 @@ export async function createTestLibP2PService( epochCache, ); - p2pNode.services.pubsub.score.params.appSpecificWeight = 10; + p2pNode.services.pubsub.score.params.appSpecificWeight = APP_SPECIFIC_WEIGHT; p2pNode.services.pubsub.score.params.appSpecificScore = (peerId: string) => peerManager.shouldDisableP2PGossip(peerId) ? 
-Infinity : peerManager.getPeerScore(peerId); diff --git a/yarn-project/p2p/src/test-helpers/testbench-utils.ts b/yarn-project/p2p/src/test-helpers/testbench-utils.ts index 0ae3ece65537..9fbd09495938 100644 --- a/yarn-project/p2p/src/test-helpers/testbench-utils.ts +++ b/yarn-project/p2p/src/test-helpers/testbench-utils.ts @@ -216,6 +216,15 @@ export function createMockEpochCache(): EpochCacheInterface { isInCommittee: () => Promise.resolve(false), getRegisteredValidators: () => Promise.resolve([]), filterInCommittee: () => Promise.resolve([]), + getL1Constants: () => ({ + l1StartBlock: 0n, + l1GenesisTime: 0n, + epochDuration: 1, + slotDuration: 1, + ethereumSlotDuration: 1, + proofSubmissionEpochs: 1, + targetCommitteeSize: 48, + }), }; } diff --git a/yarn-project/sequencer-client/src/config.ts b/yarn-project/sequencer-client/src/config.ts index 74fa82262daf..60ce42919779 100644 --- a/yarn-project/sequencer-client/src/config.ts +++ b/yarn-project/sequencer-client/src/config.ts @@ -11,8 +11,14 @@ import { EthAddress } from '@aztec/foundation/eth-address'; import { type KeyStoreConfig, keyStoreConfigMappings } from '@aztec/node-keystore/config'; import { type P2PConfig, p2pConfigMappings } from '@aztec/p2p/config'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; -import { type ChainConfig, type SequencerConfig, chainConfigMappings } from '@aztec/stdlib/config'; +import { + type ChainConfig, + type SequencerConfig, + chainConfigMappings, + sharedSequencerConfigMappings, +} from '@aztec/stdlib/config'; import type { ResolvedSequencerConfig } from '@aztec/stdlib/interfaces/server'; +import { DEFAULT_P2P_PROPAGATION_TIME } from '@aztec/stdlib/timetable'; import { type ValidatorClientConfig, validatorClientConfigMappings } from '@aztec/validator-client/config'; import { @@ -25,8 +31,6 @@ import { export * from './publisher/config.js'; export type { SequencerConfig }; -export const DEFAULT_ATTESTATION_PROPAGATION_TIME = 2; - /** * Default values for 
SequencerConfig. * Centralized location for all sequencer configuration defaults. @@ -41,7 +45,7 @@ export const DefaultSequencerConfig: ResolvedSequencerConfig = { maxDABlockGas: 10e9, maxBlockSizeInBytes: 1024 * 1024, enforceTimeTable: true, - attestationPropagationTime: DEFAULT_ATTESTATION_PROPAGATION_TIME, + attestationPropagationTime: DEFAULT_P2P_PROPAGATION_TIME, secondsBeforeInvalidatingBlockAsCommitteeMember: 144, // 12 L1 blocks secondsBeforeInvalidatingBlockAsNonCommitteeMember: 432, // 36 L1 blocks skipCollectingAttestations: false, @@ -191,13 +195,7 @@ export const sequencerConfigMappings: ConfigMappingsType = { description: 'Shuffle attestation ordering to create invalid ordering (for testing only)', ...booleanConfigHelper(DefaultSequencerConfig.shuffleAttestationOrdering), }, - blockDurationMs: { - env: 'SEQ_BLOCK_DURATION_MS', - description: - 'Duration per block in milliseconds when building multiple blocks per slot. ' + - 'If undefined (default), builds a single block per slot using the full slot duration.', - parseEnv: (val: string) => (val ? 
parseInt(val, 10) : undefined), - }, + ...sharedSequencerConfigMappings, buildCheckpointIfEmpty: { env: 'SEQ_BUILD_CHECKPOINT_IF_EMPTY', description: 'Have sequencer build and publish an empty checkpoint if there are no txs', diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts index 2f460a06fd8a..fdba28718c27 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.test.ts @@ -146,6 +146,7 @@ describe('CheckpointProposalJob', () => { l1StartBlock: 0n, epochDuration: 16, proofSubmissionEpochs: 4, + targetCommitteeSize: 48, }; dateProvider = new TestDateProvider(); diff --git a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts index c27200244247..e34e29fd82e5 100644 --- a/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/checkpoint_proposal_job.timing.test.ts @@ -329,6 +329,7 @@ describe('CheckpointProposalJob Timing Tests', () => { l1StartBlock: 0n, epochDuration: 16, proofSubmissionEpochs: 4, + targetCommitteeSize: 48, }; // Initialize test state diff --git a/yarn-project/sequencer-client/src/sequencer/timetable.test.ts b/yarn-project/sequencer-client/src/sequencer/timetable.test.ts index ab45835ebe75..c247cfddf3b1 100644 --- a/yarn-project/sequencer-client/src/sequencer/timetable.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/timetable.test.ts @@ -1,4 +1,6 @@ -import { MIN_EXECUTION_TIME, SequencerTimetable } from './timetable.js'; +import { MIN_EXECUTION_TIME } from '@aztec/stdlib/timetable'; + +import { SequencerTimetable } from './timetable.js'; import { SequencerState } from './utils.js'; describe('sequencer-timetable', () => { diff 
--git a/yarn-project/sequencer-client/src/sequencer/timetable.ts b/yarn-project/sequencer-client/src/sequencer/timetable.ts index 931beba88462..505979f95af7 100644 --- a/yarn-project/sequencer-client/src/sequencer/timetable.ts +++ b/yarn-project/sequencer-client/src/sequencer/timetable.ts @@ -1,14 +1,15 @@ import { createLogger } from '@aztec/aztec.js/log'; +import { + CHECKPOINT_ASSEMBLE_TIME, + CHECKPOINT_INITIALIZATION_TIME, + DEFAULT_P2P_PROPAGATION_TIME, + MIN_EXECUTION_TIME, +} from '@aztec/stdlib/timetable'; -import { DEFAULT_ATTESTATION_PROPAGATION_TIME as DEFAULT_P2P_PROPAGATION_TIME } from '../config.js'; import { SequencerTooSlowError } from './errors.js'; import type { SequencerMetrics } from './metrics.js'; import { SequencerState } from './utils.js'; -export const MIN_EXECUTION_TIME = 2; -export const CHECKPOINT_INITIALIZATION_TIME = 1; -export const CHECKPOINT_ASSEMBLE_TIME = 1; - export class SequencerTimetable { /** * How late into the slot can we be to start working. Computed as the total time needed for assembling and publishing a block, diff --git a/yarn-project/slasher/src/tally_slasher_client.test.ts b/yarn-project/slasher/src/tally_slasher_client.test.ts index b84d19dafe69..9ca14eaa7a44 100644 --- a/yarn-project/slasher/src/tally_slasher_client.test.ts +++ b/yarn-project/slasher/src/tally_slasher_client.test.ts @@ -136,6 +136,7 @@ describe('TallySlasherClient', () => { epochDuration: 32, ethereumSlotDuration: 12, proofSubmissionEpochs: 8, + targetCommitteeSize: 48, }); // Create mocks for L1 contracts diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts index 1bcb01acead5..24355f8545d8 100644 --- a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts +++ b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts @@ -58,6 +58,7 @@ describe('EpochPruneWatcher', () => { epochDuration: 8, ethereumSlotDuration: 12, proofSubmissionEpochs: 1, + 
targetCommitteeSize: 48, }; epochCache.getL1Constants.mockReturnValue(l1Constants); diff --git a/yarn-project/stdlib/package.json b/yarn-project/stdlib/package.json index d8ffe81f6268..9189844ec9aa 100644 --- a/yarn-project/stdlib/package.json +++ b/yarn-project/stdlib/package.json @@ -61,7 +61,8 @@ "./zkpassport": "./dest/zkpassport/index.js", "./slashing": "./dest/slashing/index.js", "./l1-contracts": "./dest/l1-contracts/index.js", - "./world-state": "./dest/world-state/index.js" + "./world-state": "./dest/world-state/index.js", + "./timetable": "./dest/timetable/index.js" }, "typedocOptions": { "entryPoints": [ diff --git a/yarn-project/stdlib/src/config/index.ts b/yarn-project/stdlib/src/config/index.ts index 8b3489f5a5c2..3682f0db4012 100644 --- a/yarn-project/stdlib/src/config/index.ts +++ b/yarn-project/stdlib/src/config/index.ts @@ -1,2 +1,3 @@ export * from './chain-config.js'; export * from './node-rpc-config.js'; +export * from './sequencer-config.js'; diff --git a/yarn-project/stdlib/src/config/sequencer-config.ts b/yarn-project/stdlib/src/config/sequencer-config.ts new file mode 100644 index 000000000000..bc9ef0acb65b --- /dev/null +++ b/yarn-project/stdlib/src/config/sequencer-config.ts @@ -0,0 +1,19 @@ +import type { ConfigMappingsType } from '@aztec/foundation/config'; + +import type { SequencerConfig } from '../interfaces/configs.js'; + +/** + * Partial sequencer config mappings for fields that need to be shared across packages. + * The full sequencer config mappings remain in sequencer-client, but shared fields + * (like blockDurationMs needed by both p2p and sequencer-client) are defined here + * to avoid duplication. + */ +export const sharedSequencerConfigMappings: ConfigMappingsType> = { + blockDurationMs: { + env: 'SEQ_BLOCK_DURATION_MS', + description: + 'Duration per block in milliseconds when building multiple blocks per slot. 
' + + 'If undefined (default), builds a single block per slot using the full slot duration.', + parseEnv: (val: string) => (val ? parseInt(val, 10) : undefined), + }, +}; diff --git a/yarn-project/stdlib/src/epoch-helpers/index.test.ts b/yarn-project/stdlib/src/epoch-helpers/index.test.ts index ed1564cbe75d..1dfe16d60cee 100644 --- a/yarn-project/stdlib/src/epoch-helpers/index.test.ts +++ b/yarn-project/stdlib/src/epoch-helpers/index.test.ts @@ -13,6 +13,7 @@ describe('EpochHelpers', () => { slotDuration: 24, ethereumSlotDuration: 12, proofSubmissionEpochs: 1, + targetCommitteeSize: 48, }; }); diff --git a/yarn-project/stdlib/src/epoch-helpers/index.ts b/yarn-project/stdlib/src/epoch-helpers/index.ts index 2365ca59a8f3..3dc686828083 100644 --- a/yarn-project/stdlib/src/epoch-helpers/index.ts +++ b/yarn-project/stdlib/src/epoch-helpers/index.ts @@ -11,6 +11,7 @@ export type L1RollupConstants = { epochDuration: number; ethereumSlotDuration: number; proofSubmissionEpochs: number; + targetCommitteeSize: number; }; export const EmptyL1RollupConstants: L1RollupConstants = { @@ -20,6 +21,7 @@ export const EmptyL1RollupConstants: L1RollupConstants = { slotDuration: 1, ethereumSlotDuration: 1, proofSubmissionEpochs: 1, + targetCommitteeSize: 48, }; export const L1RollupConstantsSchema = zodFor()( @@ -30,6 +32,7 @@ export const L1RollupConstantsSchema = zodFor()( epochDuration: z.number(), ethereumSlotDuration: z.number(), proofSubmissionEpochs: z.number(), + targetCommitteeSize: z.number(), }), ); diff --git a/yarn-project/stdlib/src/l1-contracts/slash_factory.test.ts b/yarn-project/stdlib/src/l1-contracts/slash_factory.test.ts index 7652f8d9681e..4b482065bc18 100644 --- a/yarn-project/stdlib/src/l1-contracts/slash_factory.test.ts +++ b/yarn-project/stdlib/src/l1-contracts/slash_factory.test.ts @@ -41,6 +41,7 @@ describe('SlashFactory', () => { epochDuration: 32, ethereumSlotDuration: 12, proofSubmissionEpochs: 2, + targetCommitteeSize: 48, slashingRoundSize: 100, 
slashingPayloadLifetimeInRounds: 3, logsBatchSize: 50, diff --git a/yarn-project/stdlib/src/timetable/index.ts b/yarn-project/stdlib/src/timetable/index.ts new file mode 100644 index 000000000000..e598b6849afb --- /dev/null +++ b/yarn-project/stdlib/src/timetable/index.ts @@ -0,0 +1,66 @@ +/** + * Timetable constants used for sequencer timing calculations. + * These define the time budgets for various phases of block production. + * + * The sequencer slot is divided into phases: + * 1. Checkpoint initialization (sync + proposer check) + * 2. Block building (execution) + * 3. Checkpoint assembly + * 4. P2P propagation for proposal and attestations (round-trip) + * 5. L1 publishing + */ + +/** Time budget for checkpoint initialization (sync + proposer check) in seconds */ +export const CHECKPOINT_INITIALIZATION_TIME = 1; + +/** Time budget for assembling a checkpoint after building the last block in seconds */ +export const CHECKPOINT_ASSEMBLE_TIME = 1; + +/** Default one-way P2P propagation time for proposals and attestations in seconds */ +export const DEFAULT_P2P_PROPAGATION_TIME = 2; + +/** Default L1 publishing time (matches Ethereum slot duration on mainnet) in seconds */ +export const DEFAULT_L1_PUBLISHING_TIME = 12; + +/** Minimum execution time for building a block in seconds */ +export const MIN_EXECUTION_TIME = 2; + +/** + * Calculates the maximum number of blocks that can be built in a slot. + * Used by both the sequencer timetable and p2p gossipsub scoring. 
+ * + * @param aztecSlotDurationSec - Aztec slot duration in seconds + * @param blockDurationSec - Duration per block in seconds (undefined = single block mode) + * @param opts - Optional overrides for timing constants + * @returns Maximum number of blocks per slot + */ +export function calculateMaxBlocksPerSlot( + aztecSlotDurationSec: number, + blockDurationSec: number | undefined, + opts: { + checkpointInitializationTime?: number; + checkpointAssembleTime?: number; + p2pPropagationTime?: number; + l1PublishingTime?: number; + } = {}, +): number { + if (!blockDurationSec) { + return 1; // Single block per slot + } + + const initOffset = opts.checkpointInitializationTime ?? CHECKPOINT_INITIALIZATION_TIME; + const assembleTime = opts.checkpointAssembleTime ?? CHECKPOINT_ASSEMBLE_TIME; + const p2pTime = opts.p2pPropagationTime ?? DEFAULT_P2P_PROPAGATION_TIME; + const l1Time = opts.l1PublishingTime ?? DEFAULT_L1_PUBLISHING_TIME; + + // Calculate checkpoint finalization time (assembly + round-trip propagation + L1 publishing) + const checkpointFinalizationTime = assembleTime + p2pTime * 2 + l1Time; + + // Time reserved at end for last sub-slot (validator re-execution) + finalization + const timeReservedAtEnd = blockDurationSec + checkpointFinalizationTime; + + // Time available for building blocks + const timeAvailableForBlocks = aztecSlotDurationSec - initOffset - timeReservedAtEnd; + + return Math.max(1, Math.floor(timeAvailableForBlocks / blockDurationSec)); +} diff --git a/yarn-project/txe/src/state_machine/mock_epoch_cache.ts b/yarn-project/txe/src/state_machine/mock_epoch_cache.ts index 62060512e26d..4c569cb83390 100644 --- a/yarn-project/txe/src/state_machine/mock_epoch_cache.ts +++ b/yarn-project/txe/src/state_machine/mock_epoch_cache.ts @@ -1,6 +1,7 @@ import type { EpochAndSlot, EpochCacheInterface, EpochCommitteeInfo, SlotTag } from '@aztec/epoch-cache'; import { EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; import { EthAddress } from 
'@aztec/foundation/eth-address'; +import { EmptyL1RollupConstants, type L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; /** * Mock implementation of the EpochCacheInterface used to satisfy dependencies of AztecNodeService. @@ -64,4 +65,8 @@ export class MockEpochCache implements EpochCacheInterface { filterInCommittee(_slot: SlotTag, _validators: EthAddress[]): Promise { return Promise.resolve([]); } + + getL1Constants(): L1RollupConstants { + return EmptyL1RollupConstants; + } } diff --git a/yarn-project/validator-client/src/validator.integration.test.ts b/yarn-project/validator-client/src/validator.integration.test.ts index cf2e6a04844a..ffb248852b25 100644 --- a/yarn-project/validator-client/src/validator.integration.test.ts +++ b/yarn-project/validator-client/src/validator.integration.test.ts @@ -52,6 +52,7 @@ describe('ValidatorClient Integration', () => { ethereumSlotDuration: 12, proofSubmissionEpochs: 2, l1StartBlock: 0n, + targetCommitteeSize: 48, }; const emptyL1ToL2Messages: Fr[] = [];