From b3e97b772a9f5865a193ff5fb9c0b1022709a107 Mon Sep 17 00:00:00 2001 From: raulk Date: Mon, 26 May 2025 00:01:32 +0100 Subject: [PATCH 1/3] update EIP-7892: harden the spec with p2p details. --- EIPS/eip-7892.md | 112 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 97 insertions(+), 15 deletions(-) diff --git a/EIPS/eip-7892.md b/EIPS/eip-7892.md index 0cf9c14f65ba3f..0ea202846867e9 100644 --- a/EIPS/eip-7892.md +++ b/EIPS/eip-7892.md @@ -2,7 +2,7 @@ eip: 7892 title: Blob Parameter Only Hardforks description: Defines a mechanism for scaling Ethereum’s blob capacity via specialized hard forks that modify only blob-related parameters. -author: Mark Mackey (@ethDreamer) +author: Mark Mackey (@ethDreamer), Raúl Kripalani (@raulk) discussions-to: https://ethereum-magicians.org/t/eip-7892-blob-parameter-only-hardforks/23018 status: Draft type: Informational @@ -18,14 +18,16 @@ This EIP introduces **Blob Parameter Only (BPO) Hardforks**, a lightweight mecha Ethereum's scaling strategy relies on Layer 2 (L2) solutions for transaction execution while using Ethereum as a **data availability (DA) layer**. However, the demand for DA has increased rapidly, and the current approach of only modifying blob parameters in large, infrequent hard forks is **not agile enough** to keep up with L2 growth. -The key motivations for BPO forks are: +The key motivations for BPO forks are as follows: 1. **Continuous Scaling** - - L2 DA demand is growing rapidly, leading to saturation of blob capacity. + - L2 DA demand is growing rapidly, leading to ongoing saturation of blob capacity. - Large, infrequent blob parameter changes create high costs and inefficiencies. - BPO forks allow for more frequent, safer capacity increases. 2. **Reduced Operational Overhead** + - Performance improvements and further testing will continue to unlock additional capacity. + - It is desirable to reduce the time between core devs agreeing on a parameter increase and its effective deployment. 
- Full Ethereum hard forks require significant coordination, testing, and upgrade efforts across clients. - By isolating blob parameter changes, BPO forks reduce the complexity of upgrades. @@ -39,12 +41,22 @@ The key motivations for BPO forks are: ## Specification -BPO forks are a special class of hard fork which **only modifies any of the following** blob-related parameters: +### Definition + +BPO hardforks are defined as protocol upgrades that modify only blob-related parameters through configuration, without requiring any client-side code changes. The new parameters take effect immediately at the specified activation time. + +### Blob schedule configuration + +The following protocol parameters are now managed by the blob schedule configuration: - **Blob Target (`blob_target`)**: The expected number of blobs per block. - **Blob Limit (`blob_limit`)**: The maximum number of blobs per block. - **Blob Base Fee Update Fraction (`baseFeeUpdateFraction`)**: Determines how blob gas pricing adjusts per block. +To ensure consistency, when a regular hardfork changes any of these parameters, it MUST do so by adding an entry to the blob schedule configuration. + +### Execution layer configuration + To facilitate these changes on the execution layer, the `blobSchedule` object specified in [EIP-7840](./eip-7840.md) is extended to allow for an arbitrary number of block timestamps at which these parameters **MAY** change. 
```json @@ -59,12 +71,12 @@ To facilitate these changes on the execution layer, the `blobSchedule` object sp "max": 9, "baseFeeUpdateFraction": 5007716 }, - "1740693335": { + "12000000": { "target": 24, "max": 48, "baseFeeUpdateFraction": 5007716 }, - "1743285335": { + "12300000": { "target": 36, "max": 56, "baseFeeUpdateFraction": 5007716 @@ -72,24 +84,94 @@ To facilitate these changes on the execution layer, the `blobSchedule` object sp } ``` -On the consensus layer, a new parameter is added to the configuration: +### Consensus layer configuration -``` +A new `BLOB_SCHEDULE` field is added to consensus layer configuration, containing a sequence of entries representing blob parameter changes **after** `ELECTRA_FORK_EPOCH`. There exists one entry per fork that changes blob parameters, whether it is a regular or a Blob-Parameter-Only fork. + +```yaml BLOB_SCHEDULE: - - EPOCH: 348618 + - EPOCH: 400000 ## A future anonymous BPO fork MAX_BLOBS_PER_BLOCK: 24 - - EPOCH: 355368 + - EPOCH: 420000 ## A future anonymous BPO fork MAX_BLOBS_PER_BLOCK: 56 + - EPOCH: 440000 ## GLOAS_FORK_EPOCH; a future named fork introducing blob parameter changes + MAX_BLOBS_PER_BLOCK: 72 ``` The parameters and schedules above are purely illustrative. Actual values and schedules are beyond the scope of this specification. -### Requirements +**Requirements:** + +- Execution and consensus clients **MUST** share consistent BPO fork schedules. +- The slot number in the EL's `blobSchedule` **MUST** align with the start of the epoch specified in the consensus layer configuration. +- The `max` field in the EL's `blobSchedule` **MUST** equal the `MAX_BLOBS_PER_BLOCK` value in the consensus layer configuration. + +### Modified `compute_fork_digest` + +The `compute_fork_digest` helper is updated to account for BPO forks: + +```python +# These basic types already exist. Copied here for reference. 
+Epoch = int +Version = bytes +Root = bytes +ForkDigest = bytes + +class BlobScheduleEntry(NamedTuple): + epoch: Epoch + max_blobs_per_block: int # uint32 + +def compute_fork_digest( + current_version: Version, # Unchanged. Refers to the baseline hardfork atop which the blob schedule is applied. + genesis_validators_root: Root, # Unchanged. + current_epoch: Epoch, # New. + blob_schedule: Sequence[BlobScheduleEntry] # New. +) -> ForkDigest: + """ + Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``, + bitmasking blob parameters after ``ELECTRA_FORK_VERSION``. + + This is a digest primarily used for domain separation on the p2p layer. + 4-bytes suffices for practical separation of forks/chains. + """ + base_digest = compute_fork_data_root(current_version, genesis_validators_root)[:4] + + # Find the blob parameters applicable to this epoch. + sorted_schedule = sorted(blob_schedule, key=lambda e: e.epoch, reverse=True) + blob_params = next( + (entry for entry in sorted_schedule if current_epoch >= entry.epoch), + None + ) + + # This check enables us to roll out the BPO mechanism without a concurrent parameter change. + if blob_params is None: + return ForkDigest(base_digest) + + # Bitmask blob parameters into the digest. + mask = blob_params.max_blobs_per_block.to_bytes(4, 'big') + masked_digest = bytes(a ^ b for a, b in zip(base_digest, mask)) + return ForkDigest(masked_digest) +``` + +### P2P Networking + +#### ENR + +In the consensus layer, ENRs are extended with an additional entry that communicates the digest of the next scheduled fork, regardless of whether it is a regular or BPO fork. This approach is preferred over encoding BPO-specific parameters because it is agnostic to specific use cases and offers greater long-term flexibility. 
+ +| Key | Value | +| :----- | :---------------------- | +| `nfd` | SSZ Bytes4 `ForkDigest` | + +When discovering and interfacing with peers, nodes MUST evaluate `nfd` alongside their existing consideration of the `ENRForkID::next_*` fields under the `eth2` key, to form a more accurate view of the peer's intended next fork. + +#### `Status` req/resp + +No changes are needed in this interaction, but it is noted that it must correctly convey the updated `fork_digest`. + +#### Gossip topics -- Execution and consensus clients **MUST** share consistent BPO fork schedules -- BPO forks **MUST NOT** conflict with other fork schedules -- The timestamp in `blobSchedule` **MUST** align with the start of the epoch specified in the consensus layer configuration -- The `max` field in `blobSchedule` **MUST** equal the `MAX_BLOBS_PER_BLOCK` value in the consensus layer configuration +No changes are required to topic structure or configuration. However, all topics will automatically rotate at a BPO fork due to changes in their `ForkDigestValue` component. ## Rationale From f969a47b50fca872b8b94f3fef440cabe4e66899 Mon Sep 17 00:00:00 2001 From: raulk Date: Fri, 30 May 2025 17:07:18 +0100 Subject: [PATCH 2/3] address review comments. --- EIPS/eip-7892.md | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/EIPS/eip-7892.md b/EIPS/eip-7892.md index 0ea202846867e9..a4f839c2d65a97 100644 --- a/EIPS/eip-7892.md +++ b/EIPS/eip-7892.md @@ -57,7 +57,7 @@ To ensure consistency, when a regular hardfork changes any of these parameters, ### Execution layer configuration -To facilitate these changes on the execution layer, the `blobSchedule` object specified in [EIP-7840](./eip-7840.md) is extended to allow for an arbitrary number of block timestamps at which these parameters **MAY** change. 
+To facilitate these changes on the execution layer, each fork in the `blobSchedule` object defined in [EIP-7840](./eip-7840.md) is linked to an activation timestamp via a top-level `<fork>Time` field, which holds the Unix timestamp of the activation slot as a JSON number. BPO forks SHOULD be named using the convention `bpo<n>`, where `<n>` starts at `1`. Left padding is unnecessary since these labels are not subject to lexicographic sorting. Activation timestamps are required only for forks that occur **after** Prague.

 ```json
 "blobSchedule": {
@@ -71,17 +71,27 @@ To facilitate these changes on the execution layer, the `blobSchedule` obj
         "max": 9,
         "baseFeeUpdateFraction": 5007716
     },
-    "12000000": {
-        "target": 24,
-        "max": 48,
+    "osaka": {
+        "target": 9,
+        "max": 12,
         "baseFeeUpdateFraction": 5007716
     },
-    "12300000": {
-        "target": 36,
-        "max": 56,
+    "bpo1": {
+        "target": 12,
+        "max": 16,
         "baseFeeUpdateFraction": 5007716
-    }
-}
+    },
+    "bpo2": {
+        "target": 16,
+        "max": 24,
+        "baseFeeUpdateFraction": 5007716
+    },
+},
+"cancunTime": 0, // no backporting
+"pragueTime": 0, // no backporting
+"osakaTime": 1747387400,
+"bpo1Time": 1757387400,
+"bpo2Time": 1767387784,
 ```

 ### Consensus layer configuration
@@ -111,15 +121,9 @@ The parameters and schedules above are purely illustrative. Actual values and sc
 The `compute_fork_digest` helper is updated to account for BPO forks:

 ```python
-# These basic types already exist. Copied here for reference.
-Epoch = int
-Version = bytes
-Root = bytes
-ForkDigest = bytes
-
 class BlobScheduleEntry(NamedTuple):
     epoch: Epoch
-    max_blobs_per_block: int  # uint32
+    max_blobs_per_block: int  # uint64, aligning with the type of MAX_BLOBS_PER_BLOCK.

 def compute_fork_digest(
     current_version: Version, # Unchanged. Refers to the baseline hardfork atop which the blob schedule is applied.
@@ -147,7 +151,8 @@ def compute_fork_digest(
     if blob_params is None:
         return ForkDigest(base_digest)

-    # Bitmask blob parameters into the digest. 
+ # Safely bitmask blob parameters into the digest. + assert 0 <= blob_params.max_blobs_per_block <= 0xFFFFFFFF mask = blob_params.max_blobs_per_block.to_bytes(4, 'big') masked_digest = bytes(a ^ b for a, b in zip(base_digest, mask)) return ForkDigest(masked_digest) @@ -157,7 +162,7 @@ def compute_fork_digest( #### ENR -In the consensus layer, ENRs are extended with an additional entry that communicates the digest of the next scheduled fork, regardless of whether it is a regular or BPO fork. This approach is preferred over encoding BPO-specific parameters because it is agnostic to specific use cases and offers greater long-term flexibility. +In the consensus layer, ENRs are extended with an additional entry `nfd`, short for "next fork digest". This field communicates the digest of the next scheduled fork, regardless of whether it is a regular or BPO fork. This approach is preferred over encoding BPO-specific parameters because it is agnostic to specific use cases and offers greater long-term flexibility. | Key | Value | | :----- | :---------------------- | From 95d580afcc8dd5ca08946d2051ce1812df293700 Mon Sep 17 00:00:00 2001 From: raulk Date: Fri, 30 May 2025 17:43:25 +0100 Subject: [PATCH 3/3] more editorial changes + Fulu blob schedule entry. 
--- EIPS/eip-7892.md | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/EIPS/eip-7892.md b/EIPS/eip-7892.md index a4f839c2d65a97..04ccd0bca7d2b2 100644 --- a/EIPS/eip-7892.md +++ b/EIPS/eip-7892.md @@ -100,11 +100,13 @@ A new `BLOB_SCHEDULE` field is added to consensus layer configuration, containin ```yaml BLOB_SCHEDULE: - - EPOCH: 400000 ## A future anonymous BPO fork + - EPOCH: 380000 # FULU_FORK_EPOCH (illustrative) + MAX_BLOBS_PER_BLOCK: 12 + - EPOCH: 400000 # A future anonymous BPO fork MAX_BLOBS_PER_BLOCK: 24 - - EPOCH: 420000 ## A future anonymous BPO fork + - EPOCH: 420000 # A future anonymous BPO fork MAX_BLOBS_PER_BLOCK: 56 - - EPOCH: 440000 ## GLOAS_FORK_EPOCH; a future named fork introducing blob parameter changes + - EPOCH: 440000 # GLOAS_FORK_EPOCH; a future named fork introducing blob parameter changes MAX_BLOBS_PER_BLOCK: 72 ``` @@ -121,15 +123,16 @@ The parameters and schedules above are purely illustrative. Actual values and sc The `compute_fork_digest` helper is updated to account for BPO forks: ```python -class BlobScheduleEntry(NamedTuple): +@dataclass +class BlobScheduleEntry: epoch: Epoch - max_blobs_per_block: int # uint64, aligning with the type of MAX_BLOBS_PER_BLOCK. + max_blobs_per_block: uint64 # Aligning with the type of MAX_BLOBS_PER_BLOCK def compute_fork_digest( - current_version: Version, # Unchanged. Refers to the baseline hardfork atop which the blob schedule is applied. - genesis_validators_root: Root, # Unchanged. - current_epoch: Epoch, # New. - blob_schedule: Sequence[BlobScheduleEntry] # New. 
+ current_version: Version, # Unchanged; refers to the baseline hardfork atop which the blob schedule is applied + genesis_validators_root: Root, # Unchanged + current_epoch: Epoch, # New + blob_schedule: Sequence[BlobScheduleEntry] # New ) -> ForkDigest: """ Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``, @@ -142,10 +145,11 @@ def compute_fork_digest( # Find the blob parameters applicable to this epoch. sorted_schedule = sorted(blob_schedule, key=lambda e: e.epoch, reverse=True) - blob_params = next( - (entry for entry in sorted_schedule if current_epoch >= entry.epoch), - None - ) + blob_params = None + for entry in sorted_schedule: + if current_epoch >= entry.epoch: + blob_params = entry + break # This check enables us to roll out the BPO mechanism without a concurrent parameter change. if blob_params is None: @@ -172,7 +176,7 @@ When discovering and interfacing with peers, nodes MUST evaluate `nfd` alongside #### `Status` req/resp -No changes are needed in this interaction, but it is noted that it must correctly convey the updated `fork_digest`. +No changes are needed in this interaction, but it is noted that the response payload must correctly contain the updated `fork_digest`. #### Gossip topics