Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
4d2f29b
Add parsing for blob schedule table
jtraglia Apr 16, 2025
43b3a29
add parser for config in yaml
GabrielAstieres Apr 17, 2025
e736f28
improve regex
GabrielAstieres Apr 17, 2025
c790cec
add get_max_blobs_per_block
GabrielAstieres Apr 17, 2025
fcda062
remove MAX_BLOBS_PER_BLOCK_FULU
GabrielAstieres Apr 17, 2025
6324589
revamp get_max_blob_count
GabrielAstieres Apr 17, 2025
f1deb42
address Justin comments
GabrielAstieres Apr 17, 2025
9e479b5
Merge branch 'dev' into blob-schedule
jtraglia Apr 17, 2025
5b69bc2
Simplify blob schedule table
jtraglia Apr 17, 2025
1cf0f03
Clean up parse_config_vars function
jtraglia Apr 17, 2025
c0d8595
Improve testing just a little
jtraglia Apr 17, 2025
d3c21d8
Check/overwrite blob schedule with config file
jtraglia Apr 17, 2025
5501ac1
Remove assert and fix tests
jtraglia Apr 17, 2025
d2a4253
Fix lint & update minimal blob schedule
jtraglia Apr 18, 2025
75163d6
Improve list of records parsing
jtraglia Apr 18, 2025
2133a54
Merge branch 'ethereum:dev' into blob-schedule
GabrielAstieres Apr 22, 2025
be5578b
remove BPO
GabrielAstieres Apr 22, 2025
918dcb1
fix test_incorrect_blob_tx_type
GabrielAstieres Apr 22, 2025
2752acc
fix block processing tests
GabrielAstieres Apr 22, 2025
6d1027d
linting changes
GabrielAstieres Apr 22, 2025
80e9f23
fix test_get_max_blobs
GabrielAstieres Apr 23, 2025
5f47501
hardcode fork epoch in test
GabrielAstieres Apr 23, 2025
e2dfe47
Revamp max blobs getter to default to electra limit
GabrielAstieres Apr 23, 2025
40c1370
Merge branch 'dev' into blob-schedule
jtraglia Apr 23, 2025
c0088d1
add EOF lines
GabrielAstieres Apr 24, 2025
6ba03b5
Add special condition to get_max_blobs_per_block
jtraglia Apr 30, 2025
fb9f20f
Simplify
jtraglia May 1, 2025
7c5b431
Use EIP7892 in modified comment
jtraglia May 8, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion configs/mainnet.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,6 @@ SAMPLES_PER_SLOT: 8
CUSTODY_REQUIREMENT: 4
VALIDATOR_CUSTODY_REQUIREMENT: 8
BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000
MAX_BLOBS_PER_BLOCK_FULU: 12
MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096

# EIP7441
Expand All @@ -190,3 +189,14 @@ VIEW_FREEZE_DEADLINE: 9
MAX_REQUEST_INCLUSION_LIST: 16
# 2**13 (= 8192)
MAX_BYTES_PER_INCLUSION_LIST: 8192

# Blob Scheduling
# ---------------------------------------------------------------

BLOB_SCHEDULE:
# Deneb
- EPOCH: 269568
MAX_BLOBS_PER_BLOCK: 6
# Electra
- EPOCH: 364032
MAX_BLOBS_PER_BLOCK: 9
12 changes: 11 additions & 1 deletion configs/minimal.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,6 @@ SAMPLES_PER_SLOT: 8
CUSTODY_REQUIREMENT: 4
VALIDATOR_CUSTODY_REQUIREMENT: 8
BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000
MAX_BLOBS_PER_BLOCK_FULU: 12
MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096

# EIP7441
Expand All @@ -187,3 +186,14 @@ VIEW_FREEZE_DEADLINE: 3
MAX_REQUEST_INCLUSION_LIST: 16
# 2**13 (= 8192)
MAX_BYTES_PER_INCLUSION_LIST: 8192

# Blob Scheduling
# ---------------------------------------------------------------

BLOB_SCHEDULE:
# Deneb
- EPOCH: 18446744073709551615
MAX_BLOBS_PER_BLOCK: 6
# Electra
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we add a MAX_BLOBS_PER_BLOCK for fulu?

Currently how this is drafted, get_max_blobs_per_block() will return 9 during fulu.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nah, during the 2nd to last PeerDAS breakout call we decided to remove any placeholder blob limit increases. We can decide these in a future PR. This PR just lays the foundation for that. Also, the current plan initially is to keep the Electra blob limit at the fork, then 2x two weeks later, then 2x two months later, and 2x once again another two months later. This results in 72 blobs per block 4.5 months after Fulu.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

then 2x two weeks later

how did we come up with that number btw? 2 weeks doesn't provide us any value, it's not sufficient to analyse the data on the network and push out new client releases + get everyone to update, minimum should be 2 months imo

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This comes from Alex Stokes's proposal: blob/acc in 2025. There are some rationales in there. To be clear, this is just his proposed schedule. Nothing has been decided yet. And this discussion is out-of-scope for this PR 😅

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't really see much rationale there but saw someone else left similar comment on the document, but agree this is out of scope of the PR 😁

- EPOCH: 18446744073709551615
MAX_BLOBS_PER_BLOCK: 9
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ dependencies = [
"curdleproofs==0.1.2",
"eth-typing==5.2.1",
"eth-utils==5.3.0",
"frozendict==2.4.6",
"lru-dict==1.3.0",
"marko==2.1.3",
"milagro_bls_binding==1.9.0",
Expand Down
44 changes: 33 additions & 11 deletions pysetup/helpers.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import re
from typing import TypeVar, Dict
from typing import TypeVar, Dict, Union, List
import textwrap
from functools import reduce

Expand Down Expand Up @@ -85,23 +85,42 @@ def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())

# Access global dict of config vars for runtime configurables
# Ignore variable between quotes and doubles quotes
for name in spec_object.config_vars.keys():
functions_spec = re.sub(r"\b%s\b" % name, 'config.' + name, functions_spec)
ordered_class_objects_spec = re.sub(r"\b%s\b" % name, 'config.' + name, ordered_class_objects_spec)

def format_config_var(name: str, vardef: VariableDefinition) -> str:
if vardef.type_name is None:
functions_spec = re.sub(r"(?<!['\"])\b%s\b(?!['\"])" % name, "config." + name, functions_spec)
ordered_class_objects_spec = re.sub(r"(?<!['\"])\b%s\b(?!['\"])" % name, "config." + name, ordered_class_objects_spec)

def format_config_var(name: str, vardef) -> str:
    """Render one config variable as an argument line for the generated
    ``config = Configuration(...)`` call.

    ``vardef`` is either a ``VariableDefinition`` or, for list-of-records
    config vars (e.g. BLOB_SCHEDULE), a list of dicts that is emitted as a
    tuple of ``frozendict`` literals.
    """
    if isinstance(vardef, list):
        # A special case for list of records.
        pad = " " * 4
        parts = [f"{name}=("]
        for record in vardef:
            entry = pad * 2 + "frozendict({\n"
            for key, val in record.items():
                entry += pad * 3 + f'"{key}": {val},\n'
            entry += pad * 2 + "}),"
            parts.append(entry)
        parts.append(pad + "),")
        return "\n".join(parts)
    if vardef.type_name is None:
        out = f"{name}={vardef.value},"
    else:
        out = f"{name}={vardef.type_name}({vardef.value}),"
    # Carry the table comment through to the generated source, if any.
    if vardef.comment is not None:
        out += f" # {vardef.comment}"
    return out

def format_config_var_param(value):
    """Return the annotation string used for a config variable's field in the
    generated ``Configuration`` NamedTuple.

    Lists of records map to a tuple-of-frozendict annotation; plain variables
    use their declared type name, defaulting to ``int`` when none is given.
    (Other input types implicitly yield ``None``, matching the original.)
    """
    if isinstance(value, list):
        # A special case for list of records.
        return "tuple[frozendict[str, Any], ...]"
    if isinstance(value, VariableDefinition):
        return "int" if value.type_name is None else value.type_name

config_spec = 'class Configuration(NamedTuple):\n'
config_spec += ' PRESET_BASE: str\n'
config_spec += '\n'.join(f' {k}: {v.type_name if v.type_name is not None else "int"}'
for k, v in spec_object.config_vars.items())
config_spec += '\n'.join(f' {k}: {format_config_var_param(v)}' for k, v in spec_object.config_vars.items())
config_spec += '\n\n\nconfig = Configuration(\n'
config_spec += f' PRESET_BASE="{preset_name}",\n'
config_spec += '\n'.join(' ' + format_config_var(k, v) for k, v in spec_object.config_vars.items())
Expand Down Expand Up @@ -267,13 +286,16 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
)


def parse_config_vars(conf: Dict[str, str]) -> Dict[str, str]:
def parse_config_vars(conf: Dict[str, str]) -> Dict[str, Union[str, List[Dict[str, str]]]]:
"""
Parses a dict of basic str/int/list types into a dict for insertion into the spec code.
"""
out: Dict[str, str] = dict()
out: Dict[str, Union[str, List[Dict[str, str]]]] = dict()
for k, v in conf.items():
if isinstance(v, str) and (v.startswith("0x") or k == 'PRESET_BASE' or k == 'CONFIG_NAME'):
if isinstance(v, list):
# A special case for list of records
out[k] = v
elif isinstance(v, str) and (v.startswith("0x") or k == "PRESET_BASE" or k == "CONFIG_NAME"):
# Represent byte data with string, to avoid misinterpretation as big-endian int.
# Everything except PRESET_BASE and CONFIG_NAME is either byte data or an integer.
out[k] = f"'{v}'"
Expand Down
1 change: 1 addition & 0 deletions pysetup/spec_builders/fulu.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ class FuluSpecBuilder(BaseSpecBuilder):
@classmethod
def imports(cls, preset_name: str):
return f'''
from frozendict import frozendict
from eth2spec.electra import {preset_name} as electra
'''

Expand Down
67 changes: 67 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import json
import logging
import os
import re
import string
import sys
import warnings
Expand Down Expand Up @@ -215,6 +216,8 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr

current_name = None
should_skip = False
list_of_records = None
list_of_records_name = None
for child in document.children:
if isinstance(child, BlankLine):
continue
Expand Down Expand Up @@ -255,6 +258,62 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
ssz_objects[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
else:
raise Exception("unrecognized python code element: " + source)
elif isinstance(child, Table) and list_of_records is not None:
list_of_records_header = None
for i, row in enumerate(child.children):
# This will start as an empty list when there is a <!-- list-of-records --> comment,
# which indicates that the next table is a list-of-records. After we're done parsing
# the table, we will reset this to None.
if list_of_records is not None:
if i == 0:
# Save the table header, this will be used for field names
# Skip the last item, which is the description
list_of_records_header = [
# Convert the titles to SNAKE_CASE
re.sub(r'\s+', '_', value.children[0].children.upper())
for value in row.children[:-1]
]
else:
# Add the row entry to our list of records
list_of_records.append({
list_of_records_header[i]: value.children[0].children
for i, value in enumerate(row.children[:-1])
})

# Make a type map from the spec definition
# We'll apply this to the file config (ie mainnet.yaml)
type_map: dict[str,str] = {}
pattern = re.compile(r'^(\w+)\(.*\)$')
for entry in list_of_records:
for k, v in entry.items():
m = pattern.match(v)
if m:
type_map[k] = m.group(1)

# Apply the types to the file config
list_of_records_config: list[dict[str,str]] = []
for entry in config[list_of_records_name]:
new_entry: dict[str,str] = {}
for k, v in entry.items():
ctor = type_map.get(k)
if ctor:
new_entry[k] = f"{ctor}({v})"
else:
new_entry[k] = v
list_of_records_config.append(new_entry)

# For mainnet, check that the spec config & file config are the same
# For minimal, we expect this to be different; just use the file config
if preset_name == "mainnet":
assert list_of_records == list_of_records_config, \
f"list of records mismatch: {list_of_records} vs {list_of_records_config}"
elif preset_name == "minimal":
list_of_records = list_of_records_config

# Set the config variable and reset the global variable
config_vars[list_of_records_name] = list_of_records
list_of_records = None

elif isinstance(child, Table):
for row in child.children:
cells = row.children
Expand Down Expand Up @@ -315,6 +374,14 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
elif isinstance(child, HTMLBlock):
if child.body.strip() == "<!-- eth2spec: skip -->":
should_skip = True
# Handle list-of-records tables
match = re.match(r"<!--\s*list-of-records:([a-zA-Z0-9_-]+)\s*-->", child.body.strip())
if match:
# Initialize list-of-records, in the next iteration this will indicate that the
# table is a list-of-records and must be parsed differently.
list_of_records = []
# Use regex to extract the desired configuration list name
list_of_records_name = match.group(1).upper()

# Load KZG trusted setup from files
if any('KZG_SETUP' in name for name in constant_vars):
Expand Down
9 changes: 1 addition & 8 deletions specs/fulu/beacon-chain.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@

- [Introduction](#introduction)
- [Configuration](#configuration)
- [Execution](#execution)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
- [Block processing](#block-processing)
- [Execution payload](#execution-payload)
Expand All @@ -20,12 +19,6 @@

## Configuration

### Execution

| Name | Value | Description |
| -------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------- |
| `MAX_BLOBS_PER_BLOCK_FULU` | `uint64(12)` | *[New in Fulu:EIP7594]* Maximum number of blobs in a single block limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` |

## Beacon chain state transition function

### Block processing
Expand All @@ -45,7 +38,7 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
# Verify timestamp
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
# Verify commitments are under limit
assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU # [Modified in Fulu:EIP7594]
assert len(body.blob_kzg_commitments) <= get_max_blobs_per_block(get_current_epoch(state)) # [Modified in Fulu:EIP7892]
# Verify the execution payload is valid
versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
assert execution_engine.verify_and_notify_new_payload(
Expand Down
27 changes: 27 additions & 0 deletions specs/fulu/das-core.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,13 @@
- [Configuration](#configuration)
- [Data size](#data-size)
- [Custody setting](#custody-setting)
- [Blob schedule](#blob-schedule)
- [Containers](#containers)
- [`DataColumnSidecar`](#datacolumnsidecar)
- [`MatrixEntry`](#matrixentry)
- [Helper functions](#helper-functions)
- [`get_custody_groups`](#get_custody_groups)
- [`get_max_blobs_per_block`](#get_max_blobs_per_block)
- [`compute_columns_for_custody_group`](#compute_columns_for_custody_group)
- [`compute_matrix`](#compute_matrix)
- [`recover_matrix`](#recover_matrix)
Expand Down Expand Up @@ -67,6 +69,17 @@ The following values are (non-configurable) constants used throughout the specif
| `NUMBER_OF_CUSTODY_GROUPS` | `128` | Number of custody groups available for nodes to custody |
| `CUSTODY_REQUIREMENT` | `4` | Minimum number of custody groups an honest node custodies and serves samples from |

### Blob schedule

*[New in EIP7892]* This schedule defines the maximum blobs per block limit for a given epoch.

<!-- list-of-records:blob_schedule -->

| Epoch | Max Blobs Per Block | Description |
| --------------------------- | ------------------- | -------------------------------- |
| `Epoch(269568)` **Deneb** | `uint64(6)` | The limit is set to `6` blobs |
| `Epoch(364032)` **Electra** | `uint64(9)` | The limit is raised to `9` blobs |

### Containers

#### `DataColumnSidecar`
Expand Down Expand Up @@ -118,6 +131,20 @@ def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence
return sorted(custody_groups)
```

### `get_max_blobs_per_block`

```python
def get_max_blobs_per_block(epoch: Epoch) -> uint64:
    """
    Return the maximum number of blobs that can be included in a block for a given epoch.
    """
    # A non-empty schedule is required; there is no fallback limit without one.
    assert len(BLOB_SCHEDULE) > 0
    # Scan entries from the highest activation epoch downwards and return the
    # limit of the first entry whose activation epoch has been reached.
    for entry in sorted(BLOB_SCHEDULE, key=lambda e: e["EPOCH"], reverse=True):
        if epoch >= entry["EPOCH"]:
            return entry["MAX_BLOBS_PER_BLOCK"]
    # `epoch` precedes every scheduled activation: fall back to the smallest
    # scheduled limit. NOTE(review): this is min() over all entries, not the
    # earliest entry's limit — presumably equivalent in practice; confirm the
    # schedule's limits are non-decreasing by epoch.
    return min(entry["MAX_BLOBS_PER_BLOCK"] for entry in BLOB_SCHEDULE)
```

### `compute_columns_for_custody_group`

```python
Expand Down
2 changes: 1 addition & 1 deletion specs/fulu/p2p-interface.md
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ Some gossip meshes are upgraded in the Fulu fork to support upgraded types.
*Updated validation*

- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer --
i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU`
i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= get_max_blobs_per_block(get_current_epoch(state))`

##### Blob subnets

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -411,7 +411,7 @@ def test_invalid_exceed_max_blobs_per_block(spec, state):
execution_payload = build_empty_execution_payload(spec, state)

opaque_tx, _, blob_kzg_commitments, _ = get_sample_blob_tx(
spec, blob_count=get_max_blob_count(spec) + 1
spec, blob_count=get_max_blob_count(spec, state) + 1
)

execution_payload.transactions = [opaque_tx]
Expand Down
12 changes: 7 additions & 5 deletions tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,36 +84,38 @@ def test_one_blob_two_txs(spec, state):
@with_deneb_until_eip7732
@spec_state_test
def test_one_blob_max_txs(spec, state):
yield from run_block_with_blobs(spec, state, blob_count=1, tx_count=get_max_blob_count(spec))
yield from run_block_with_blobs(
spec, state, blob_count=1, tx_count=get_max_blob_count(spec, state)
)


@with_deneb_until_eip7732
@spec_state_test
def test_invalid_one_blob_max_plus_one_txs(spec, state):
yield from run_block_with_blobs(
spec, state, blob_count=1, tx_count=get_max_blob_count(spec) + 1, valid=False
spec, state, blob_count=1, tx_count=get_max_blob_count(spec, state) + 1, valid=False
)


@with_deneb_until_eip7732
@spec_state_test
def test_max_blobs_per_block(spec, state):
yield from run_block_with_blobs(spec, state, blob_count=get_max_blob_count(spec))
yield from run_block_with_blobs(spec, state, blob_count=get_max_blob_count(spec, state))


@with_deneb_until_eip7732
@spec_state_test
def test_invalid_max_blobs_per_block_two_txs(spec, state):
yield from run_block_with_blobs(
spec, state, blob_count=get_max_blob_count(spec), tx_count=2, valid=False
spec, state, blob_count=get_max_blob_count(spec, state), tx_count=2, valid=False
)


@with_deneb_until_eip7732
@spec_state_test
def test_invalid_exceed_max_blobs_per_block(spec, state):
yield from run_block_with_blobs(
spec, state, blob_count=get_max_blob_count(spec) + 1, valid=False
spec, state, blob_count=get_max_blob_count(spec, state) + 1, valid=False
)


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def test_blob_kzg_commitments_merkle_proof__random_block_1(spec, state):
@with_fulu_and_later
@spec_state_test
def test_blob_kzg_commitments_merkle_proof__multiple_blobs(spec, state):
blob_count = spec.config.MAX_BLOBS_PER_BLOCK_FULU // 2
blob_count = spec.get_max_blobs_per_block(spec.get_current_epoch(state)) // 2
rng = random.Random(2222)
yield from _run_blob_kzg_commitments_merkle_proof_test(
spec, state, rng=rng, blob_count=blob_count
Expand All @@ -93,7 +93,7 @@ def test_blob_kzg_commitments_merkle_proof__multiple_blobs(spec, state):
@with_fulu_and_later
@spec_state_test
def test_blob_kzg_commitments_merkle_proof__max_blobs(spec, state):
max_blobs = spec.config.MAX_BLOBS_PER_BLOCK_FULU
max_blobs = spec.get_max_blobs_per_block(spec.get_current_epoch(state))
rng = random.Random(3333)
yield from _run_blob_kzg_commitments_merkle_proof_test(
spec, state, rng=rng, blob_count=max_blobs
Expand Down
Loading