node/derivation/beacon.go (25 changes: 23 additions & 2 deletions)
@@ -159,8 +159,29 @@ func KZGToVersionedHash(commitment kzg4844.Commitment) (out common.Hash) {
return out
}

-func VerifyBlobProof(blob *Blob, commitment kzg4844.Commitment, proof kzg4844.Proof) error {
-return kzg4844.VerifyBlobProof(blob.KZGBlob(), commitment, proof)
+// verifyBlob authenticates a blob against the L1-signed versioned blob hash
+// by recomputing the KZG commitment locally and checking
+//
+// KZGToVersionedHash(BlobToCommitment(blob)) == expectedHash
+//
+// We deliberately do NOT verify a beacon-supplied kzg_proof. After
+// EIP-7594 (PeerDAS / Osaka) the beacon /eth/v1/beacon/blob_sidecars
+// endpoint's kzg_proof field is no longer guaranteed to be a legacy
+// single-blob proof across forks/clients, and the new
+// /eth/v1/beacon/blobs endpoint does not return proofs at all. The
+// commitment round-trip gives us the same security property
+// (blob bytes -> commitment -> versioned hash matches the L1-signed
+// hash) without depending on those fields.
+func verifyBlob(blob *Blob, expectedHash common.Hash) error {
+commitment, err := kzg4844.BlobToCommitment(blob.KZGBlob())
+if err != nil {
+return fmt.Errorf("cannot compute KZG commitment for blob: %w", err)
+}
+got := KZGToVersionedHash(commitment)
+if got != expectedHash {
+return fmt.Errorf("recomputed blob hash %s does not match expected %s", got.Hex(), expectedHash.Hex())
+}
+return nil
}

// dataAndHashesFromTxs extracts calldata and datahashes from the input transactions and returns them. It
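
Aside (not part of the diff): the sketch below illustrates the round-trip verifyBlob relies on, assuming go-ethereum's crypto/kzg4844 package and the EIP-4844 rule that a versioned hash is sha256(commitment) with the first byte replaced by the version byte 0x01, which is what KZGToVersionedHash is expected to implement; the pointer-taking BlobToCommitment matches how the diff calls it via blob.KZGBlob().

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

func main() {
	// An all-zero blob is valid: every 32-byte field element is zero.
	var blob kzg4844.Blob

	// Recompute the commitment from the raw blob bytes, as verifyBlob does.
	commitment, err := kzg4844.BlobToCommitment(&blob)
	if err != nil {
		panic(err)
	}

	// EIP-4844 versioned hash: sha256(commitment) with version byte 0x01.
	h := sha256.Sum256(commitment[:])
	h[0] = 0x01

	// If h equals the hash the L1 blob transaction committed to, the blob
	// bytes are authentic; tampering with the blob (or lying about the
	// commitment) changes h and the comparison fails.
	fmt.Printf("versioned hash: %#x\n", h)
}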
node/derivation/derivation.go (37 changes: 27 additions & 10 deletions)
@@ -393,40 +393,57 @@ func (d *Derivation) fetchRollupDataByTxHash(txHash common.Hash, blockNumber uin
// can assemble the local sidecar in the exact order the L1 tx
// declared its blobs. Multi-blob batches are decoded by
// concatenating blob bodies in tx order; any reordering here
-// would corrupt the resulting zstd stream.
+// would corrupt the resulting zstd stream. The map key is
+// derived from the beacon-supplied commitment; verifyBlob below
+// re-derives the same hash from the actual blob bytes, so a
+// malicious beacon cannot forge an entry by lying about the
+// commitment.
byHash := make(map[common.Hash]*BlobSidecar, len(blobSidecars))
for _, sidecar := range blobSidecars {
var commitment kzg4844.Commitment
copy(commitment[:], sidecar.KZGCommitment[:])
-versionedHash := KZGToVersionedHash(commitment)
-byHash[versionedHash] = sidecar
+byHash[KZGToVersionedHash(commitment)] = sidecar
}

+// Downstream (ParseBatch) only consumes Sidecar.Blobs and
+// Sidecar.Commitments; Proofs is intentionally left empty to
+// avoid one expensive KZG proof computation per blob, per batch,
+// on every sync. If a future consumer needs Proofs, compute them
+// lazily there or call kzg4844.ComputeBlobProof here.
var blobTxSidecar eth.BlobTxSidecar
for i, expectedHash := range blobHashes {
sidecar, ok := byHash[expectedHash]
if !ok {
return nil, fmt.Errorf("blob %d (hash=%s) not found in beacon sidecars", i, expectedHash.Hex())
}
-var commitment kzg4844.Commitment
-copy(commitment[:], sidecar.KZGCommitment[:])

-var blob Blob
b, err := hexutil.Decode(sidecar.Blob)
if err != nil {
return nil, fmt.Errorf("failed to decode blob %d: %w", i, err)
}
+// Reject malformed beacon responses up front. copy(blob[:], b)
+// silently:
+// - zero-pads when len(b) < BlobSize (tail of the
+// zero-initialized array stays zero)
+// - truncates when len(b) > BlobSize (extra bytes dropped)
+// Either case would otherwise surface later as a confusing
+// blob-hash mismatch instead of a clear length error.
+if len(b) != BlobSize {
+return nil, fmt.Errorf("blob %d: unexpected length %d (want %d, hash=%s)", i, len(b), BlobSize, expectedHash.Hex())
+}
+var blob Blob
copy(blob[:], b)

-proof := kzg4844.Proof(sidecar.KZGProof)
-if err := VerifyBlobProof(&blob, commitment, proof); err != nil {
-return nil, fmt.Errorf("blob %d KZG proof verification failed: %w", i, err)
+if err := verifyBlob(&blob, expectedHash); err != nil {
+return nil, fmt.Errorf("blob %d: %w", i, err)
}

+var commitment kzg4844.Commitment
+copy(commitment[:], sidecar.KZGCommitment[:])
+
d.logger.Info("Matched blob", "txOrder", i, "beaconIndex", sidecar.Index, "hash", expectedHash.Hex())
blobTxSidecar.Blobs = append(blobTxSidecar.Blobs, *blob.KZGBlob())
blobTxSidecar.Commitments = append(blobTxSidecar.Commitments, commitment)
-blobTxSidecar.Proofs = append(blobTxSidecar.Proofs, proof)
}

d.logger.Info("Blob matching results", "matched", len(blobTxSidecar.Blobs), "expected", len(blobHashes))
ops/docker/layer1/configs/values.env.template (15 changes: 12 additions & 3 deletions)
@@ -48,8 +48,13 @@ export VIEW_FREEZE_CUTOFF_BPS=7500
export INCLUSION_LIST_SUBMISSION_DUE_BPS=6667
export PROPOSER_INCLUSION_LIST_CUTOFF_BPS=9167
export DATA_COLUMN_SIDECAR_SUBNET_COUNT=128
-export SAMPLES_PER_SLOT=8
-export CUSTODY_REQUIREMENT=4
+# Single-node devnet: every node IS the entire network, so it must
+# custody all 128 columns and sample all 128 each slot. Without this,
+# only CUSTODY_REQUIREMENT (default 4) columns are persisted, which is
+# never enough to reconstruct blobs (need 64/128) and any historical
+# blob retrieval (e.g. validator re-deriving from L1 genesis) fails.
+export SAMPLES_PER_SLOT=128
+export CUSTODY_REQUIREMENT=128
export MAX_BLOBS_PER_BLOCK_ELECTRA=9
export TARGET_BLOBS_PER_BLOCK_ELECTRA=6
export MAX_REQUEST_BLOCKS_DENEB=128
@@ -81,5 +86,9 @@ export BPO_5_EPOCH=18446744073709551615
export BPO_5_MAX_BLOBS=0
export BPO_5_TARGET_BLOBS=0
export BPO_5_BASE_FEE_UPDATE_FRACTION=0
-export MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS=4096
+# Bumped from spec default 4096 (~27h on a 3s-slot/8-slot-per-epoch
+# minimal preset) to ~30 days, so a freshly reset validator can always
+# re-derive from L1 genesis without hitting "0 data columns found"
+# pruning errors. 110000 epochs * 24s/epoch ≈ 30.5 days.
+export MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS=110000
export MIN_EPOCHS_FOR_BLOCK_REQUESTS=33024
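
Aside (not part of the diff): a quick check of the retention arithmetic in the comments above, assuming the minimal-preset numbers they cite (3s slots, 8 slots per epoch, so 24s per epoch); illustrative only.

package main

import (
	"fmt"
	"time"
)

func main() {
	const epochSeconds = 3 * 8 // 3s slots x 8 slots per epoch = 24s per epoch
	for _, epochs := range []int{4096, 110000} {
		d := time.Duration(epochs*epochSeconds) * time.Second
		// 4096 epochs -> 27h18m24s (~27h); 110000 epochs -> 733h20m0s (~30.5 days)
		fmt.Printf("%6d epochs -> %s\n", epochs, d)
	}
}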