Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
f0a66a5
chore: temp downgrade bignum until bigcurve works on 0.7+
MirandaWood May 7, 2025
f3a244e
Merge remote-tracking branch 'origin/master' into mw/blob-batching
MirandaWood May 15, 2025
dc3a027
chore: revert to bignum master version
MirandaWood May 15, 2025
c29f9df
Merge remote-tracking branch 'origin/master' into mw/blob-batching
MirandaWood May 16, 2025
43720fd
Merge remote-tracking branch 'origin/master' into mw/blob-batching
MirandaWood May 20, 2025
f6cffda
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood May 22, 2025
0e57a5d
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood May 28, 2025
58da20b
feat: blob batching methods (#13583)
MirandaWood Jun 3, 2025
2c45397
feat: blob batching methods (ts) (#13606)
MirandaWood Jun 3, 2025
b05c67e
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood Jun 3, 2025
a0a2f11
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood Jun 3, 2025
e9726bb
feat: WIP batch blobs and validate in rollup (#13817)
MirandaWood Jun 4, 2025
4caef57
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood Jun 4, 2025
3f75d58
feat: WIP integrate batched blobs into l1 contracts + ts (#14329)
MirandaWood Jun 4, 2025
b90b979
feat: improve blob hash calc, add extra blob check + test
MirandaWood Jun 4, 2025
32bf652
feat: move blobCommitmentsHash to blocklog
MirandaWood Jun 4, 2025
1019dd2
chore: update comments
MirandaWood Jun 6, 2025
d798af5
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood Jun 6, 2025
980cc32
chore: generate fixtures, update constants post merge
MirandaWood Jun 6, 2025
969d4bf
Merge remote-tracking branch 'origin/next' into mw/blob-batching
MirandaWood Jun 9, 2025
61a4d76
chore: post merge fixes, update fixtures and constants
MirandaWood Jun 9, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 8 additions & 3 deletions l1-contracts/src/core/Rollup.sol
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ contract Rollup is IStaking, IValidatorSelection, IRollup, RollupCore {
external
view
override(IRollup)
returns (bytes32[] memory, bytes32, bytes32)
returns (bytes32[] memory, bytes32, bytes[] memory)
{
return ExtRollupLib.validateBlobs(_blobsInput, checkBlob);
}
Expand Down Expand Up @@ -360,13 +360,18 @@ contract Rollup is IStaking, IValidatorSelection, IRollup, RollupCore {
return FeeHeaderLib.decompress(FeeLib.getStorage().feeHeaders[_blockNumber]);
}

function getBlobPublicInputsHash(uint256 _blockNumber)
function getBlobCommitmentsHash(uint256 _blockNumber)
external
view
override(IRollup)
returns (bytes32)
{
return STFLib.getStorage().blobPublicInputsHashes[_blockNumber];
return STFLib.getStorage().blocks[_blockNumber].blobCommitmentsHash;
}

function getCurrentBlobCommitmentsHash() external view override(IRollup) returns (bytes32) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we using this to have a slightly easier way to fetch it? Not a big thing atm, but it takes up extra space in the contract, and put us with more code paths which can be kinda annoying. We have a good bunch of these flowing around.

Created #14854, no need to address here. Guess there will be plenty at once as part of #14854.

Copy link
Contributor Author

@MirandaWood MirandaWood Jun 6, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, just an easier way to fetch - thanks for adding an issue!

RollupStore storage rollupStore = STFLib.getStorage();
return rollupStore.blocks[rollupStore.tips.pendingBlockNumber].blobCommitmentsHash;
}

function getConfig(address _attester)
Expand Down
20 changes: 13 additions & 7 deletions l1-contracts/src/core/interfaces/IRollup.sol
Original file line number Diff line number Diff line change
Expand Up @@ -28,13 +28,21 @@ struct SubmitEpochRootProofArgs {
uint256 end; // inclusive
PublicInputArgs args;
bytes32[] fees;
bytes blobPublicInputs;
bytes blobInputs;
bytes proof;
}

/**
* @notice Struct for storing block data, set in proposal.
* @param archive - Archive tree root of the block
* @param headerHash - Hash of the proposed block header
* @param blobCommitmentsHash - H(...H(H(commitment_0), commitment_1).... commitment_n) - used to validate we are using the same blob commitments on L1 and in the rollup circuit
* @param slotNumber - This block's slot
*/
struct BlockLog {
bytes32 archive;
bytes32 headerHash; // hash of the proposed block header
bytes32 headerHash;
bytes32 blobCommitmentsHash; // TODO(#14646): Keep a running hash we iteratively overwrite, instead of per block.
Slot slotNumber;
}

Expand Down Expand Up @@ -94,12 +102,9 @@ struct RollupConfig {
uint256 version;
}

// The below blobPublicInputsHashes are filled when proposing a block, then used to verify an epoch proof.
// TODO(#8955): When implementing batched kzg proofs, store one instance per epoch rather than block
struct RollupStore {
ChainTips tips; // put first such that the struct slot structure is easy to follow for cheatcodes
mapping(uint256 blockNumber => BlockLog log) blocks;
mapping(uint256 blockNumber => bytes32) blobPublicInputsHashes;
mapping(address => uint256) sequencerRewards;
mapping(Epoch => EpochRewards) epochRewards;
// @todo Below can be optimised with a bitmap as we can benefit from provers likely proving for epochs close
Expand Down Expand Up @@ -179,7 +184,7 @@ interface IRollup is IRollupCore {
function validateBlobs(bytes calldata _blobsInputs)
external
view
returns (bytes32[] memory, bytes32, bytes32);
returns (bytes32[] memory, bytes32, bytes[] memory);

function getManaBaseFeeComponentsAt(Timestamp _timestamp, bool _inFeeAsset)
external
Expand All @@ -198,7 +203,8 @@ interface IRollup is IRollupCore {
function getPendingBlockNumber() external view returns (uint256);
function getBlock(uint256 _blockNumber) external view returns (BlockLog memory);
function getFeeHeader(uint256 _blockNumber) external view returns (FeeHeader memory);
function getBlobPublicInputsHash(uint256 _blockNumber) external view returns (bytes32);
function getBlobCommitmentsHash(uint256 _blockNumber) external view returns (bytes32);
function getCurrentBlobCommitmentsHash() external view returns (bytes32);

function getSequencerRewards(address _sequencer) external view returns (uint256);
function getCollectiveProverRewardsForEpoch(Epoch _epoch) external view returns (uint256);
Expand Down
5 changes: 2 additions & 3 deletions l1-contracts/src/core/libraries/ConstantsGen.sol
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,9 @@ library Constants {
uint256 internal constant GENESIS_ARCHIVE_ROOT =
1002640778211850180189505934749257244705296832326768971348723156503780793518;
uint256 internal constant FEE_JUICE_ADDRESS = 5;
uint256 internal constant BLOB_PUBLIC_INPUTS = 6;
uint256 internal constant BLOB_PUBLIC_INPUTS_BYTES = 112;
uint256 internal constant BLS12_POINT_COMPRESSED_BYTES = 48;
uint256 internal constant PROPOSED_BLOCK_HEADER_LENGTH_BYTES = 316;
uint256 internal constant ROOT_ROLLUP_PUBLIC_INPUTS_LENGTH = 1015;
uint256 internal constant ROOT_ROLLUP_PUBLIC_INPUTS_LENGTH = 158;
uint256 internal constant NUM_MSGS_PER_BASE_PARITY = 4;
uint256 internal constant NUM_BASE_PARITY_PER_ROOT_PARITY = 4;
}
10 changes: 5 additions & 5 deletions l1-contracts/src/core/libraries/Errors.sol
Original file line number Diff line number Diff line change
Expand Up @@ -57,9 +57,8 @@ library Errors {
error Rollup__InvalidProof(); // 0xa5b2ba17
error Rollup__InvalidProposedArchive(bytes32 expected, bytes32 actual); // 0x32532e73
error Rollup__InvalidTimestamp(Timestamp expected, Timestamp actual); // 0x3132e895
error Rollup__InvalidBlobHash(bytes32 blobHash); // 0xc4a168c6
error Rollup__InvalidBlobHash(bytes32 expected, bytes32 actual); // 0x13031e6a
error Rollup__InvalidBlobProof(bytes32 blobHash); // 0x5ca17bef
error Rollup__InvalidBlobPublicInputsHash(bytes32 expected, bytes32 actual); // 0xfe6b4994
error Rollup__NoEpochToProve(); // 0xcbaa3951
error Rollup__NonSequentialProving(); // 0x1e5be132
error Rollup__NothingToPrune(); // 0x850defd3
Expand All @@ -71,9 +70,10 @@ library Errors {
error Rollup__NonZeroDaFee(); // 0xd9c75f52
error Rollup__InvalidBasisPointFee(uint256 basisPointFee); // 0x4292d136
error Rollup__InvalidManaBaseFee(uint256 expected, uint256 actual); // 0x73b6d896
error Rollup__StartAndEndNotSameEpoch(Epoch start, Epoch end);
error Rollup__StartIsNotFirstBlockOfEpoch();
error Rollup__StartIsNotBuildingOnProven();
error Rollup__StartAndEndNotSameEpoch(Epoch start, Epoch end); // 0xb64ec33e
error Rollup__StartIsNotFirstBlockOfEpoch(); // 0x4ef11e0d
error Rollup__StartIsNotBuildingOnProven(); // 0x4a59f42e
error Rollup__TooManyBlocksInEpoch(uint256 expected, uint256 actual); // 0x7d5b1408
error Rollup__AlreadyClaimed(address prover, Epoch epoch);
error Rollup__NotPastDeadline(Slot deadline, Slot currentSlot);
error Rollup__PastDeadline(Slot deadline, Slot currentSlot);
Expand Down
172 changes: 127 additions & 45 deletions l1-contracts/src/core/libraries/rollup/BlobLib.sol
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@ import {Vm} from "forge-std/Vm.sol";

library BlobLib {
address public constant VM_ADDRESS = address(uint160(uint256(keccak256("hevm cheat code"))));
uint256 internal constant VERSIONED_HASH_VERSION_KZG =
0x0100000000000000000000000000000000000000000000000000000000000000; // 0x01 << 248 to be used in blobHashCheck
uint256 internal constant MAX_BLOBS_PER_BLOCK = 6; // Increasing to 9 with Pectra

/**
* @notice Get the blob base fee
Expand All @@ -26,77 +29,156 @@ library BlobLib {
}

/**
* @notice Validate an L2 block's blobs and return the hashed blobHashes and public inputs.
* @notice Validate an L2 block's blobs and return the blobHashes, the hashed blobHashes, and blob commitments.
* @notice We assume that this propose transaction contains only Aztec blobs
* Input bytes:
* input[:1] - num blobs in block
* input[1:] - 192 * num blobs of the above _blobInput
* @param _blobsInput - The above bytes to verify a blob
* input[0] - num blobs in block
* input[1:] - blob commitments (48 bytes * num blobs in block)
* @param _blobsInput - The above bytes to verify our input blob commitments match real blobs
* @param _checkBlob - Whether to skip blob related checks. Hardcoded to true (See RollupCore.sol -> checkBlob), exists only to be overridden in tests.
* Returns for proposal:
* @return blobHashes - All of the blob hashes included in this block, to be emitted in L2BlockProposed event.
* @return blobsHashesCommitment - A hash of all blob hashes in this block, to be included in the block header. See comment at the end of this fn for more info.
* @return blobCommitments - All of the blob commitments included in this block, to be stored then validated against those used in the rollup in epoch proof verification.
*/
function validateBlobs(bytes calldata _blobsInput, bool _checkBlob)
internal
view
returns (
// All of the blob hashes included in this block
bytes32[] memory blobHashes,
bytes32 blobsHashesCommitment,
bytes32 blobPublicInputsHash
bytes[] memory blobCommitments
)
{
// We cannot input the incorrect number of blobs below, as the blobsHash
// and epoch proof verification will fail.
uint8 numBlobs = uint8(_blobsInput[0]);
blobHashes = new bytes32[](numBlobs);
bytes memory blobPublicInputs;
blobCommitments = new bytes[](numBlobs);
bytes32 blobHash;
// Add 1 for the numBlobs prefix
uint256 blobInputStart = 1;
for (uint256 i = 0; i < numBlobs; i++) {
// Add 1 for the numBlobs prefix
uint256 blobInputStart = i * 192 + 1;
// Since an invalid blob hash here would fail the consensus checks of
// the header, the `blobInput` is implicitly accepted by consensus as well.
blobHashes[i] = validateBlob(_blobsInput[blobInputStart:blobInputStart + 192], i, _checkBlob);
// We want to extract the 112 bytes we use for public inputs:
// * input[32:64] - z
// * input[64:96] - y
// * input[96:144] - commitment C
// Out of 192 bytes per blob.
blobPublicInputs = abi.encodePacked(
blobPublicInputs,
_blobsInput[blobInputStart + 32:blobInputStart + 32 + Constants.BLOB_PUBLIC_INPUTS_BYTES]
// Commitments = arrays of bytes48 compressed points
blobCommitments[i] = abi.encodePacked(
_blobsInput[blobInputStart:blobInputStart + Constants.BLS12_POINT_COMPRESSED_BYTES]
);
blobInputStart += Constants.BLS12_POINT_COMPRESSED_BYTES;

bytes32 blobHashCheck = calculateBlobHash(blobCommitments[i]);
if (_checkBlob) {
assembly {
blobHash := blobhash(i)
}
// The below check ensures that our injected blobCommitments indeed match the real
// blobs submitted with this block. They are then used in the blobCommitmentsHash (see below).
require(blobHash == blobHashCheck, Errors.Rollup__InvalidBlobHash(blobHash, blobHashCheck));
} else {
blobHash = blobHashCheck;
}
blobHashes[i] = blobHash;
}
// Ensure no non-Aztec blobs have been emitted in this tx:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, my comment related to this one (on the previous pr) was because people have been talking about same actor sending a tx that propose blocks for multiple rollups at the same time, so there could be a blob before the aztec specific, or after.

I'm ok with us checking like this, but would expect that we could get away with just checking blobhash(numBlobs) == 0 🤔.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah ok I misunderstood! I was thinking that it might be possible to submit blobs like blob_0, blob_1, blob_2 where blob_1 is empty and wanted to cover that case. I wasn't sure whether blob submission would count blob_1 as 'blank' (with blobhash 0, covered by new check) or assign a blobhash which would just be the hash of the (0, 0) commitment (covered by protocol).

so there could be a blob before the aztec specific, or after.

We can't have one before the aztec specific since any commitment before numBlobs would be 'stored' and fail the epoch verification. I don't think there is a way to 'attack' this by injecting a commitment that matches the non-aztec blob both to this fn and in the rollup circuits (@iAmMichaelConnor?)

for (uint256 i = numBlobs; i < MAX_BLOBS_PER_BLOCK; i++) {
assembly {
blobHash := blobhash(i)
}
require(blobHash == 0, Errors.Rollup__InvalidBlobHash(blobHash, 0));
}
// Return the hash of all z, y, and Cs, so we can use them in proof verification later
blobPublicInputsHash = sha256(blobPublicInputs);
// Hash the EVM blob hashes for the block header
// TODO(#13430): The below blobsHashesCommitment known as blobsHash elsewhere in the code. The name blobsHashesCommitment is confusingly similar to blobCommitmentsHash
// which are different values:
// - blobsHash := sha256([blobhash_0, ..., blobhash_m]) = a hash of all blob hashes in a block with m+1 blobs inserted into the header, exists so a user can cross check blobs.
// - blobCommitmentsHash := sha256( ...sha256(sha256(C_0), C_1) ... C_n) = iteratively calculated hash of all blob commitments in an epoch with n+1 blobs (see calculateBlobCommitmentsHash()),
// exists so we can validate injected commitments to the rollup circuits correspond to the correct real blobs.
// We may be able to combine these values e.g. blobCommitmentsHash := sha256( ...sha256(sha256(blobshash_0), blobshash_1) ... blobshash_l) for an epoch with l+1 blocks.
blobsHashesCommitment = Hash.sha256ToField(abi.encodePacked(blobHashes));
}

/**
* @notice Validate a blob.
* @notice Validate a batched blob.
* Input bytes:
* input[:32] - versioned_hash
* input[32:64] - z
* input[64:96] - y
* input[96:144] - commitment C
* input[144:192] - proof (a commitment to the quotient polynomial q(X))
* - This can be relaxed to happen at the time of `submitProof` instead
* @notice Apparently there is no guarantee that the blobs will be processed in the order sent
* so the use of blobhash(_blobNumber) may fail in production
* @param _blobInput - The above bytes to verify a blob
* input[:32] - versioned_hash - NB for a batched blob, this is simply the versioned hash of the batched commitment
* input[32:64] - z = poseidon2( ...poseidon2(poseidon2(z_0, z_1), z_2) ... z_n)
* input[64:96] - y = y_0 + gamma * y_1 + gamma^2 * y_2 + ... + gamma^n * y_n
* input[96:144] - commitment C = C_0 + gamma * C_1 + gamma^2 * C_2 + ... + gamma^n * C_n
* input[144:192] - proof (a commitment to the quotient polynomial q(X)) = Q_0 + gamma * Q_1 + gamma^2 * Q_2 + ... + gamma^n * Q_n
* @param _blobInput - The above bytes to verify a batched blob
*
* If this function passes where the values of z, y, and C are valid public inputs to the final epoch root proof, then
* we know that the data in each blob of the epoch corresponds to the tx effects of all our proven txs in the epoch.
*
* The rollup circuits calculate each z_i and y_i as above, so if this function passes but they do not match the values from the
* circuit, then proof verification will fail.
*
* Each commitment C_i is injected into the circuits and their correctness is validated using the blobCommitmentsHash, as
* explained below in calculateBlobCommitmentsHash().
*
*/
function validateBlob(bytes calldata _blobInput, uint256 _blobNumber, bool _checkBlob)
internal
view
returns (bytes32 blobHash)
{
if (!_checkBlob) {
return bytes32(_blobInput[0:32]);
/**
 * @notice Verify a batched blob KZG opening proof via the EIP-4844 point evaluation precompile.
 * @param _blobInput - The 192-byte precompile input described above:
 *                     versioned_hash (32) | z (32) | y (32) | commitment C (48) | proof Q (48).
 * @return success - True when the precompile accepts the proof; on rejection this call reverts
 *                   with Rollup__InvalidBlobProof carrying the versioned hash (input[0:32]).
 * @dev NOTE(review): a staticcall to an address with no code returns success == true with empty
 *      returndata — on a pre-Cancun chain 0x0a has no code, so this check would silently pass.
 *      Presumably deployment targets guarantee Cancun; confirm.
 */
function validateBatchedBlob(bytes calldata _blobInput) internal view returns (bool success) {
// Staticcall the point eval precompile https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile :
(success,) = address(0x0a).staticcall(_blobInput);
require(success, Errors.Rollup__InvalidBlobProof(bytes32(_blobInput[0:32])));
}

/**
* @notice Calculate the current state of the blobCommitmentsHash. Called for each new proposed block.
* @param _previousblobCommitmentsHash - The previous block's blobCommitmentsHash.
* @param _blobCommitments - The commitments corresponding to this block's blobs.
* @param _isFirstBlockOfEpoch - Whether this block is the first of an epoch (see below).
*
* The blobCommitmentsHash is an accumulated value calculated in the rollup circuits as:
* blobCommitmentsHash_i := sha256(blobCommitmentsHash_(i - 1), C_i)
* for each blob commitment C_i in an epoch. For the first blob in the epoch (i = 0):
* blobCommitmentsHash_i := sha256(C_0)
* which is why we require _isFirstBlockOfEpoch here.
*
* Each blob commitment is injected into the rollup circuits and we rely on the L1 contracts to validate
* these commitments correspond to real blobs. The input _blobCommitments below come from validateBlobs()
* so we know they are valid commitments here.
*
* We recalculate the same blobCommitmentsHash (which encompasses all claimed blobs in the epoch)
* as in the rollup circuits, then use the final value as a public input to the root rollup proof
* verification in EpochProofLib.sol.
*
* If the proof verifies, we know that the injected commitments used in the rollup circuits match
* the real commitments to L1 blobs.
*
*/
function calculateBlobCommitmentsHash(
bytes32 _previousblobCommitmentsHash,
bytes[] memory _blobCommitments,
bool _isFirstBlockOfEpoch
) internal pure returns (bytes32 currentblobCommitmentsHash) {
uint256 i = 0;
currentblobCommitmentsHash = _previousblobCommitmentsHash;
// If we are at the first block of an epoch, we reinitialise the blobCommitmentsHash.
// Blob commitments are collected and proven per root rollup proof => per epoch.
if (_isFirstBlockOfEpoch) {
// Initialise the blobCommitmentsHash
currentblobCommitmentsHash = Hash.sha256ToField(abi.encodePacked(_blobCommitments[i++]));
}
assembly {
blobHash := blobhash(_blobNumber)
for (i; i < _blobCommitments.length; i++) {
currentblobCommitmentsHash =
Hash.sha256ToField(abi.encodePacked(currentblobCommitmentsHash, _blobCommitments[i]));
}
require(blobHash == bytes32(_blobInput[0:32]), Errors.Rollup__InvalidBlobHash(blobHash));
}

// Staticcall the point eval precompile https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile :
(bool success,) = address(0x0a).staticcall(_blobInput);
require(success, Errors.Rollup__InvalidBlobProof(blobHash));
/**
* @notice Calculate the expected blob hash given a blob commitment
* @dev TODO(#14646): Use kzg_to_versioned_hash & VERSIONED_HASH_VERSION_KZG
* Until we use an external kzg_to_versioned_hash(), calculating it here:
* EIP-4844 spec blobhash is 32 bytes: [version, ...sha256(commitment)[1:32]]
* The version = VERSIONED_HASH_VERSION_KZG, currently 0x01.
* @param _blobCommitment - The 48 byte blob commitment
* @return bytes32 - The blob hash
*/
function calculateBlobHash(bytes memory _blobCommitment) internal pure returns (bytes32) {
// EIP-4844 versioned hash layout: byte 0 = VERSIONED_HASH_VERSION_KZG (0x01),
// bytes 1..31 = the low 31 bytes of sha256(commitment).
uint256 digest = uint256(sha256(_blobCommitment));
// Keep only the low 248 bits (clears the top byte), then stamp the version byte in its place.
uint256 truncated = digest & ((1 << 248) - 1);
return bytes32(truncated | VERSIONED_HASH_VERSION_KZG);
}
}
Loading
Loading