From 8dcb2267f8b0599960130c9f7453f8477b3ff695 Mon Sep 17 00:00:00 2001
From: kevaundray
Date: Fri, 3 Nov 2023 16:09:14 +0000
Subject: [PATCH 1/3] add note on unused method

---
 .../aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp b/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp
index e6d4961d2ded..440351fa7685 100644
--- a/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp
+++ b/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp
@@ -57,6 +57,7 @@ AggregationObject aggregate_proofs(BaseRollupInputs const& baseRollupInputs)
 }
 
 /** TODO: implement
+ * This is not being used
  * @brief Get the prover contribution hash object
  *
  * @return NT::fr

From 30460b869489d487431eb5d81904a94224665893 Mon Sep 17 00:00:00 2001
From: kevaundray
Date: Fri, 3 Nov 2023 16:09:44 +0000
Subject: [PATCH 2/3] add is_empty methods to public_data_read and
 public_data_update_request

---
 .../src/crates/types/src/abis/public_data_read.nr           | 4 ++++
 .../src/crates/types/src/abis/public_data_update_request.nr | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_read.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_read.nr
index 60959fef6035..35cbbe8d4b05 100644
--- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_read.nr
+++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_read.nr
@@ -12,4 +12,8 @@ impl PublicDataRead {
             self.value,
         ], constants_gen::GENERATOR_INDEX__PUBLIC_DATA_READ)
     }
+
+    pub fn is_empty(self) -> bool {
+        (self.leaf_index == 0) & (self.value == 0)
+    }
 }

diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_update_request.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_update_request.nr
index 10c7d3f9ffb7..b933fa9c3de1 100644
--- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_update_request.nr
+++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_data_update_request.nr
@@ -14,4 +14,8 @@ impl PublicDataUpdateRequest {
             self.new_value
         ], constants_gen::GENERATOR_INDEX__PUBLIC_DATA_UPDATE_REQUEST)
     }
+
+    pub fn is_empty(self) -> bool {
+        (self.leaf_index == 0) & (self.old_value == 0) & (self.new_value == 0)
+    }
 }

From 82891883e3b5d763067b2723fabe5a795f75f8e3 Mon Sep 17 00:00:00 2001
From: kevaundray
Date: Fri, 3 Nov 2023 16:10:59 +0000
Subject: [PATCH 3/3] fill in everything except for new nullifier_tree

---
 .../rollup-lib/src/base/base_rollup_inputs.nr | 392 +++++++++++++++++-
 1 file changed, 387 insertions(+), 5 deletions(-)

diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr
index f110656e7dbb..f46650450ab9 100644
--- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr
+++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr
@@ -1,7 +1,13 @@
 use crate::abis::nullifier_leaf_preimage::NullifierLeafPreimage;
 use crate::abis::append_only_tree_snapshot::AppendOnlyTreeSnapshot;
 use crate::abis::constant_rollup_data::ConstantRollupData;
-use crate::abis::base_or_merge_rollup_public_inputs::BaseOrMergeRollupPublicInputs;
+use crate::abis::base_or_merge_rollup_public_inputs::{BaseOrMergeRollupPublicInputs, BASE_ROLLUP_TYPE};
+use crate::merkle_tree::{calculate_subtree, calculate_empty_tree_root};
+use crate::components;
+use dep::types::utils::uint256::U256;
+use dep::types::abis::public_data_update_request::PublicDataUpdateRequest;
+use dep::types::abis::public_data_read::PublicDataRead;
+use dep::types::mocked::{AggregationObject, Proof};
 use dep::aztec::constants_gen::{
     MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP,
     NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH,
@@ -11,6 +17,17 @@ use dep::aztec::constants_gen::{
     MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP,
     PUBLIC_DATA_TREE_HEIGHT,
     KERNELS_PER_BASE_ROLLUP,
+    MAX_NEW_CONTRACTS_PER_TX,
+    NOTE_HASH_SUBTREE_HEIGHT,
+    CONTRACT_SUBTREE_HEIGHT,
+    NUM_FIELDS_PER_SHA256,
+    MAX_NEW_COMMITMENTS_PER_TX,
+    MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX,
+    MAX_PUBLIC_DATA_READS_PER_TX,
+    MAX_NEW_NULLIFIERS_PER_TX,
+    NUM_ENCRYPTED_LOGS_HASHES_PER_TX,
+    MAX_NEW_L2_TO_L1_MSGS_PER_TX,
+    NUM_UNENCRYPTED_LOGS_HASHES_PER_TX
 };
 use dep::types::abis::previous_kernel_data::PreviousKernelData;
 use dep::types::abis::membership_witness::NullifierMembershipWitness;
@@ -41,8 +58,373 @@ struct BaseRollupInputs {
 }
 
 impl BaseRollupInputs {
-    pub fn base_rollup_circuit(self) -> BaseOrMergeRollupPublicInputs {
-        let zeroed = dep::std::unsafe::zeroed();
-        zeroed
+    pub fn base_rollup_circuit(self) -> BaseOrMergeRollupPublicInputs {
+        // Verify the two previous kernel proofs
+        // TODO(Kev): Change this 2 to a named constant
+        for i in 0..2 {
+            let proof = self.kernel_data[i].proof;
+            assert(verify_kernel_proof(proof), "kernel proof verification failed");
+        };
+
+        // Verify that each kernel's chain_id and version match the rollup's
+        for i in 0..2 {
+            assert(self.kernel_data[i].public_inputs.constants.tx_context.chain_id ==
+                self.constants.global_variables.chain_id, "kernel chain_id does not match the rollup chain_id");
+            assert(self.kernel_data[i].public_inputs.constants.tx_context.version ==
+                self.constants.global_variables.version, "kernel version does not match the rollup version");
+        };
+
+        // First we compute the contract tree leaves
+        let contract_leaves = self.calculate_contract_leaves();
+        let contracts_tree_subroot = self.calculate_contract_subtree(contract_leaves);
+
+        let commitments_tree_subroot = self.calculate_commitments_subtree();
+
+        // Insert commitment subtree:
+        let empty_commitments_subtree_root = calculate_empty_tree_root(NOTE_HASH_SUBTREE_HEIGHT);
+        let end_note_hash_tree_snapshot = components::insert_subtree_to_snapshot_tree(
+            self.start_note_hash_tree_snapshot,
+            self.new_commitments_subtree_sibling_path,
+            empty_commitments_subtree_root,
+            commitments_tree_subroot,
+            NOTE_HASH_SUBTREE_HEIGHT as u8,
+        );
+
+        // Insert contract subtree:
+        let empty_contracts_subtree_root = calculate_empty_tree_root(CONTRACT_SUBTREE_HEIGHT);
+        let end_contract_tree_snapshot = components::insert_subtree_to_snapshot_tree(
+            self.start_contract_tree_snapshot,
+            self.new_contracts_subtree_sibling_path,
+            empty_contracts_subtree_root,
+            contracts_tree_subroot,
+            CONTRACT_SUBTREE_HEIGHT as u8,
+        );
+
+        // Insert nullifiers:
+        let end_nullifier_tree_snapshot = self.check_nullifier_tree_non_membership_and_insert_to_tree();
+
+        // Validate public data reads and public data update requests, and update the public data tree
+        let end_public_data_tree_root = self.validate_and_process_public_state();
+
+        // Calculate the overall calldata hash
+        let calldata_hash = BaseRollupInputs::components_compute_kernel_calldata_hash(self.kernel_data);
+
+        // Perform membership checks to prove that the data used by each kernel exists
+        // within the historic blocks tree
+        self.perform_historical_blocks_tree_membership_checks();
+
+        let aggregation_object = self.aggregate_proofs();
+
+        BaseOrMergeRollupPublicInputs {
+            rollup_type : BASE_ROLLUP_TYPE,
+            rollup_subtree_height : 0,
+            end_aggregation_object : aggregation_object,
+            constants : self.constants,
+            start_note_hash_tree_snapshot : self.start_note_hash_tree_snapshot,
+            end_note_hash_tree_snapshot : end_note_hash_tree_snapshot,
+            start_nullifier_tree_snapshot : self.start_nullifier_tree_snapshot,
+            end_nullifier_tree_snapshot : end_nullifier_tree_snapshot,
+            start_contract_tree_snapshot : self.start_contract_tree_snapshot,
+            end_contract_tree_snapshot : end_contract_tree_snapshot,
+            start_public_data_tree_root : self.start_public_data_tree_root,
+            end_public_data_tree_root : end_public_data_tree_root,
+            calldata_hash : calldata_hash,
+        }
+    }
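+
+    // Illustrative sketch only, not called anywhere: `calculate_subtree` (from
+    // crate::merkle_tree) is assumed to reduce a power-of-two leaf array to a
+    // subtree root by pairwise hashing. For a hypothetical 4-leaf subtree, with
+    // std's pedersen_hash assumed as the node hash (the real helper may use a
+    // different hash or generator index), that reduction looks like this:
+    fn subtree_root_sketch(leaves: [Field; 4]) -> Field {
+        // Hash the two leaf pairs to get the two level-1 nodes
+        let left = dep::std::hash::pedersen_hash([leaves[0], leaves[1]]);
+        let right = dep::std::hash::pedersen_hash([leaves[2], leaves[3]]);
+        // Hash the level-1 nodes to get the subtree root
+        dep::std::hash::pedersen_hash([left, right])
+    }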
+
+    fn calculate_contract_leaves(self) -> [Field; NUM_CONTRACT_LEAVES] {
+        let mut contract_leaves = [0; NUM_CONTRACT_LEAVES];
+        for i in 0..2 {
+            let new_contracts = self.kernel_data[i].public_inputs.end.new_contracts;
+
+            // Loop over the new contracts
+            // TODO(Maddiaa): NOTE: we are currently assuming that there is only going to be one
+            for j in 0..new_contracts.len() {
+                let leaf_preimage = new_contracts[j];
+                // When there is no contract deployment, we should insert a zero leaf into the tree and ignore the
+                // membership check. This is to ensure that we don't hit "already deployed" errors when we are not
+                // deploying contracts, e.g. when we are only calling functions on existing contracts.
+                let to_push = if leaf_preimage.contract_address.to_field() == 0 {
+                    0
+                } else {
+                    leaf_preimage.hash()
+                };
+
+                contract_leaves[i * MAX_NEW_CONTRACTS_PER_TX + j] = to_push;
+            }
+        }
+
+        contract_leaves
+    }
+
+    // TODO(Kev): This should say calculate_contract_subtree_root.
+    // The cpp code says calculate_contract_subtree, so I'm leaving it as is for now.
+    fn calculate_contract_subtree(self, leaves : [Field; NUM_CONTRACT_LEAVES]) -> Field {
+        calculate_subtree(leaves)
+    }
+
+    // TODO(Kev): This should say calculate_commitments_subtree_root.
+    // The cpp code says calculate_commitments_subtree, so I'm leaving it as is for now.
+    fn calculate_commitments_subtree(self) -> Field {
+        let mut commitment_tree_leaves = [0; NOTE_HASH_SUBTREE_WIDTH];
+
+        for i in 0..2 {
+            let new_commitments = self.kernel_data[i].public_inputs.end.new_commitments;
+
+            // The commitments array MUST be exactly MAX_NEW_COMMITMENTS_PER_TX long to calculate our subtrees correctly
+            assert(new_commitments.len() == MAX_NEW_COMMITMENTS_PER_TX, "New commitments in kernel data must be MAX_NEW_COMMITMENTS_PER_TX (see constants.hpp)");
+
+            for j in 0..MAX_NEW_COMMITMENTS_PER_TX {
+                // TODO(Maddiaa): batch insert
+                commitment_tree_leaves[i * MAX_NEW_COMMITMENTS_PER_TX + j] = new_commitments[j];
+            }
+        }
+
+        calculate_subtree(commitment_tree_leaves)
+    }
+
+    // TODO: stubbed out for now; batch non-membership checks and insertion into the
+    // new nullifier tree are not yet implemented, so this returns an empty snapshot.
+    fn check_nullifier_tree_non_membership_and_insert_to_tree(self) -> AppendOnlyTreeSnapshot {
+        AppendOnlyTreeSnapshot {
+            next_available_leaf_index: 0,
+            root: 0,
+        }
+    }
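+
+    // Illustrative sketch only, not called anywhere: the new nullifier tree is an
+    // indexed merkle tree, so the insertion that is still TODO above will hinge on
+    // a "low leaf" range check of roughly this shape. u64 stand-ins are used here
+    // because Field has no ordering; the real check operates on the fields of
+    // NullifierLeafPreimage.
+    fn low_leaf_admits_sketch(low_value: u64, low_next_value: u64, new_value: u64) -> bool {
+        // The new nullifier must sit strictly between the low leaf and its
+        // successor; next_value == 0 marks the end of the linked list.
+        (low_value < new_value) & ((new_value < low_next_value) | (low_next_value == 0))
+    }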
+
+    fn validate_and_process_public_state(self) -> Field {
+        // TODO(#2521) - data read validation should happen against the current state of the tx and not the start state.
+        // This blocks all interesting use cases that read and write to the same public state in the same tx.
+        // https://aztecprotocol.slack.com/archives/C02M7VC7TN0/p1695809629015719?thread_ts=1695653252.007339&cid=C02M7VC7TN0
+
+        // Process public data reads and public data update requests for the left input
+        // validate_public_data_reads(
+        //     self.start_public_data_tree_root,
+        //     self.kernel_data[0].public_inputs.end.public_data_reads,
+        //     0,
+        //     self.new_public_data_reads_sibling_paths);
+
+        let mid_public_data_tree_root = insert_public_data_update_requests(
+            self.start_public_data_tree_root,
+            self.kernel_data[0].public_inputs.end.public_data_update_requests,
+            0,
+            self.new_public_data_update_requests_sibling_paths
+        );
+
+        // Process public data reads and public data update requests for the right input,
+        // using the resulting tree root from the left one (same TODO(#2521) applies here)
+        // validate_public_data_reads(
+        //     mid_public_data_tree_root,
+        //     self.kernel_data[1].public_inputs.end.public_data_reads,
+        //     MAX_PUBLIC_DATA_READS_PER_TX,
+        //     self.new_public_data_reads_sibling_paths);
+
+        let end_public_data_tree_root = insert_public_data_update_requests(
+            mid_public_data_tree_root,
+            self.kernel_data[1].public_inputs.end.public_data_update_requests,
+            MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX,
+            self.new_public_data_update_requests_sibling_paths
+        );
+
+        end_public_data_tree_root
+    }
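+
+    // Illustrative sketch only, not called anywhere: insert_public_data_update_requests
+    // (defined near the bottom of this file) leans on components::root_from_sibling_path,
+    // which is assumed to recompute a root from a leaf and its sibling path roughly as
+    // follows, with a depth-3 path and std's pedersen_hash as hypothetical stand-ins:
+    fn root_from_sibling_path_sketch(leaf: Field, leaf_index: u32, sibling_path: [Field; 3]) -> Field {
+        let mut node = leaf;
+        let mut index = leaf_index;
+        for i in 0..3 {
+            // At each level, the index parity says whether the running node is
+            // the left or the right child of its parent.
+            if index % 2 == 0 {
+                node = dep::std::hash::pedersen_hash([node, sibling_path[i]]);
+            } else {
+                node = dep::std::hash::pedersen_hash([sibling_path[i], node]);
+            }
+            index = index / 2;
+        }
+        node
+    }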
+
+    // Computes the calldata hash for a base rollup
+    // TODO(Kev): move this into the components module
+    fn components_compute_kernel_calldata_hash(kernel_data : [PreviousKernelData; KERNELS_PER_BASE_ROLLUP]) -> [Field; NUM_FIELDS_PER_SHA256] {
+        // Compute calldata hashes
+        // The input consists of 2 kernels:
+        // 2 * MAX_NEW_COMMITMENTS_PER_TX fields for commitments
+        // 2 * MAX_NEW_NULLIFIERS_PER_TX fields for nullifiers
+        // 2 * MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX public data update requests -> 2 fields each
+        // 2 * MAX_NEW_L2_TO_L1_MSGS_PER_TX fields for l2 -> l1 messages
+        // 2 contract deployments (1 per kernel) -> 6 fields
+        // 2 encrypted logs hashes (1 per kernel) -> 4 fields --> 2 sha256 hashes --> 64 bytes
+        // 2 unencrypted logs hashes (1 per kernel) -> 4 fields --> 2 sha256 hashes --> 64 bytes
+        let mut calldata_hash_inputs = [0; CALLDATA_HASH_INPUT_SIZE];
+
+        for i in 0..2 {
+            let new_commitments = kernel_data[i].public_inputs.end.new_commitments;
+            let new_nullifiers = kernel_data[i].public_inputs.end.new_nullifiers;
+            let public_data_update_requests = kernel_data[i].public_inputs.end.public_data_update_requests;
+            let new_l2_to_l1_msgs = kernel_data[i].public_inputs.end.new_l2_to_l1_msgs;
+            let encrypted_logs_hash = kernel_data[i].public_inputs.end.encrypted_logs_hash;
+            let unencrypted_logs_hash = kernel_data[i].public_inputs.end.unencrypted_logs_hash;
+
+            let mut offset = 0;
+
+            for j in 0..MAX_NEW_COMMITMENTS_PER_TX {
+                calldata_hash_inputs[offset + i * MAX_NEW_COMMITMENTS_PER_TX + j] = new_commitments[j];
+            }
+            offset += MAX_NEW_COMMITMENTS_PER_TX * 2;
+
+            for j in 0..MAX_NEW_NULLIFIERS_PER_TX {
+                calldata_hash_inputs[offset + i * MAX_NEW_NULLIFIERS_PER_TX + j] = new_nullifiers[j];
+            }
+            offset += MAX_NEW_NULLIFIERS_PER_TX * 2;
+
+            for j in 0..MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX {
+                calldata_hash_inputs[offset + i * MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX * 2 + j * 2] =
+                    public_data_update_requests[j].leaf_index;
+                calldata_hash_inputs[offset + i * MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX * 2 + j * 2 + 1] =
+                    public_data_update_requests[j].new_value;
+            }
+            offset += MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX * 2 * 2;
+
+            for j in 0..MAX_NEW_L2_TO_L1_MSGS_PER_TX {
+                calldata_hash_inputs[offset + i * MAX_NEW_L2_TO_L1_MSGS_PER_TX + j] = new_l2_to_l1_msgs[j];
+            }
+            offset += MAX_NEW_L2_TO_L1_MSGS_PER_TX * 2;
+
+            let contract_leaf = kernel_data[i].public_inputs.end.new_contracts[0];
+            calldata_hash_inputs[offset + i] = contract_leaf.hash();
+
+            offset += MAX_NEW_CONTRACTS_PER_TX * 2;
+
+            let new_contracts = kernel_data[i].public_inputs.end.new_contracts;
+            calldata_hash_inputs[offset + i * 2] = new_contracts[0].contract_address.to_field();
+            calldata_hash_inputs[offset + i * 2 + 1] = new_contracts[0].portal_contract_address.to_field();
+
+            offset += MAX_NEW_CONTRACTS_PER_TX * 2 * 2;
+
+            for j in 0..NUM_FIELDS_PER_SHA256 {
+                calldata_hash_inputs[offset + i * 2 + j] = encrypted_logs_hash[j];
+            }
+            offset += NUM_ENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256 * 2;
+
+            for j in 0..NUM_FIELDS_PER_SHA256 {
+                calldata_hash_inputs[offset + i * 2 + j] = unencrypted_logs_hash[j];
+            }
+        }
+
+        // NOTE!
+        // We deviate from the cpp implementation here.
+        // TODO(Kev): I'm going to modify the cpp implementation to follow this route instead.
+        // Summary: the cpp implementation exploits the fact that the log hashes
+        // are actually 128-bit field elements. We can apply this optimization
+        // later; the cpp code for it looks messy and error-prone.
+        let mut hash_input_flattened = [0; CALLDATA_HASH_INPUT_SIZE * 32];
+        for offset in 0..CALLDATA_HASH_INPUT_SIZE {
+            let input_as_bytes = calldata_hash_inputs[offset].to_be_bytes(32);
+            for byte_index in 0..32 {
+                hash_input_flattened[offset * 32 + byte_index] = input_as_bytes[byte_index];
+            }
+        }
+
+        let sha_digest = dep::std::hash::sha256(hash_input_flattened);
+        U256::from_bytes32(sha_digest).to_u128_limbs()
+    }
+
+    // Check all of the provided block hashes against the historic blocks tree root
+    fn perform_historical_blocks_tree_membership_checks(self) {
+        // For each kernel, we prove inclusion of its block hash against the
+        // historic blocks tree root provided in the rollup constants
+        let historic_root = self.constants.start_historic_blocks_tree_roots_snapshot.root;
+
+        for i in 0..2 {
+            // Rebuild the block hash
+            let historical_block_data = self.kernel_data[i].public_inputs.constants.block_data;
+            let previous_block_hash = historical_block_data.block.hash();
+
+            let historic_root_witness = self.historic_blocks_tree_root_membership_witnesses[i];
+
+            components::assert_check_membership(
+                previous_block_hash,
+                historic_root_witness.leaf_index,
+                historic_root_witness.sibling_path,
+                historic_root
+            );
+        }
+    }
+
+    // TODO(Kev): This aggregate_proofs method is duplicated in a lot of places
+    fn aggregate_proofs(self) -> AggregationObject {
+        // TODO: for now we simply return the aggregation object from the first proof
+        self.kernel_data[0].public_inputs.end.aggregation_object
     }
-}
\ No newline at end of file
+}
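+
+// Illustrative sketch only, not called anywhere: components_compute_kernel_calldata_hash
+// above ends with U256::from_bytes32(sha_digest).to_u128_limbs(), which is assumed to
+// pack the 32-byte sha256 digest big-endian into two 128-bit field limbs, high then low
+// (hence NUM_FIELDS_PER_SHA256 == 2). Hand-rolled, that assumed packing looks like:
+fn digest_to_two_limbs_sketch(digest: [u8; 32]) -> [Field; 2] {
+    let mut high = 0;
+    let mut low = 0;
+    for i in 0..16 {
+        // Accumulate bytes big-endian: first 16 bytes into the high limb,
+        // last 16 bytes into the low limb
+        high = high * 256 + digest[i] as Field;
+        low = low * 256 + digest[i + 16] as Field;
+    }
+    [high, low]
+}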
+
+fn verify_kernel_proof(proof : Proof) -> bool {
+    // TODO: Just return true for now, as we are mocking out the proof
+    // verification and aggregation.
+    // TODO(Kev): It may make sense to move all of these methods into a
+    // separate module.
+    true
+}
+
+fn insert_public_data_update_requests(
+    tree_root : Field,
+    public_data_update_requests : [PublicDataUpdateRequest; MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX],
+    witnesses_offset : Field,
+    witnesses : [[Field; PUBLIC_DATA_TREE_HEIGHT]; 2 * MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX]
+) -> Field {
+    let mut root = tree_root;
+
+    for i in 0..MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX {
+        let state_write = public_data_update_requests[i];
+        let witness = witnesses[i + witnesses_offset];
+
+        // Empty (all-zero) update requests are padding and must not move the root
+        if (!state_write.is_empty()) {
+            // Prove that the old value is in the tree...
+            components::assert_check_membership(
+                state_write.old_value,
+                state_write.leaf_index,
+                witness,
+                root,
+            );
+            // ...then recompute the root with the new value in its place
+            root = components::root_from_sibling_path(state_write.new_value, state_write.leaf_index, witness);
+        }
+    }
+
+    root
+}
+
+fn validate_public_data_reads(
+    tree_root : Field,
+    public_data_reads : [PublicDataRead; MAX_PUBLIC_DATA_READS_PER_TX],
+    witnesses_offset : Field,
+    witnesses : [[Field; PUBLIC_DATA_TREE_HEIGHT]; 2 * MAX_PUBLIC_DATA_READS_PER_TX]
+) {
+    for i in 0..MAX_PUBLIC_DATA_READS_PER_TX {
+        let public_data_read = public_data_reads[i];
+        let witness = witnesses[i + witnesses_offset];
+
+        // Empty (all-zero) reads are padding and are skipped
+        if (!public_data_read.is_empty()) {
+            components::assert_check_membership(
+                public_data_read.value,
+                public_data_read.leaf_index,
+                witness,
+                tree_root
+            );
+        }
+    }
+}
+
+global NUM_CONTRACT_LEAVES = 2;
+#[test]
+fn consistent_num_contract_leaves() {
+    assert(NUM_CONTRACT_LEAVES == MAX_NEW_CONTRACTS_PER_TX * 2, "num contract leaves incorrect, see calculate_contract_leaves to see how it is computed");
+}
+
+global NOTE_HASH_SUBTREE_WIDTH = 128;
+#[test]
+fn consistent_note_hash_subtree_width() {
+    assert(NOTE_HASH_SUBTREE_WIDTH == 2.pow_32(NOTE_HASH_SUBTREE_HEIGHT), "note hash subtree width is incorrect");
+}
+
+global CALLDATA_HASH_INPUT_SIZE = 338;
+#[test]
+fn consistent_calldata_hash_input_size() {
+    let expected_size = (MAX_NEW_COMMITMENTS_PER_TX
+        + MAX_NEW_NULLIFIERS_PER_TX
+        + MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX * 2
+        + MAX_NEW_L2_TO_L1_MSGS_PER_TX
+        + MAX_NEW_CONTRACTS_PER_TX * 3
+        + NUM_ENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256
+        + NUM_UNENCRYPTED_LOGS_HASHES_PER_TX * NUM_FIELDS_PER_SHA256) * 2;
+    assert(CALLDATA_HASH_INPUT_SIZE == expected_size, "calldata hash input size is incorrect");
+}
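+
+// Illustrative test sketch: the is_empty helpers added in patch 2 encode the
+// "all fields zero" padding convention that insert_public_data_update_requests
+// and validate_public_data_reads rely on above to skip unused slots.
+#[test]
+fn empty_public_data_update_request_is_empty() {
+    let empty_request = PublicDataUpdateRequest { leaf_index: 0, old_value: 0, new_value: 0 };
+    assert(empty_request.is_empty());
+}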