Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 9 additions & 9 deletions barretenberg/cpp/pil/vm2/bytecode/bc_decomposition.pil
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ sel = 0;
// This includes the "current byte" and the WINDOW_SIZE - 1 lookahead bytes.
pol WINDOW_SIZE = 37;

// Internal bytecode identifier which is defined in bc_retrieval.pil
pol commit id;
// Contract address which identifies the bytecode
pol commit address;
// pc counts from 0 to bytecode_length - 1.
pol commit pc;
// bytes_remaining counts from bytecode_length to 1.
Expand Down Expand Up @@ -72,11 +72,11 @@ sel * (1 - last_of_contract) * (pc' - pc - 1) = 0;
#[BC_DEC_BYTES_REMAINING_DECREMENT]
sel * (1 - last_of_contract) * (bytes_remaining' - bytes_remaining + 1) = 0;

// This is crucial to propagate bytecode_id as this is part of the unique identifier
// for a given instruction (pair [pc, bytecode_id]). Otherwise, instruction fetching might
// This is crucial to propagate contract address as this is part of the unique identifier
// for a given instruction (pair [pc, address]). Otherwise, instruction fetching might
// be misled to copy the wrong instruction from this subtrace.
#[BC_DEC_ID_CONSTANT]
(1 - FIRST_OR_LAST_CONTRACT) * (id' - id) = 0;
#[BC_DEC_ADDRESS_CONSTANT]
(1 - FIRST_OR_LAST_CONTRACT) * (address' - address) = 0;

// This constrains that the bytes are in the range 0 to 255.
#[BYTES_ARE_BYTES]
Expand Down Expand Up @@ -211,12 +211,12 @@ bytes_pc_plus_36 = (1 - FIRST_OR_LAST_CONTRACT) * bytes_pc_plus_35';
// does not choose at least the minimum amount of rows, the lookup will fail.
// NOTE: The bytecode hashing trace will constrain itself that every 31st pc is packed.

// TODO: We need to ensure that there is a single row per pair (pc, bytecode_id) in
// TODO: We need to ensure that there is a single row per pair (pc, address) in
// this subtrace (otherwise, a malicious prover might copy wrong values).
// This is ensured by #[SEL_TOGGLED_AT_PACKED] and #[BC_DEC_PC_INCREMENT] relations but
// still have to implement the unicity on bytecode_id. The current plan is to
// still have to implement the unicity on contract address. The current plan is to
// add a permutation between this sub-trace and bc_retrieval.pil of the form:
// last_of_contract {id} is bc_retrieval.sel_XXX {bc_retrieval.bytecode_id}
// last_of_contract {address} is bc_retrieval.sel_XXX {bc_retrieval.address}

pol commit sel_packed;
sel_packed * (1 - sel_packed) = 0;
Expand Down
42 changes: 21 additions & 21 deletions barretenberg/cpp/pil/vm2/bytecode/bc_hashing.pil
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@ include "../poseidon2_hash.pil";
// Finally, we copy through a lookup/permutation based on the selector these field elements
// to here (bc_hashing.pil) and then proceed to hashing.

// pc_index | bytecode_id | packed_field | incremental_hash | output_hash | latch |
// pc_index | address | packed_field | incremental_hash | output_hash | latch |
// -----------+-------------+--------------+------------------+--------------------+--------
// 0 | 0 | 0xabc | len | H(0xabc,len) = 0x2 | 0
// 31 | 0 | 0x123 | 0x2 | H(0x123,0x2) = 0x3 | 0
// 62 | 0 | 0x62d | 0x3 | H(0x62d,0x3) = end | 1
// 0 | 1 | 0x4ab | len | H(0x4ab,len) = 0x5 | 0
// 31 | 1 | 0x21f | 0x5 | H(0x21f,0x5) = 0x6 | 0
// 62 | 2 | 0x12a | 0x6 | H(0x12a,0x6) = 0x9 | 0
// 93 | 2 | 0x982 | 0x9 | H(0x982,0x9) = end | 1
// 0 | 0x123 | 0xabc | len | H(0xabc,len) = 0x2 | 0
// 31 | 0x123 | 0x123 | 0x2 | H(0x123,0x2) = 0x3 | 0
// 62 | 0x123 | 0x62d | 0x3 | H(0x62d,0x3) = end | 1
// 0 | 0x456 | 0x4ab | len | H(0x4ab,len) = 0x5 | 0
// 31 | 0x456 | 0x21f | 0x5 | H(0x21f,0x5) = 0x6 | 0
// 62 | 0x789 | 0x12a | 0x6 | H(0x12a,0x6) = 0x9 | 0
// 93 | 0x789 | 0x982 | 0x9 | H(0x982,0x9) = end | 1


// Notes
Expand Down Expand Up @@ -53,7 +53,7 @@ namespace bc_hashing;
#[TRACE_CONTINUITY]
(1 - precomputed.first_row) * (1 - sel) * sel' = 0;

// Triggers the lookup to the address derivation subtrace, signifies the row that contains the final bytecode hash for this id
// Triggers the lookup to the address derivation subtrace, signifies the row that contains the final bytecode hash for this address
// The sequencer can decide where to put this latch.
pol commit latch;
latch * (1 - latch) = 0;
Expand All @@ -66,7 +66,7 @@ namespace bc_hashing;
// to be activated which is impossible on first row.), LATCH_CONDITION is a boolean.
pol LATCH_CONDITION = latch + precomputed.first_row;

// The start of a new bytecode id and new set of hashing runs. Needs to be a committed column as it is used in the lookup
// The start of a new contract address and new set of hashing runs. Needs to be a committed column as it is used in the lookup
pol commit start;
start * (1 - start) = 0;

Expand All @@ -80,26 +80,26 @@ namespace bc_hashing;
#[PC_INCREMENTS]
sel * (pc_index' - ((1 - LATCH_CONDITION) * (31 + pc_index))) = 0;

pol commit bytecode_id;
#[ID_CONSISTENCY]
(1 - LATCH_CONDITION) * (bytecode_id' - bytecode_id) = 0;
pol commit address;
#[ADDRESS_CONSISTENCY]
(1 - LATCH_CONDITION) * (address' - address) = 0;

pol commit packed_field;
#[GET_PACKED_FIELD]
sel { pc_index, bytecode_id, packed_field }
sel { pc_index, address, packed_field }
in
bc_decomposition.sel_packed { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.packed_field };
bc_decomposition.sel_packed { bc_decomposition.pc, bc_decomposition.address, bc_decomposition.packed_field };

// This tracks the incremental bytecode hash after the i-th input
// The first incremental hash of each new bytecode_id is the length of the bytecode
// The first incremental hash of each new contract address is the length of the bytecode
pol commit incremental_hash;

// At the start of a new bytecode hash, the initial incremental hash has to be the length of the bytecode
// Note the looked up PC = 0 (enforced by the PC_INCREMENTS relation), i.e. the initial incremental hash value == bytecode length
#[IV_IS_LEN]
start { pc_index, bytecode_id, incremental_hash }
start { pc_index, address, incremental_hash }
in
bc_decomposition.sel_packed { bc_decomposition.pc, bc_decomposition.id, bc_decomposition.bytes_remaining };
bc_decomposition.sel_packed { bc_decomposition.pc, bc_decomposition.address, bc_decomposition.bytes_remaining };

// Start Hashing, Poseidon2(packed_field, running_hash)
pol commit output_hash;
Expand All @@ -117,12 +117,12 @@ namespace bc_hashing;
// Proof Sketch
// #########################################################################################
// We want to show that the output_hash at a row with latch == 1 correctly enforces that it
// is the result of hashing the bytes of a given bytecode identified by bytecode_id.
// is the result of hashing the bytes of a given bytecode identified by contract address.
// Thanks to #[TRACE_CONTINUITY] and #[SEL_TOGGLED_AT_LATCH], we have the guarantee that
// the rows above the final output_hash are activated. If they are activated, then
// bytecode_id is maintained and pc_index decrements by 31 when we move to the top.
// contract address is maintained and pc_index decrements by 31 when we move to the top.
// From #[START_AFTER_LATCH], we have the guarantee that we cannot meet a row with
// latch == 1 before we meet start == 1 when we go up. This shows that bytecode_id,
// latch == 1 before we meet start == 1 when we go up. This shows that contract address,
// pc_index, and incremental_hash evolution did not deviate from the happy path. When
// we reach a row with start == 1 (we know we must reach one thanks to #[START_AFTER_LATCH]
// enforces it on the second row.), then #[IV_IS_LEN] implies that pc_index and incremental_hash
Expand Down
20 changes: 6 additions & 14 deletions barretenberg/cpp/pil/vm2/bytecode/bc_retrieval.pil
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ include "../constants_gen.pil";
// - Check updatability.
//
// The lookups into this subtrace are expected to be on the columns
// { bytecode_id, address, error }
// This trace is the owner of the bytecode_id (incrementing).
// { address, nullifier_tree_root, public_data_tree_root, error }
// This trace deduplicates by contract address.
//
// Note that this trace will prove both success or failure of retrieval.

Expand All @@ -32,17 +32,9 @@ sel = 0;
#[TRACE_CONTINUITY]
(1 - sel) * (1 - precomputed.first_row) * sel' = 0;

// This id is generated at runtime starting from zero and incremented by 1.
// The primary source of bytecode_id is this sub-trace.
pol commit bytecode_id;
pol commit error; // some error occurred.
pol commit address; // contract address.

// `bytecode_id` starts from zero and incremented by 1.
// Note: we only need to require TRACE_CONTINUITY because of this.
((1 - sel) * sel') * bytecode_id = 0; // First row that has sel = 1.
sel * sel' * (bytecode_id' - bytecode_id) = 0; // Next row.

// contract instance members.
//
// Note that bytecode retrieval doesn't care about the other instance members
Expand Down Expand Up @@ -105,11 +97,11 @@ instance_exists {
// TODO(dbanks12): re-enable once C++ and PIL use standard poseidon2 hashing for bytecode commitments.
// Note: only need to hash the bytecode if the instance exists. Otherwise there is nothing to hash!
//#[BYTECODE_HASH_IS_CORRECT]
//instance_exists { bytecode_id, public_bytecode_commitment } in bc_hashing.latch { bc_hashing.bytecode_id, bc_hashing.output_hash };
//instance_exists { address, public_bytecode_commitment } in bc_hashing.latch { bc_hashing.address, bc_hashing.output_hash };

// Note: we don't need to silo and check the class id because the deployer contract guarantees
// that if a contract instance exists, the class has been registered.

// TODO: To ensure bytecode_id unicity inside of bc_decomposition.pil, we will have to introduce
// a permutation of the form: sel_XXX {bytecode_id} is bc_decomposition.last_of_contract {bc_decomposition.id}
// sel_XXX will have to be picked so that it selects a bytecode_id iff it has an entry in bc_decomposition
// TODO: To ensure contract address unicity inside of bc_decomposition.pil, we will have to introduce
// a permutation of the form: sel_XXX {address} is bc_decomposition.last_of_contract {bc_decomposition.address}
// sel_XXX will have to be picked so that it selects a contract address iff it has an entry in bc_decomposition
12 changes: 6 additions & 6 deletions barretenberg/cpp/pil/vm2/bytecode/instr_fetching.pil
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ sel = 0;

// This pair identifies uniquely a given instruction among the bytecodes.
pol commit pc;
pol commit bytecode_id;
pol commit contract_address;


// ****************************************************************************
Expand Down Expand Up @@ -88,7 +88,7 @@ sel_parsing_err * (1 - sel_parsing_err) = 0; // enforces disjoint errors
// Handling pc_out_of_range error
// ****************************************************************************

// Retrieved from bc_decomposition.pil based on bytecode_id with pc == 0
// Retrieved from bc_decomposition.pil based on contract_address with pc == 0
pol commit bytecode_size;

// We have to enforce that: pc < bytecode_size <==> pc_out_of_range == 0
Expand Down Expand Up @@ -159,8 +159,8 @@ sel_has_tag { tag_value, tag_out_of_range } in precomputed.sel_range_8 { precomp

// The size of the bytecode is bytes_remaining at pc == 0.
#[BYTECODE_SIZE_FROM_BC_DEC]
sel { bytecode_id, precomputed.zero, bytecode_size } in
bc_decomposition.sel { bc_decomposition.id, bc_decomposition.pc, bc_decomposition.bytes_remaining };
sel { contract_address, precomputed.zero, bytecode_size } in
bc_decomposition.sel { bc_decomposition.address, bc_decomposition.pc, bc_decomposition.bytes_remaining };

// bytecode[pc] to bytecode[pc + 36]
pol commit bd0, bd1, bd2, bd3, bd4,
Expand All @@ -182,7 +182,7 @@ sel_pc_in_range = sel * (1 - pc_out_of_range);
// Bring in the bytes from the bytecode columns.
#[BYTES_FROM_BC_DEC]
sel_pc_in_range {
bytecode_id,
contract_address,
pc,
bytes_to_read,
bd0, bd1, bd2, bd3, bd4,
Expand All @@ -195,7 +195,7 @@ sel_pc_in_range {
bd35, bd36
} in
bc_decomposition.sel {
bc_decomposition.id,
bc_decomposition.address,
bc_decomposition.pc,
bc_decomposition.bytes_to_read,
bc_decomposition.bytes, bc_decomposition.bytes_pc_plus_1, bc_decomposition.bytes_pc_plus_2, bc_decomposition.bytes_pc_plus_3, bc_decomposition.bytes_pc_plus_4,
Expand Down
15 changes: 5 additions & 10 deletions barretenberg/cpp/pil/vm2/execution.pil
Original file line number Diff line number Diff line change
Expand Up @@ -95,21 +95,16 @@ last = sel * (1 - sel');
* Temporality group 1: Bytecode retrieval (unconditional)
**************************************************************************************************/

pol commit bytecode_id;
pol commit sel_bytecode_retrieval_failure;

// TODO(dbanks12): since this lookup is no longer active for all rows,
// we likely need to constrain that bytecode_id does not change during a context,
// or we need to otherwise consider the implications of this.
// Note: bytecode_id concept removed - bytecode retrieval now uses contract address directly
#[BYTECODE_RETRIEVAL_RESULT]
sel_first_row_in_context {
bytecode_id,
contract_address,
prev_nullifier_tree_root, // from context.pil
prev_public_data_tree_root, // from context.pil
sel_bytecode_retrieval_failure
} in bc_retrieval.sel {
bc_retrieval.bytecode_id,
bc_retrieval.address,
bc_retrieval.nullifier_tree_root,
bc_retrieval.public_data_tree_root,
Expand Down Expand Up @@ -139,9 +134,9 @@ pol commit op[7]; // operands

#[INSTRUCTION_FETCHING_RESULT]
sel_bytecode_retrieval_success {
pc, bytecode_id, sel_instruction_fetching_failure
pc, contract_address, sel_instruction_fetching_failure
} in instr_fetching.sel {
instr_fetching.pc, instr_fetching.bytecode_id, instr_fetching.sel_parsing_err
instr_fetching.pc, instr_fetching.contract_address, instr_fetching.sel_parsing_err
};

pol commit sel_instruction_fetching_success;
Expand All @@ -150,11 +145,11 @@ sel_instruction_fetching_success = sel * (1 - sel_instruction_fetching_failure);

#[INSTRUCTION_FETCHING_BODY]
sel_instruction_fetching_success {
pc, bytecode_id, ex_opcode, instr_length,
pc, contract_address, ex_opcode, instr_length,
indirect, op[0], op[1], op[2], op[3], op[4], op[5], op[6]
} in instr_fetching.sel {
instr_fetching.pc,
instr_fetching.bytecode_id,
instr_fetching.contract_address,
instr_fetching.exec_opcode,
instr_fetching.instr_size,
instr_fetching.indirect,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,8 @@ TEST(BytecodeDecompositionConstrainingTest, SingleBytecode)
init_trace(trace);
BytecodeTraceBuilder builder;
builder.process_decomposition(
{ { .bytecode_id = 1, .bytecode = std::make_shared<std::vector<uint8_t>>(random_bytes(40)) } }, trace);
{ { .bytecode_commitment = FF(1), .bytecode = std::make_shared<std::vector<uint8_t>>(random_bytes(40)) } },
trace);

EXPECT_EQ(trace.get_num_rows(), 1 + 40);
check_relation<bc_decomposition>(trace);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ TEST(BytecodeHashingConstrainingTest, SingleBytecodeHash)
BytecodeTraceBuilder builder;

builder.process_hashing(
{ { .bytecode_id = 1, .bytecode_length = 62, .bytecode_fields = random_fields(2) /* 62 bytes */ } }, trace);
{ { .bytecode_commitment = FF(1), .bytecode_length = 62, .bytecode_fields = random_fields(2) /* 62 bytes */ } },
trace);

check_relation<bc_hashing>(trace);
}
Expand All @@ -74,8 +75,10 @@ TEST(BytecodeHashingConstrainingTest, MultipleBytecodeHash)
BytecodeTraceBuilder builder;

builder.process_hashing(
{ { .bytecode_id = 1, .bytecode_length = 62, .bytecode_fields = random_fields(2) /* 62 bytes */ },
{ .bytecode_id = 2, .bytecode_length = 62000, .bytecode_fields = random_fields(2000) /* 62k bytes */ } },
{ { .bytecode_commitment = FF(1), .bytecode_length = 62, .bytecode_fields = random_fields(2) /* 62 bytes */ },
{ .bytecode_commitment = FF(2),
.bytecode_length = 62000,
.bytecode_fields = random_fields(2000) /* 62k bytes */ } },
trace);

check_relation<bc_hashing>(trace);
Expand Down Expand Up @@ -110,7 +113,7 @@ TEST(BytecodeHashingConstrainingTest, PoseidonInteractions)

poseidon2_builder.process_hash(hash_event_emitter.dump_events(), trace);
bytecode_builder.process_hashing(
{ { .bytecode_id = 1, .bytecode_length = 62, .bytecode_fields = fields /* 62 bytes */ } }, trace);
{ { .bytecode_commitment = FF(1), .bytecode_length = 62, .bytecode_fields = fields /* 62 bytes */ } }, trace);

// TODO(dbanks12): re-enable once C++ and PIL use standard poseidon2 hashing for bytecode commitments.
// check_interaction<BytecodeTraceBuilder, lookup_bc_hashing_poseidon2_hash_settings>(trace);
Expand All @@ -128,9 +131,10 @@ TEST(BytecodeHashingConstrainingTest, BytecodeInteractions)
std::vector<FF> fields = simulation::encode_bytecode(bytecode);
BytecodeTraceBuilder builder;

builder.process_hashing({ { .bytecode_id = 1, .bytecode_length = 40, .bytecode_fields = fields } }, trace);
builder.process_hashing({ { .bytecode_commitment = FF(1), .bytecode_length = 40, .bytecode_fields = fields } },
trace);
builder.process_decomposition(
{ { .bytecode_id = 1, .bytecode = std::make_shared<std::vector<uint8_t>>(bytecode) } }, trace);
{ { .bytecode_commitment = FF(1), .bytecode = std::make_shared<std::vector<uint8_t>>(bytecode) } }, trace);

check_interaction<BytecodeTraceBuilder,
lookup_bc_hashing_get_packed_field_settings,
Expand All @@ -146,9 +150,10 @@ TEST(BytecodeHashingConstrainingTest, NegativeInvalidStartAfterLatch)
});

BytecodeTraceBuilder builder;
builder.process_hashing({ { .bytecode_id = 1, .bytecode_length = 62, .bytecode_fields = random_fields(2) },
{ .bytecode_id = 2, .bytecode_length = 93, .bytecode_fields = random_fields(3) } },
trace);
builder.process_hashing(
{ { .bytecode_commitment = FF(1), .bytecode_length = 62, .bytecode_fields = random_fields(2) },
{ .bytecode_commitment = FF(2), .bytecode_length = 93, .bytecode_fields = random_fields(3) } },
trace);
check_relation<bc_hashing>(trace, bc_hashing::SR_START_AFTER_LATCH);

// Row = 3 is the start of the hashing for the second bytecode (commitment = FF(2))
Expand All @@ -165,7 +170,7 @@ TEST(BytecodeHashingConstrainingTest, NegativeInvalidPCIncrement)
BytecodeTraceBuilder builder;
builder.process_hashing(
{
{ .bytecode_id = 1, .bytecode_length = 62, .bytecode_fields = random_fields(2) },
{ .bytecode_commitment = FF(1), .bytecode_length = 62, .bytecode_fields = random_fields(2) },
},
trace);
check_relation<bc_hashing>(trace, bc_hashing::SR_PC_INCREMENTS);
Expand All @@ -184,7 +189,7 @@ TEST(BytecodeHashingConstrainingTest, NegativeChainOutput)
BytecodeTraceBuilder builder;
builder.process_hashing(
{
{ .bytecode_id = 1, .bytecode_length = 62, .bytecode_fields = random_fields(2) },
{ .bytecode_commitment = FF(1), .bytecode_length = 62, .bytecode_fields = random_fields(2) },
},
trace);
check_relation<bc_hashing>(trace, bc_hashing::SR_CHAIN_OUTPUT_TO_INCR);
Expand Down Expand Up @@ -222,9 +227,10 @@ TEST(BytecodeHashingConstrainingTest, NegativeBytecodeInteraction)

BytecodeTraceBuilder builder;

builder.process_hashing({ { .bytecode_id = 1, .bytecode_length = 40, .bytecode_fields = fields } }, trace);
builder.process_hashing({ { .bytecode_commitment = FF(1), .bytecode_length = 40, .bytecode_fields = fields } },
trace);
builder.process_decomposition(
{ { .bytecode_id = 1, .bytecode = std::make_shared<std::vector<uint8_t>>(bytecode) } }, trace);
{ { .bytecode_commitment = FF(1), .bytecode = std::make_shared<std::vector<uint8_t>>(bytecode) } }, trace);

// Row = 3 is the start of the hashing for bytecode id = 2
// Modify the pc index for the lookup of the second packed field
Expand Down
Loading
Loading