diff --git a/yarn-project/pxe/src/contract_function_simulator/contract_function_simulator.ts b/yarn-project/pxe/src/contract_function_simulator/contract_function_simulator.ts index e4e783f26b02..b23535571a58 100644 --- a/yarn-project/pxe/src/contract_function_simulator/contract_function_simulator.ts +++ b/yarn-project/pxe/src/contract_function_simulator/contract_function_simulator.ts @@ -689,6 +689,7 @@ function squashTransientSideEffects( scopedNullifiersCLA, /*futureNoteHashReads=*/ [], /*futureNullifierReads=*/ [], + /*futureLogs=*/ [], noteHashNullifierCounterMap, minRevertibleSideEffectCounter, ); @@ -731,16 +732,8 @@ async function verifyReadRequests( nullifierReadRequests.length, ); - const noteHashResetActions = getNoteHashReadRequestResetActions( - noteHashReadRequestsCLA, - scopedNoteHashesCLA, - /*futureNoteHashes=*/ [], - ); - const nullifierResetActions = getNullifierReadRequestResetActions( - nullifierReadRequestsCLA, - scopedNullifiersCLA, - /*futureNullifiers=*/ [], - ); + const noteHashResetActions = getNoteHashReadRequestResetActions(noteHashReadRequestsCLA, scopedNoteHashesCLA); + const nullifierResetActions = getNullifierReadRequestResetActions(nullifierReadRequestsCLA, scopedNullifiersCLA); const settledNoteHashReads: { index: number; value: Fr }[] = []; for (let i = 0; i < noteHashResetActions.actions.length; i++) { diff --git a/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.test.ts b/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.test.ts new file mode 100644 index 000000000000..7d2588ef13c5 --- /dev/null +++ b/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.test.ts @@ -0,0 +1,1122 @@ +import { + MAX_KEY_VALIDATION_REQUESTS_PER_TX, + MAX_NOTE_HASHES_PER_TX, + MAX_NOTE_HASH_READ_REQUESTS_PER_TX, + MAX_NULLIFIERS_PER_TX, + MAX_NULLIFIER_READ_REQUESTS_PER_TX, + MAX_PRIVATE_LOGS_PER_TX, + NOTE_HASH_TREE_HEIGHT, + VK_TREE_HEIGHT, +} 
from '@aztec/constants'; +import { Fr } from '@aztec/foundation/curves/bn254'; +import type { GrumpkinScalar } from '@aztec/foundation/curves/grumpkin'; +import { MembershipWitness } from '@aztec/foundation/trees'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; +import { + type DimensionName, + type PrivateKernelResetDimensions, + privateKernelResetDimensionNames, +} from '@aztec/stdlib/kernel'; +import { NullifierMembershipWitness } from '@aztec/stdlib/trees'; + +import { mock } from 'jest-mock-extended'; +import times from 'lodash.times'; + +import type { PrivateKernelOracle } from '../private_kernel_oracle.js'; +import { PrivateKernelResetPrivateInputsBuilder } from './private_kernel_reset_private_inputs_builder.js'; +import { + PrivateCircuitPublicInputsBuilder, + PrivateKernelCircuitPublicInputsBuilder, + makeExecutionResult, + makeKernelOutput, +} from './test_utils.js'; + +/** + * Generate a random integer between value and toValue (inclusive). + * If toValue is not provided, the random integer is between 0 and value (inclusive). + */ +function randomInt(value: number, toValue?: number) { + const from = toValue !== undefined ? value : 0; + const to = toValue ?? 
value; + return from + Math.floor(Math.random() * (to - from + 1)); +} + +describe('PrivateKernelResetPrivateInputsBuilder', () => { + let oracle: ReturnType<typeof mock<PrivateKernelOracle>>; + let kernel: PrivateKernelCircuitPublicInputsBuilder; + let noteHashNullifierCounterMap: Map<number, number>; + let splitCounter: number; + + beforeEach(() => { + kernel = new PrivateKernelCircuitPublicInputsBuilder(); + noteHashNullifierCounterMap = new Map(); + splitCounter = 0; + oracle = mock<PrivateKernelOracle>(); + oracle.getVkMembershipWitness.mockResolvedValue(MembershipWitness.random(VK_TREE_HEIGHT)); + oracle.getNoteHashMembershipWitness.mockResolvedValue(MembershipWitness.random(NOTE_HASH_TREE_HEIGHT)); + oracle.getNullifierMembershipWitness.mockResolvedValue(NullifierMembershipWitness.random()); + oracle.getMasterSecretKey.mockResolvedValue(Fr.random() as unknown as GrumpkinScalar); + }); + + describe('no next iteration (final reset)', () => { + const makeResetBuilder = () => + new PrivateKernelResetPrivateInputsBuilder( + makeKernelOutput(kernel.build()), + [], + noteHashNullifierCounterMap, + splitCounter, + ); + + const expectDimensions = ( + builder: PrivateKernelResetPrivateInputsBuilder, + actualDimensions: PrivateKernelResetDimensions, + expectedDimensions: Partial<{ [K in DimensionName]: number }>, + ) => { + const requested = builder.getRequestedDimensions(); + for (const [name, value] of Object.entries(expectedDimensions)) { + if (value === 0) { + throw new Error( + `Dimension ${name} is 0. Check the test fixtures to ensure the target dimension is non-zero.`, + ); + } + const key = name as DimensionName; + // Requested dimensions must match exactly. + expect(requested[key]).toBe(value); + // Actual dimensions (from config) must be at least as large. + expect(actualDimensions[key]).toBeGreaterThanOrEqual(value); + } + }; + + it('does not need reset when no data present', async () => { + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + + // Throws if attempting to build. 
+ await expect(builder.build(oracle)).rejects.toThrow('Reset is not required.'); + }); + + it('throws when building without calling needsReset', async () => { + kernel.addSettledNoteHashReadRequest(); + const builder = makeResetBuilder(); + await expect(builder.build(oracle)).rejects.toThrow('Reset is not required.'); + }); + + describe('note hash read requests', () => { + it('resets pending reads', async () => { + const numReads = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + times(numReads, i => + kernel.addNoteHash({ value: new Fr(1 + i) }).addPendingNoteHashReadRequest({ value: new Fr(1 + i) }), + ); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { NOTE_HASH_PENDING_READ: numReads }); + }); + + it('resets settled reads', async () => { + const numReads = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + times(numReads, () => kernel.addSettledNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { NOTE_HASH_SETTLED_READ: numReads }); + expect(oracle.getNoteHashMembershipWitness).toHaveBeenCalledTimes(numReads); + }); + + it('resets both pending and settled reads', async () => { + const numPending = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX - 1); + const numSettled = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numPending); + times(numPending, i => + kernel.addNoteHash({ value: new Fr(i + 1) }).addPendingNoteHashReadRequest({ value: new Fr(i + 1) }), + ); + times(numSettled, () => kernel.addSettledNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { + NOTE_HASH_PENDING_READ: numPending, + NOTE_HASH_SETTLED_READ: numSettled, 
+ }); + expect(oracle.getNoteHashMembershipWitness).toHaveBeenCalledTimes(numSettled); + }); + + it('throws when settled read request has no matching membership witness', async () => { + kernel.addSettledNoteHashReadRequest(); + + oracle.getNoteHashMembershipWitness.mockResolvedValue(undefined); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + await expect(builder.build(oracle)).rejects.toThrow('Read request is reading an unknown note hash.'); + }); + + it('throws when pending read request has no matching note hash', () => { + kernel.addPendingNoteHashReadRequest({ value: new Fr(999) }); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('No matching note hash found for note hash read request.'); + }); + }); + + describe('nullifier read requests', () => { + it('resets pending reads', async () => { + const numReads = randomInt(1, MAX_NULLIFIER_READ_REQUESTS_PER_TX); + times(numReads, i => { + kernel.addNullifier({ value: new Fr(1 + i) }); + kernel.addPendingNullifierReadRequest({ value: new Fr(1 + i) }); + }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { NULLIFIER_PENDING_READ: numReads }); + }); + + it('resets settled reads', async () => { + const numReads = randomInt(1, MAX_NULLIFIER_READ_REQUESTS_PER_TX); + times(numReads, () => kernel.addSettledNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { NULLIFIER_SETTLED_READ: numReads }); + expect(oracle.getNullifierMembershipWitness).toHaveBeenCalledTimes(numReads); + }); + + it('resets both pending and settled reads', async () => { + const numPending = randomInt(1, MAX_NULLIFIER_READ_REQUESTS_PER_TX - 1); + const numSettled = randomInt(1, 
MAX_NULLIFIER_READ_REQUESTS_PER_TX - numPending); + times(numPending, i => { + kernel.addNullifier({ value: new Fr(1 + i) }); + kernel.addPendingNullifierReadRequest({ value: new Fr(1 + i) }); + }); + times(numSettled, () => kernel.addSettledNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { + NULLIFIER_PENDING_READ: numPending, + NULLIFIER_SETTLED_READ: numSettled, + }); + expect(oracle.getNullifierMembershipWitness).toHaveBeenCalledTimes(numSettled); + }); + + it('throws when settled read request has no matching membership witness', async () => { + kernel.addSettledNullifierReadRequest(); + + oracle.getNullifierMembershipWitness.mockResolvedValue(undefined); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + await expect(builder.build(oracle)).rejects.toThrow('Cannot find the leaf for nullifier'); + }); + + it('throws when pending read request has no matching nullifier', () => { + kernel.addPendingNullifierReadRequest({ value: new Fr(999) }); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('No matching nullifier found for nullifier read request.'); + }); + }); + + describe('key validation requests', () => { + it('resets key validation requests and invokes oracle', async () => { + const numRequests = randomInt(1, MAX_KEY_VALIDATION_REQUESTS_PER_TX); + times(numRequests, () => kernel.addKeyValidationRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { KEY_VALIDATION: numRequests }); + expect(oracle.getMasterSecretKey).toHaveBeenCalledTimes(numRequests); + }); + }); + + describe('transient data', () => { + it('resets transient data squashing', async () => { + const numSquashing = randomInt(1, 
Math.min(MAX_NULLIFIERS_PER_TX, MAX_NOTE_HASHES_PER_TX)); + const numNoteHashes = randomInt(numSquashing, MAX_NOTE_HASHES_PER_TX); + const numNullifiers = randomInt(numSquashing, MAX_NULLIFIERS_PER_TX); + times(numNoteHashes, i => { + kernel.addNoteHash({ value: new Fr(i + 1), counter: i + 1 }); + }); + times(numNullifiers, i => { + const noteHashCounter = i + 1; + const nullifierCounter = i + 1000; + const noteHash = i < numSquashing ? new Fr(i + 1) : Fr.ZERO; + kernel.addNullifier({ noteHash, counter: nullifierCounter }); + if (i < numSquashing) { + noteHashNullifierCounterMap.set(noteHashCounter, nullifierCounter); + } + }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { TRANSIENT_DATA_SQUASHING: numSquashing }); + }); + }); + + describe('siloing', () => { + it('resets with note hash siloing', async () => { + const numNoteHashes = randomInt(1, MAX_NOTE_HASHES_PER_TX); + times(numNoteHashes, () => kernel.addNoteHash()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + + expectDimensions(builder, result.dimensions, { NOTE_HASH_SILOING: numNoteHashes }); + }); + + it('resets with nullifier siloing', async () => { + const numNullifiers = randomInt(1, MAX_NULLIFIERS_PER_TX); + times(numNullifiers, () => kernel.addNullifier()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + + expectDimensions(builder, result.dimensions, { NULLIFIER_SILOING: numNullifiers }); + }); + + it('resets with private log siloing', async () => { + const numPrivateLogs = randomInt(1, MAX_PRIVATE_LOGS_PER_TX); + times(numPrivateLogs, () => kernel.addPrivateLog()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + + 
expectDimensions(builder, result.dimensions, { PRIVATE_LOG_SILOING: numPrivateLogs }); + }); + + it('does not need note hash siloing when already siloed', () => { + kernel.addNoteHash({ contractAddress: AztecAddress.ZERO }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('does not need nullifier siloing when already siloed', () => { + kernel.addNullifier({ contractAddress: AztecAddress.ZERO }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('does not need private log siloing when already siloed', () => { + kernel.addPrivateLog({ contractAddress: AztecAddress.ZERO }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('does not need reset when all side effects are already siloed', () => { + kernel + .addNoteHash({ contractAddress: AztecAddress.ZERO }) + .addNullifier({ contractAddress: AztecAddress.ZERO }) + .addPrivateLog({ contractAddress: AztecAddress.ZERO }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('still needs reset for unsiloed dimensions when some are already siloed', async () => { + // Note hashes already siloed, but nullifiers and logs still need siloing. 
+ kernel.addNoteHash({ contractAddress: AztecAddress.ZERO }).addNullifier().addPrivateLog(); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { NULLIFIER_SILOING: 1, PRIVATE_LOG_SILOING: 1 }); + }); + }); + + describe('squashes transient data before siloing', () => { + it('subtracts squashed note hashes and nullifiers from silo counts', async () => { + kernel + .addNoteHash({ value: new Fr(11), counter: 1 }) + .addNoteHash({ value: new Fr(22), counter: 2 }) + .addNullifier({ value: new Fr(333), noteHash: new Fr(11), counter: 3 }) + .addNullifier({ value: new Fr(444), counter: 4 }); + + noteHashNullifierCounterMap.set(1, 3); // noteHash 11 squashed by nullifier 333 + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { + NOTE_HASH_SILOING: 1, + NULLIFIER_SILOING: 1, + }); + }); + + it('subtracts squashed logs from silo count', async () => { + kernel + .addNoteHash({ value: new Fr(11), counter: 1 }) + .addNullifier({ value: new Fr(222), noteHash: new Fr(11), counter: 2 }) + .addNoteHash({ value: new Fr(33), counter: 3 }) + // A non-squashed nullifier. + .addNullifier() + // A private log linked to the squashed note hash. + .addPrivateLog({ noteHashCounter: 1 }) + // A private log not linked to any note hash. 
+ .addPrivateLog({ noteHashCounter: 0 }); + + noteHashNullifierCounterMap.set(1, 2); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, { + PRIVATE_LOG_SILOING: 1, + NOTE_HASH_SILOING: 1, + NULLIFIER_SILOING: 1, + }); + }); + }); + }); + + describe('has next iteration (inner reset)', () => { + let previousIterations: PrivateCircuitPublicInputsBuilder[]; + let nextIteration: PrivateCircuitPublicInputsBuilder; + + const makeResetBuilder = () => { + const executionStack = [...previousIterations, ...(nextIteration ? [nextIteration] : [])].map(iteration => + makeExecutionResult(iteration.build()), + ); + return new PrivateKernelResetPrivateInputsBuilder( + makeKernelOutput(kernel.build()), + executionStack, + noteHashNullifierCounterMap, + splitCounter, + ); + }; + + // For inner reset, only one dimension should be non-zero. + const expectDimensions = ( + builder: PrivateKernelResetPrivateInputsBuilder, + actualDimensions: PrivateKernelResetDimensions, + dimensionName: DimensionName, + expectedValue: number, + ) => { + const requested = builder.getRequestedDimensions(); + // Requested dimension must match exactly. + expect(requested[dimensionName]).toBe(expectedValue); + // Actual dimensions (from config) must be at least as large. + expect(actualDimensions[dimensionName]).toBeGreaterThanOrEqual(expectedValue); + // All other dimensions should be 0 in both requested and actual. 
+ for (const name of privateKernelResetDimensionNames) { + if (name !== dimensionName) { + expect(requested[name]).toBe(0); + expect(actualDimensions[name]).toBe(0); + } + } + }; + + beforeEach(() => { + previousIterations = []; + nextIteration = new PrivateCircuitPublicInputsBuilder(); + }); + + it('does not need reset when no data present', async () => { + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + + // Throws if attempting to build. + await expect(builder.build(oracle)).rejects.toThrow('Reset is not required.'); + }); + + describe('note hash read requests', () => { + it('does not need reset when read requests fit', () => { + const numReads = randomInt(MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + times(numReads, () => kernel.addSettledNoteHashReadRequest()); + + // The next iteration will add 0 or more reads to keep the total at or below MAX. + const numNext = randomInt(MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numReads); + times(numNext, () => nextIteration.addSettledNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('resets when pending reads would overflow', async () => { + const numReads = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + times(numReads, i => { + kernel.addNoteHash({ value: new Fr(i + 1) }); + kernel.addPendingNoteHashReadRequest({ value: new Fr(i + 1) }); + }); + + // The next iteration adds enough items to exceed the maximum allowed by 1. 
+ times(MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numReads + 1, () => nextIteration.addPendingNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', numReads); + }); + + it('resets when settled reads would overflow', async () => { + const numReads = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + times(numReads, () => kernel.addSettledNoteHashReadRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. + times(MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numReads + 1, () => nextIteration.addSettledNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'NOTE_HASH_SETTLED_READ', numReads); + }); + + it('resets when mixed reads would overflow, pending reads are larger', async () => { + // Add at least 2 pending reads and 1 settled read. + const numPending = randomInt(2, MAX_NOTE_HASH_READ_REQUESTS_PER_TX - 1); + // The number of settled reads is less than the pending reads. + const numSettled = randomInt(1, Math.min(numPending - 1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numPending)); + times(numPending, i => { + kernel.addNoteHash({ value: new Fr(i + 1) }); + kernel.addPendingNoteHashReadRequest({ value: new Fr(i + 1) }); + }); + times(numSettled, () => kernel.addSettledNoteHashReadRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. 
+ const numNext = MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numPending - numSettled + 1; + const numNextPending = randomInt(numNext); + const numNextSettled = numNext - numNextPending; + times(numNextPending, () => nextIteration.addPendingNoteHashReadRequest()); + times(numNextSettled, () => nextIteration.addSettledNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', numPending); + }); + + it('resets when mixed reads would overflow, settled reads are larger', async () => { + // Add at least 2 settled reads and 1 pending read. + const numSettled = randomInt(2, MAX_NOTE_HASH_READ_REQUESTS_PER_TX - 1); + // The number of pending reads is less than the settled reads. + const numPending = randomInt(1, Math.min(numSettled - 1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numSettled)); + times(numPending, i => { + kernel.addNoteHash({ value: new Fr(i + 1) }); + kernel.addPendingNoteHashReadRequest({ value: new Fr(i + 1) }); + }); + times(numSettled, () => kernel.addSettledNoteHashReadRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. + const numNext = MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numPending - numSettled + 1; + const numNextPending = randomInt(numNext); + const numNextSettled = numNext - numNextPending; + times(numNextPending, () => nextIteration.addPendingNoteHashReadRequest()); + times(numNextSettled, () => nextIteration.addSettledNoteHashReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'NOTE_HASH_SETTLED_READ', numSettled); + }); + + it('throws when pending reads without matching note hashes would overflow', () => { + // Pending reads for note hashes that haven't been emitted yet. 
+ const numUnresolvableReads = randomInt(1, MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + times(numUnresolvableReads, i => kernel.addPendingNoteHashReadRequest({ value: new Fr(i + 1) })); + + // The next iteration adds enough reads to exceed the maximum allowed by 1. + times(MAX_NOTE_HASH_READ_REQUESTS_PER_TX - numUnresolvableReads + 1, () => + nextIteration.addPendingNoteHashReadRequest(), + ); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('Number of note hash read requests exceeds the limit.'); + }); + }); + + describe('nullifier read requests', () => { + it('does not need reset when read requests fit', () => { + const numReads = randomInt(MAX_NULLIFIER_READ_REQUESTS_PER_TX); + times(numReads, () => kernel.addSettledNullifierReadRequest()); + + // The next iteration will add 0 or more reads to keep the total at or below MAX. + const numNext = randomInt(MAX_NULLIFIER_READ_REQUESTS_PER_TX - numReads); + times(numNext, () => nextIteration.addSettledNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('resets when pending reads would overflow', async () => { + const numReads = randomInt(1, MAX_NULLIFIER_READ_REQUESTS_PER_TX); + times(numReads, i => { + kernel.addNullifier({ value: new Fr(i + 1) }); + kernel.addPendingNullifierReadRequest({ value: new Fr(i + 1) }); + }); + + // The next iteration adds enough items to exceed the maximum allowed by 1. 
+ times(MAX_NULLIFIER_READ_REQUESTS_PER_TX - numReads + 1, () => nextIteration.addPendingNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'NULLIFIER_PENDING_READ', numReads); + }); + + it('resets when settled reads would overflow', async () => { + const numReads = randomInt(1, MAX_NULLIFIER_READ_REQUESTS_PER_TX); + times(numReads, () => kernel.addSettledNullifierReadRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. + times(MAX_NULLIFIER_READ_REQUESTS_PER_TX - numReads + 1, () => nextIteration.addSettledNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'NULLIFIER_SETTLED_READ', numReads); + }); + + it('resets when mixed reads would overflow, pending reads are larger', async () => { + // Add at least 2 pending reads and 1 settled read. + const numPending = randomInt(2, MAX_NULLIFIER_READ_REQUESTS_PER_TX - 1); + // The number of settled reads is less than the pending reads. + const numSettled = randomInt(1, Math.min(numPending - 1, MAX_NULLIFIER_READ_REQUESTS_PER_TX - numPending)); + times(numPending, i => { + kernel.addNullifier({ value: new Fr(i + 1) }); + kernel.addPendingNullifierReadRequest({ value: new Fr(i + 1) }); + }); + times(numSettled, () => kernel.addSettledNullifierReadRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. 
+ const numNext = MAX_NULLIFIER_READ_REQUESTS_PER_TX - numPending - numSettled + 1; + const numNextPending = randomInt(numNext); + const numNextSettled = numNext - numNextPending; + times(numNextPending, () => nextIteration.addPendingNullifierReadRequest()); + times(numNextSettled, () => nextIteration.addSettledNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + if (numPending >= numSettled) { + expectDimensions(builder, result.dimensions, 'NULLIFIER_PENDING_READ', numPending); + } else { + expectDimensions(builder, result.dimensions, 'NULLIFIER_SETTLED_READ', numSettled); + } + }); + + it('resets when mixed reads would overflow, settled reads are larger', async () => { + // Add at least 2 settled reads and 1 pending read. + const numSettled = randomInt(2, MAX_NULLIFIER_READ_REQUESTS_PER_TX - 1); + // The number of pending reads is less than the settled reads. + const numPending = randomInt(1, Math.min(numSettled - 1, MAX_NULLIFIER_READ_REQUESTS_PER_TX - numSettled)); + times(numPending, i => { + kernel.addNullifier({ value: new Fr(i + 1) }); + kernel.addPendingNullifierReadRequest({ value: new Fr(i + 1) }); + }); + times(numSettled, () => kernel.addSettledNullifierReadRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. 
+ const numNext = MAX_NULLIFIER_READ_REQUESTS_PER_TX - numPending - numSettled + 1; + const numNextPending = randomInt(numNext); + const numNextSettled = numNext - numNextPending; + times(numNextPending, () => nextIteration.addPendingNullifierReadRequest()); + times(numNextSettled, () => nextIteration.addSettledNullifierReadRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + if (numPending >= numSettled) { + expectDimensions(builder, result.dimensions, 'NULLIFIER_PENDING_READ', numPending); + } else { + expectDimensions(builder, result.dimensions, 'NULLIFIER_SETTLED_READ', numSettled); + } + }); + + it('throws when pending reads without matching nullifiers would overflow', () => { + // Pending reads for nullifiers that haven't been emitted yet. + const numUnresolvableReads = randomInt(1, MAX_NULLIFIER_READ_REQUESTS_PER_TX); + times(numUnresolvableReads, i => kernel.addPendingNullifierReadRequest({ value: new Fr(i + 1) })); + + // The next iteration adds enough reads to exceed the maximum allowed by 1. + times(MAX_NULLIFIER_READ_REQUESTS_PER_TX - numUnresolvableReads + 1, () => + nextIteration.addPendingNullifierReadRequest(), + ); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('Number of nullifier read requests exceeds the limit.'); + }); + }); + + describe('key validation requests', () => { + it('does not need reset when key validation requests fit', () => { + const numRequests = randomInt(MAX_KEY_VALIDATION_REQUESTS_PER_TX); + times(numRequests, () => kernel.addKeyValidationRequest()); + + // The next iteration will add 0 or more requests to keep the total at or below MAX. 
+ const numNext = randomInt(MAX_KEY_VALIDATION_REQUESTS_PER_TX - numRequests); + times(numNext, () => nextIteration.addKeyValidationRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('resets when key validation requests would overflow', async () => { + const numRequests = randomInt(1, MAX_KEY_VALIDATION_REQUESTS_PER_TX); + times(numRequests, () => kernel.addKeyValidationRequest()); + + // The next iteration adds enough items to exceed the maximum allowed by 1. + times(MAX_KEY_VALIDATION_REQUESTS_PER_TX - numRequests + 1, () => nextIteration.addKeyValidationRequest()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'KEY_VALIDATION', numRequests); + }); + }); + + describe('transient data', () => { + it('does not need reset when note hashes, nullifiers, and private logs will not overflow', () => { + const numNoteHashes = randomInt(MAX_NOTE_HASHES_PER_TX); + const numNullifiers = randomInt(MAX_NULLIFIERS_PER_TX); + const numPrivateLogs = randomInt(MAX_PRIVATE_LOGS_PER_TX); + const numSquashed = randomInt(Math.min(numNoteHashes, numNullifiers)); + times(numNoteHashes, i => { + kernel.addNoteHash({ value: new Fr(i + 1) }); + }); + times(numNullifiers, i => { + const noteHashCounter = i + 1; + const nullifierCounter = i + 1000; + const noteHash = i < numSquashed ? new Fr(i + 1) : Fr.ZERO; + kernel.addNullifier({ noteHash, counter: nullifierCounter }); + if (i < numSquashed) { + noteHashNullifierCounterMap.set(noteHashCounter, nullifierCounter); + } + }); + times(numPrivateLogs, i => { + const noteHashCounter = i < numSquashed ? i + 1 : 0; + kernel.addPrivateLog({ noteHashCounter }); + }); + + // The next iteration will add 0 or more items to keep the total at or below MAX. 
+ times(randomInt(MAX_NOTE_HASHES_PER_TX - numNoteHashes), () => nextIteration.addNoteHash()); + times(randomInt(MAX_NULLIFIERS_PER_TX - numNullifiers), () => nextIteration.addNullifier()); + times(randomInt(MAX_PRIVATE_LOGS_PER_TX - numPrivateLogs), () => nextIteration.addPrivateLog()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(false); + }); + + it('resets when note hashes will overflow and transient data can be squashed', async () => { + const numNoteHashes = randomInt(1, MAX_NOTE_HASHES_PER_TX); + times(numNoteHashes, i => { + kernel.addNoteHash({ value: new Fr(i + 1), counter: i + 1 }); + }); + + // The next iteration adds enough note hashes to exceed the maximum allowed by 1. + const numNextNoteHashes = MAX_NOTE_HASHES_PER_TX - numNoteHashes + 1; + times(numNextNoteHashes, () => nextIteration.addNoteHash()); + + // Squash at least 1 note hash to prevent the next iteration from overflowing. + const numSquashed = randomInt(1, numNoteHashes); + + // Create at least `numSquashed` nullifier to be squashed. + const numNullifiers = randomInt(numSquashed, MAX_NULLIFIERS_PER_TX); + times(numNullifiers, i => { + const noteHashCounter = i + 1; + const nullifierCounter = i + 1000; + const noteHash = i < numSquashed ? new Fr(i + 1) : Fr.ZERO; + kernel.addNullifier({ noteHash, counter: nullifierCounter }); + if (i < numSquashed) { + noteHashNullifierCounterMap.set(noteHashCounter, nullifierCounter); + } + }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'TRANSIENT_DATA_SQUASHING', numSquashed); + }); + + it('resets when nullifiers will overflow and transient data can be squashed', async () => { + const numNullifiers = randomInt(1, MAX_NULLIFIERS_PER_TX); + // The next iteration adds enough nullifiers to exceed the maximum allowed by 1. 
+ const numNextNullifiers = MAX_NULLIFIERS_PER_TX - numNullifiers + 1; + // Squash at least 1 nullifier to prevent the next iteration from overflowing. + const numSquashed = randomInt(1, numNullifiers); + + // Create at least `numSquashed` note hash to be squashed. + const numNoteHashes = randomInt(numSquashed, MAX_NOTE_HASHES_PER_TX); + times(numNoteHashes, i => { + kernel.addNoteHash({ value: new Fr(i + 1), counter: i + 1 }); + }); + + times(numNullifiers, i => { + const noteHashCounter = i + 1; + const nullifierCounter = i + 1000; + const noteHash = i < numSquashed ? new Fr(i + 1) : Fr.ZERO; + kernel.addNullifier({ noteHash, counter: nullifierCounter }); + if (i < numSquashed) { + noteHashNullifierCounterMap.set(noteHashCounter, nullifierCounter); + } + }); + + times(numNextNullifiers, () => nextIteration.addNullifier()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'TRANSIENT_DATA_SQUASHING', numSquashed); + }); + + it('resets when private logs will overflow and transient data can be squashed', async () => { + const numLogs = randomInt(1, MAX_PRIVATE_LOGS_PER_TX); + // The next iteration adds enough logs to exceed the maximum allowed by 1. + const numNextLogs = MAX_PRIVATE_LOGS_PER_TX - numLogs + 1; + // Squash at least 1 log to prevent the next iteration from overflowing. + const numSquashed = randomInt(1, numLogs); + // Create at least `numSquashed` note hash and nullifier squashable pairs. + const numNoteHashes = randomInt(numSquashed, MAX_NOTE_HASHES_PER_TX); + const numNullifiers = randomInt(numSquashed, MAX_NULLIFIERS_PER_TX); + + times(numNoteHashes, i => { + kernel.addNoteHash({ value: new Fr(i + 1), counter: i + 1 }); + }); + + times(numLogs, i => { + const noteHashCounter = i < numSquashed ? 
i + 1 : 0; + kernel.addPrivateLog({ noteHashCounter }); + }); + + times(numNullifiers, i => { + const noteHashCounter = i + 1; + const nullifierCounter = i + 1000; + const noteHash = i < numSquashed ? new Fr(i + 1) : Fr.ZERO; + kernel.addNullifier({ noteHash, counter: nullifierCounter }); + if (i < numSquashed) { + noteHashNullifierCounterMap.set(noteHashCounter, nullifierCounter); + } + }); + + times(numNextLogs, () => nextIteration.addPrivateLog()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'TRANSIENT_DATA_SQUASHING', numSquashed); + }); + + it('resets note hash read requests when note hashes overflow by more than squashable transient data', async () => { + // 2 squashable pairs, but one is blocked by a read request. + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + + kernel.addNoteHash({ value: new Fr(3), counter: 3 }); + kernel.addNullifier({ noteHash: new Fr(3), counter: 4 }); + noteHashNullifierCounterMap.set(3, 4); + // A pending read request blocks squashing of the second pair. + kernel.addPendingNoteHashReadRequest({ value: new Fr(3) }); + + // Fill remaining note hashes so that the overflow is 2 (but only 1 pair can be squashed). + times(MAX_NOTE_HASHES_PER_TX - 2, () => kernel.addNoteHash()); + times(2, () => nextIteration.addNoteHash()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Must reset the read request to unblock more squashing in the next round. + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', 1); + }); + + it('resets nullifier read requests when nullifiers overflow by more than squashable transient data', async () => { + // 2 squashable pairs, but one is blocked by a nullifier read request. 
+ kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ value: new Fr(99), noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + + kernel.addNoteHash({ value: new Fr(3), counter: 3 }); + kernel.addNullifier({ value: new Fr(98), noteHash: new Fr(3), counter: 4 }); + noteHashNullifierCounterMap.set(3, 4); + // A pending read request blocks squashing of the second pair. + kernel.addPendingNullifierReadRequest({ value: new Fr(98) }); + + // Fill remaining nullifiers so that the overflow is 2 (but only 1 pair can be squashed). + times(MAX_NULLIFIERS_PER_TX - 2, () => kernel.addNullifier()); + times(2, () => nextIteration.addNullifier()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Must reset the read request to unblock more squashing in the next round. + expectDimensions(builder, result.dimensions, 'NULLIFIER_PENDING_READ', 1); + }); + + it('resets note hash read requests when private logs overflow and squashed logs are insufficient', async () => { + // 2 squashable pairs with linked logs, but one is blocked by a read request. + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + kernel.addPrivateLog({ noteHashCounter: 1 }); + + kernel.addNoteHash({ value: new Fr(3), counter: 3 }); + kernel.addNullifier({ noteHash: new Fr(3), counter: 4 }); + noteHashNullifierCounterMap.set(3, 4); + kernel.addPrivateLog({ noteHashCounter: 3 }); + // A pending read request blocks squashing of the second pair. + kernel.addPendingNoteHashReadRequest({ value: new Fr(3) }); + + // Fill remaining private logs so that the overflow is 2 (but only 1 log can be squashed). 
+ times(MAX_PRIVATE_LOGS_PER_TX - 2, () => kernel.addPrivateLog()); + times(2, () => nextIteration.addPrivateLog()); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Must reset the read request to unblock more squashing in the next round. + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', 1); + }); + + it('resets note hash read requests first when note hashes overflow and transient data is blocked by reads', async () => { + // A note hash that could be squashed, but is being read. + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + // A pending read request for the note hash prevents squashing. + kernel.addPendingNoteHashReadRequest({ value: new Fr(1) }); + + // Fill remaining note hashes to cause overflow. + times(MAX_NOTE_HASHES_PER_TX - 1, () => kernel.addNoteHash()); + nextIteration.addNoteHash(); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Resets the note hash read request instead of transient data. + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', 1); + }); + + it('resets nullifier read requests first when nullifiers overflow and transient data is blocked by reads', async () => { + // A note hash and nullifier that could be squashed, but the nullifier is being read. + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ value: new Fr(99), noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + // A pending read request for the nullifier prevents squashing. + kernel.addPendingNullifierReadRequest({ value: new Fr(99) }); + + // Fill remaining nullifiers to cause overflow. 
+ times(MAX_NULLIFIERS_PER_TX - 1, () => kernel.addNullifier()); + nextIteration.addNullifier(); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Resets the nullifier read request instead of transient data. + expectDimensions(builder, result.dimensions, 'NULLIFIER_PENDING_READ', 1); + }); + + it('resets note hash read requests first when private logs overflow and transient data is blocked by reads', async () => { + // A note hash with a linked log that could be squashed, but the note hash is being read. + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + kernel.addPrivateLog({ noteHashCounter: 1 }); + // A pending read request for the note hash prevents squashing. + kernel.addPendingNoteHashReadRequest({ value: new Fr(1) }); + + // Fill remaining private logs to cause overflow. + times(MAX_PRIVATE_LOGS_PER_TX - 1, () => kernel.addPrivateLog()); + nextIteration.addPrivateLog(); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Resets the note hash read request instead of transient data. + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', 1); + }); + + it('resets only one dimension when both note hashes and nullifiers overflow', async () => { + // A squashable pair blocked by a note hash read request. + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ value: new Fr(99), noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + kernel.addPendingNoteHashReadRequest({ value: new Fr(1) }); + // A squashable pair blocked by a nullifier read request. 
+ kernel.addNoteHash({ value: new Fr(3), counter: 3 }); + kernel.addNullifier({ value: new Fr(98), noteHash: new Fr(3), counter: 4 }); + noteHashNullifierCounterMap.set(3, 4); + kernel.addPendingNullifierReadRequest({ value: new Fr(98) }); + + // Fill remaining to cause both note hash and nullifier overflow. + times(MAX_NOTE_HASHES_PER_TX - 2, () => kernel.addNoteHash()); + times(MAX_NULLIFIERS_PER_TX - 2, () => kernel.addNullifier()); + nextIteration.addNoteHash(); + nextIteration.addNullifier(); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + // Only one dimension should be reset for an inner reset -> note hash read request is reset first. + expectDimensions(builder, result.dimensions, 'NOTE_HASH_PENDING_READ', 1); + }); + + it('does not squash note hash when a future log in the execution stack is linked to it', () => { + // A squashable pair: note hash (counter 1) <> nullifier (counter 2). + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + + // Fill remaining note hashes to cause overflow. + times(MAX_NOTE_HASHES_PER_TX - 1, () => kernel.addNoteHash()); + nextIteration.addNoteHash(); + + // A future log linked to the note hash prevents squashing. + nextIteration.addPrivateLog({ noteHashCounter: 1 }); + + const builder = makeResetBuilder(); + // Without the future log, the pair would be squashed and the overflow resolved. + // With it, squashing is blocked and the overflow cannot be resolved. + expect(() => builder.needsReset()).toThrow('Number of note hashes exceeds the limit.'); + }); + + it('squashes note hash when future log is linked to a different note hash', async () => { + // A squashable pair: note hash (counter 1) <> nullifier (counter 2). 
+ kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + + // Fill remaining note hashes to cause overflow. + times(MAX_NOTE_HASHES_PER_TX - 1, () => kernel.addNoteHash()); + nextIteration.addNoteHash(); + + // A future log linked to a different note hash (counter 999) does not block squashing. + nextIteration.addPrivateLog({ noteHashCounter: 999 }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'TRANSIENT_DATA_SQUASHING', 1); + }); + + it('squashes note hash when future log has noteHashCounter of 0', async () => { + // A squashable pair: note hash (counter 1) <> nullifier (counter 2). + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + + // Fill remaining note hashes to cause overflow. + times(MAX_NOTE_HASHES_PER_TX - 1, () => kernel.addNoteHash()); + nextIteration.addNoteHash(); + + // A future log with noteHashCounter = 0 is not linked to any note hash. + nextIteration.addPrivateLog({ noteHashCounter: 0 }); + + const builder = makeResetBuilder(); + expect(builder.needsReset()).toBe(true); + + const result = await builder.build(oracle); + expectDimensions(builder, result.dimensions, 'TRANSIENT_DATA_SQUASHING', 1); + }); + + it('does not squash note hash when a future log from a previous iteration is linked to it', () => { + // A squashable pair: note hash (counter 1) <> nullifier (counter 2). + kernel.addNoteHash({ value: new Fr(1), counter: 1 }); + kernel.addNullifier({ noteHash: new Fr(1), counter: 2 }); + noteHashNullifierCounterMap.set(1, 2); + + // Fill remaining note hashes to cause overflow. 
+ times(MAX_NOTE_HASHES_PER_TX - 1, () => kernel.addNoteHash()); + nextIteration.addNoteHash(); + + // A future log from a previous (unprocessed) iteration linked to the note hash. + const prevIteration = new PrivateCircuitPublicInputsBuilder(); + prevIteration.addPrivateLog({ noteHashCounter: 1 }); + previousIterations.push(prevIteration); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('Number of note hashes exceeds the limit.'); + }); + + it('throws when note hashes overflow and cannot be squashed', () => { + for (let i = 0; i < MAX_NOTE_HASHES_PER_TX; i++) { + kernel.addNoteHash({ value: new Fr(i + 1), counter: (i + 1) * 10 }); + } + + nextIteration.addNoteHash(); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('Number of note hashes exceeds the limit.'); + }); + + it('throws when nullifiers overflow and cannot be resolved', () => { + for (let i = 0; i < MAX_NULLIFIERS_PER_TX; i++) { + kernel.addNullifier({ value: new Fr(i + 1), counter: (i + 1) * 10 }); + } + + nextIteration.addNullifier(); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('Number of nullifiers exceeds the limit.'); + }); + + it('throws when private logs overflow and cannot be resolved', () => { + for (let i = 0; i < MAX_PRIVATE_LOGS_PER_TX; i++) { + kernel.addPrivateLog({ counter: (i + 1) * 10 }); + } + + nextIteration.addPrivateLog(); + + const builder = makeResetBuilder(); + expect(() => builder.needsReset()).toThrow('Number of private logs exceeds the limit.'); + }); + }); + + describe('siloing', () => { + it('does not check siloing', () => { + kernel.addNoteHash().addNullifier().addPrivateLog(); + + const builder = makeResetBuilder(); + // Nothing overflows, and siloing is not required for inner resets. 
+ expect(builder.needsReset()).toBe(false); + }); + }); + }); +}); diff --git a/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.ts b/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.ts index 5d8eb4f4a4a7..9b4c5542d145 100644 --- a/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.ts +++ b/yarn-project/pxe/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.ts @@ -10,7 +10,7 @@ import { import { makeTuple } from '@aztec/foundation/array'; import { padArrayEnd } from '@aztec/foundation/collection'; import type { Fr } from '@aztec/foundation/curves/bn254'; -import { type Tuple, assertLength } from '@aztec/foundation/serialize'; +import { assertLength } from '@aztec/foundation/serialize'; import { MembershipWitness } from '@aztec/foundation/trees'; import { privateKernelResetDimensionsConfig } from '@aztec/noir-protocol-circuits-types/client'; import { @@ -27,13 +27,11 @@ import { ReadRequestActionEnum, ReadRequestResetActions, type ScopedKeyValidationRequestAndSeparator, - ScopedNoteHash, - ScopedNullifier, - ScopedReadRequest, TransientDataSquashingHint, buildNoteHashReadRequestHintsFromResetActions, buildNullifierReadRequestHintsFromResetActions, buildTransientDataHints, + countSquashedLogs, findPrivateKernelResetDimensions, getNoteHashReadRequestResetActions, getNullifierReadRequestResetActions, @@ -44,15 +42,6 @@ import { VkData } from '@aztec/stdlib/vks'; import type { PrivateKernelOracle } from '../private_kernel_oracle.js'; -function collectNestedReadRequests( - executionStack: PrivateCallExecutionResult[], - extractReadRequests: (execution: PrivateCallExecutionResult) => ClaimedLengthArray, -): ScopedReadRequest[] { - return collectNested(executionStack, executionResult => { - return extractReadRequests(executionResult).getActiveItems(); - }); -} - function getNullifierMembershipWitnessResolver(oracle: PrivateKernelOracle) { return 
async (nullifier: Fr) => { const res = await oracle.getNullifierMembershipWitness(nullifier); @@ -91,11 +80,14 @@ export class PrivateKernelResetPrivateInputsBuilder { // If there's no next iteration, it's the final reset. private nextIteration?: PrivateCircuitPublicInputs; - private noteHashResetActions: ReadRequestResetActions; - private nullifierResetActions: ReadRequestResetActions; + private noteHashResetActions = ReadRequestResetActions.empty(MAX_NOTE_HASH_READ_REQUESTS_PER_TX); + private nullifierResetActions = ReadRequestResetActions.empty(MAX_NULLIFIER_READ_REQUESTS_PER_TX); private numTransientData?: number; - private transientDataSquashingHints: Tuple; - private requestedDimensions: PrivateKernelResetDimensions; + private transientDataSquashingHints = makeTuple( + MAX_NULLIFIERS_PER_TX, + () => new TransientDataSquashingHint(MAX_NULLIFIERS_PER_TX, MAX_NOTE_HASHES_PER_TX), + ); + private requestedDimensions = PrivateKernelResetDimensions.empty(); constructor( private previousKernelOutput: PrivateKernelSimulateOutput, @@ -104,21 +96,18 @@ export class PrivateKernelResetPrivateInputsBuilder { private splitCounter: number, ) { this.previousKernel = previousKernelOutput.publicInputs; - this.requestedDimensions = PrivateKernelResetDimensions.empty(); - this.noteHashResetActions = ReadRequestResetActions.empty(MAX_NOTE_HASH_READ_REQUESTS_PER_TX); - this.nullifierResetActions = ReadRequestResetActions.empty(MAX_NULLIFIER_READ_REQUESTS_PER_TX); - this.transientDataSquashingHints = makeTuple( - MAX_NULLIFIERS_PER_TX, - () => new TransientDataSquashingHint(MAX_NULLIFIERS_PER_TX, MAX_NOTE_HASHES_PER_TX), - ); this.nextIteration = executionStack[this.executionStack.length - 1]?.publicInputs; } + getRequestedDimensions(): PrivateKernelResetDimensions { + return this.requestedDimensions; + } + needsReset(): boolean { const fns: (() => boolean)[] = [ () => this.needsResetNoteHashReadRequests(), () => this.needsResetNullifierReadRequests(), - () => 
this.needsResetNullifierKeys(), + () => this.needsResetKeyValidationRequests(), () => this.needsResetTransientData(), ]; @@ -145,8 +134,7 @@ export class PrivateKernelResetPrivateInputsBuilder { const isInner = !!this.nextIteration; - // "final" reset must be done at most once. - // Because the code that silo note hashes can't be run repeatedly. + // "final" reset must be done exactly once, because siloing can't be run repeatedly. // The dimensions found must be big enough to reset all values, i.e. empty remainder. const allowRemainder = isInner; @@ -246,24 +234,17 @@ export class PrivateKernelResetPrivateInputsBuilder { resetActions.pendingReadHints = resetActions.pendingReadHints.slice(0, maxPending); } - private needsResetNoteHashReadRequests(forceResetAll = false) { + private needsResetNoteHashReadRequests(forceReset = false) { const numCurr = this.previousKernel.validationRequests.noteHashReadRequests.claimedLength; const numNext = this.nextIteration ? this.nextIteration.noteHashReadRequests.claimedLength : 0; - const maxAmountToKeep = !this.nextIteration || forceResetAll ? 0 : MAX_NOTE_HASH_READ_REQUESTS_PER_TX; + const maxAmountToKeep = !this.nextIteration || forceReset ? 
0 : MAX_NOTE_HASH_READ_REQUESTS_PER_TX; if (numCurr + numNext <= maxAmountToKeep) { return false; } - const futureNoteHashes = collectNested(this.executionStack, executionResult => { - return executionResult.publicInputs.noteHashes - .getActiveItems() - .map(noteHash => new ScopedNoteHash(noteHash, executionResult.publicInputs.callContext.contractAddress)); - }); - const resetActions = getNoteHashReadRequestResetActions( this.previousKernel.validationRequests.noteHashReadRequests, this.previousKernel.end.noteHashes, - futureNoteHashes, ); const numPendingReads = resetActions.pendingReadHints.length; @@ -272,53 +253,72 @@ export class PrivateKernelResetPrivateInputsBuilder { 0, ); + const totalReadsToReset = numPendingReads + numSettledReads; + const minResetNeeded = numCurr + numNext - maxAmountToKeep; + if (totalReadsToReset < minResetNeeded) { + if (!this.nextIteration) { + // In the final reset, all note hashes have been emitted. So if we can't reset all requests, at least one + // pending read request doesn't match any of them. + throw new Error('No matching note hash found for note hash read request.'); + } else if (!forceReset) { + // A pending read request can only be reset if its note hash has already been included (e.g. a parent call might + // be reading a note hash emitted by a child call. The read request of the parent call is included before the note + // hash of the child call). + // If we can't clear enough read requests to make room for the next iteration's reads, we're stuck. + throw new Error('Number of note hash read requests exceeds the limit.'); + } else if (totalReadsToReset == 0) { + // It's transient data squashing asking for the read requests to be reset first (forceReset == true), and + // there's nothing to reset, returns false and let needsResetTransientData throw a more descriptive error. + return false; + } + // Otherwise, forceReset is true, we should proceed to reset as many as we can. 
+ } + if (!this.nextIteration) { + // If there's no next iteration, we need to reset all the read requests. this.noteHashResetActions = resetActions; this.requestedDimensions.NOTE_HASH_PENDING_READ = numPendingReads; this.requestedDimensions.NOTE_HASH_SETTLED_READ = numSettledReads; } else { - // Pick only one dimension to reset if next iteration is not empty. + // If there's a next iteration, only one dimension can be reset at a time. + // So we pick the dimension that has more read requests to reset. if (numPendingReads > numSettledReads) { - this.requestedDimensions.NOTE_HASH_PENDING_READ = numPendingReads; - this.noteHashResetActions.actions = assertLength( + // Reset the pending read requests. + const pendingOnlyActions = assertLength( resetActions.actions.map(action => action === ReadRequestActionEnum.READ_AS_PENDING ? action : ReadRequestActionEnum.SKIP, ), MAX_NOTE_HASH_READ_REQUESTS_PER_TX, ); - this.noteHashResetActions.pendingReadHints = resetActions.pendingReadHints; + this.noteHashResetActions = new ReadRequestResetActions(pendingOnlyActions, resetActions.pendingReadHints); + this.requestedDimensions.NOTE_HASH_PENDING_READ = numPendingReads; } else { - this.requestedDimensions.NOTE_HASH_SETTLED_READ = numSettledReads; - this.noteHashResetActions.actions = assertLength( + // Reset the settled read requests. + const settledOnlyActions = assertLength( resetActions.actions.map(action => action === ReadRequestActionEnum.READ_AS_SETTLED ? action : ReadRequestActionEnum.SKIP, ), MAX_NOTE_HASH_READ_REQUESTS_PER_TX, ); + this.noteHashResetActions = new ReadRequestResetActions(settledOnlyActions, []); + this.requestedDimensions.NOTE_HASH_SETTLED_READ = numSettledReads; } } return true; } - private needsResetNullifierReadRequests(forceResetAll = false) { + private needsResetNullifierReadRequests(forceReset = false) { const numCurr = this.previousKernel.validationRequests.nullifierReadRequests.claimedLength; const numNext = this.nextIteration ? 
this.nextIteration.nullifierReadRequests.claimedLength : 0; - const maxAmountToKeep = !this.nextIteration || forceResetAll ? 0 : MAX_NULLIFIER_READ_REQUESTS_PER_TX; + const maxAmountToKeep = !this.nextIteration || forceReset ? 0 : MAX_NULLIFIER_READ_REQUESTS_PER_TX; if (numCurr + numNext <= maxAmountToKeep) { return false; } - const futureNullifiers = collectNested(this.executionStack, executionResult => { - return executionResult.publicInputs.nullifiers - .getActiveItems() - .map(nullifier => new ScopedNullifier(nullifier, executionResult.publicInputs.callContext.contractAddress)); - }); - const resetActions = getNullifierReadRequestResetActions( this.previousKernel.validationRequests.nullifierReadRequests, this.previousKernel.end.nullifiers, - futureNullifiers, ); const numPendingReads = resetActions.pendingReadHints.length; @@ -327,36 +327,61 @@ export class PrivateKernelResetPrivateInputsBuilder { 0, ); + const totalReadsToReset = numPendingReads + numSettledReads; + const minResetNeeded = numCurr + numNext - maxAmountToKeep; + if (totalReadsToReset < minResetNeeded) { + if (!this.nextIteration) { + // In the final reset, all nullifiers have been emitted. So if we can't reset all requests, at least one pending + // read request doesn't match any of them. + throw new Error('No matching nullifier found for nullifier read request.'); + } else if (!forceReset) { + // A pending read request can only be reset if its nullifier has already been included (e.g. a parent call might + // be reading a nullifier emitted by a child call. The read request of the parent call is included before the + // nullifier of the child call). + // If we can't clear enough read requests to make room for the next iteration's reads, we're stuck. 
+ throw new Error('Number of nullifier read requests exceeds the limit.'); + } else if (totalReadsToReset == 0) { + // It's transient data squashing asking for the read requests to be reset first (forceReset == true), and + // there's nothing to reset, returns false and let needsResetTransientData throw a more descriptive error. + return false; + } + // Otherwise, forceReset is true, we should proceed to reset as many as we can. + } + if (!this.nextIteration) { + // If there's no next iteration, we need to reset all the read requests. this.nullifierResetActions = resetActions; this.requestedDimensions.NULLIFIER_PENDING_READ = numPendingReads; this.requestedDimensions.NULLIFIER_SETTLED_READ = numSettledReads; } else { - // Pick only one dimension to reset if next iteration is not empty. + // If there's a next iteration, we can only reset one dimension at a time. if (numPendingReads > numSettledReads) { - this.requestedDimensions.NULLIFIER_PENDING_READ = numPendingReads; - this.nullifierResetActions.actions = assertLength( + // Reset the pending read requests. + const pendingOnlyActions = assertLength( resetActions.actions.map(action => action === ReadRequestActionEnum.READ_AS_PENDING ? action : ReadRequestActionEnum.SKIP, ), MAX_NULLIFIER_READ_REQUESTS_PER_TX, ); - this.nullifierResetActions.pendingReadHints = resetActions.pendingReadHints; + this.nullifierResetActions = new ReadRequestResetActions(pendingOnlyActions, resetActions.pendingReadHints); + this.requestedDimensions.NULLIFIER_PENDING_READ = numPendingReads; } else { - this.requestedDimensions.NULLIFIER_SETTLED_READ = numSettledReads; - this.nullifierResetActions.actions = assertLength( + // Reset the settled read requests. + const settledOnlyActions = assertLength( resetActions.actions.map(action => action === ReadRequestActionEnum.READ_AS_SETTLED ? 
action : ReadRequestActionEnum.SKIP, ), MAX_NULLIFIER_READ_REQUESTS_PER_TX, ); + this.nullifierResetActions = new ReadRequestResetActions(settledOnlyActions, []); + this.requestedDimensions.NULLIFIER_SETTLED_READ = numSettledReads; } } return true; } - private needsResetNullifierKeys() { + private needsResetKeyValidationRequests() { const numCurr = this.previousKernel.validationRequests.scopedKeyValidationRequestsAndSeparators.claimedLength; const numNext = this.nextIteration ? this.nextIteration.keyValidationRequestsAndSeparators.claimedLength : 0; const maxAmountToKeep = !this.nextIteration ? 0 : MAX_KEY_VALIDATION_REQUESTS_PER_TX; @@ -370,9 +395,6 @@ export class PrivateKernelResetPrivateInputsBuilder { } private needsResetTransientData() { - // Initialize this to 0 so that needsSilo can be run. - this.numTransientData = 0; - const nextAccumNoteHashes = this.previousKernel.end.noteHashes.claimedLength + (this.nextIteration?.noteHashes.claimedLength ?? 0); const noteHashWillOverflow = nextAccumNoteHashes > MAX_NOTE_HASHES_PER_TX; @@ -387,19 +409,19 @@ export class PrivateKernelResetPrivateInputsBuilder { return false; } - const futureNoteHashReads = collectNestedReadRequests( - this.executionStack, - executionResult => executionResult.publicInputs.noteHashReadRequests, + const futureNoteHashReads = collectNested(this.executionStack, executionResult => + executionResult.publicInputs.noteHashReadRequests.getActiveItems(), + ); + const futureNullifierReads = collectNested(this.executionStack, executionResult => + executionResult.publicInputs.nullifierReadRequests.getActiveItems(), ); - const futureNullifierReads = collectNestedReadRequests( - this.executionStack, - executionResult => executionResult.publicInputs.nullifierReadRequests, + const futureLogs = collectNested(this.executionStack, executionResult => + executionResult.publicInputs.privateLogs.getActiveItems(), ); - // TODO(#15902): Collect future logs and only allow squashing a note hash when all its logs 
have been emitted - // (i.e. none of the future logs are linked to the to-be-squashed note hashes). if (this.nextIteration) { - // If it's not the final reset, only one dimension will be reset at a time. - // The note hashes and nullifiers for the remaining read requests can't be squashed. + // If it's not the final reset, only one dimension will be reset at a time. Since we are resetting the transient + // data, the note hash and nullifier read requests in the previous kernel won't be squashed and need to be + // included in the future read requests. futureNoteHashReads.push(...this.previousKernel.validationRequests.noteHashReadRequests.getActiveItems()); futureNullifierReads.push(...this.previousKernel.validationRequests.nullifierReadRequests.getActiveItems()); } @@ -409,27 +431,50 @@ export class PrivateKernelResetPrivateInputsBuilder { this.previousKernel.end.nullifiers, futureNoteHashReads, futureNullifierReads, + futureLogs, this.noteHashNullifierCounterMap, this.splitCounter, ); - if (this.nextIteration && !numTransientData) { - const forceResetAll = true; - const canClearReadRequests = - (noteHashWillOverflow && this.needsResetNoteHashReadRequests(forceResetAll)) || - (nullifierWillOverflow && this.needsResetNullifierReadRequests(forceResetAll)) || - (logsWillOverflow && this.needsResetNoteHashReadRequests(forceResetAll)); - if (!canClearReadRequests) { - const overflownData = noteHashWillOverflow - ? 'note hashes' - : nullifierWillOverflow - ? 'nullifiers' - : 'private logs'; - throw new Error(`Number of ${overflownData} exceeds the limit.`); + if (this.nextIteration) { + const noteHashOverflowBy = noteHashWillOverflow + ? nextAccumNoteHashes - MAX_NOTE_HASHES_PER_TX - numTransientData + : 0; + const nullifierOverflowBy = nullifierWillOverflow + ? nextAccumNullifiers - MAX_NULLIFIERS_PER_TX - numTransientData + : 0; + const numSquashedLogs = logsWillOverflow + ? 
countSquashedLogs( + this.previousKernel.end.noteHashes, + this.previousKernel.end.privateLogs, + transientDataSquashingHints.slice(0, numTransientData), + ) + : 0; + const logsOverflowBy = logsWillOverflow ? nextAccumLogs - MAX_PRIVATE_LOGS_PER_TX - numSquashedLogs : 0; + + if (noteHashOverflowBy > 0 || nullifierOverflowBy > 0 || logsOverflowBy > 0) { + // There's not enough transient data to squash to clear space for the overflow. It may be because some data is + // still required for read requests. Force a reset of the read requests first, and return to transient data + // squashing in the next round of reset. + // Note that clearing the read requests might not be enough to clear more space for the overflow. In this case, + // running the next reset will fail at the following check. + // Only one dimension can be reset at a time for an inner reset, so we try the note hash read requests first + // (which also helps with log overflow), then fall back to nullifier read requests. + const forceReset = true; + if ((noteHashOverflowBy > 0 || logsOverflowBy > 0) && this.needsResetNoteHashReadRequests(forceReset)) { + return true; + } + if (nullifierOverflowBy > 0 && this.needsResetNullifierReadRequests(forceReset)) { + return true; + } + if (noteHashWillOverflow) { + throw new Error('Number of note hashes exceeds the limit.'); + } + if (nullifierWillOverflow) { + throw new Error('Number of nullifiers exceeds the limit.'); + } + throw new Error('Number of private logs exceeds the limit.'); } - // Clearing the read requests might not be enough to squash the overflown data. - // In this case, the next iteration will fail at the above check. 
- return true; } this.numTransientData = numTransientData; @@ -444,10 +489,13 @@ export class PrivateKernelResetPrivateInputsBuilder { throw new Error('`needsResetTransientData` must be run before `needsSiloNoteHashes`.'); } - const numNoteHashes = this.previousKernel.end.noteHashes - .getActiveItems() - .filter(n => !n.contractAddress.isZero()).length; - const numToSilo = Math.max(0, numNoteHashes - this.numTransientData); + const noteHashes = this.previousKernel.end.noteHashes; + if (noteHashes.claimedLength > 0 && noteHashes.array[0].contractAddress.isZero()) { + // Already siloed. + return false; + } + + const numToSilo = noteHashes.claimedLength - this.numTransientData; this.requestedDimensions.NOTE_HASH_SILOING = numToSilo; return numToSilo > 0; @@ -458,15 +506,14 @@ export class PrivateKernelResetPrivateInputsBuilder { throw new Error('`needsResetTransientData` must be run before `needsSiloNullifiers`.'); } - const numNullifiers = this.previousKernel.end.nullifiers - .getActiveItems() - .filter(n => !n.contractAddress.isZero()).length; - const numToSilo = Math.max(0, numNullifiers - this.numTransientData); - // Include the first nullifier if there's something to silo. - // The reset circuit checks that capped_size must be greater than or equal to all non-empty nullifiers. - // Which includes the first nullifier, even though its contract address is always zero and doesn't need siloing. - const cappedSize = numToSilo ? numToSilo + 1 : 0; - this.requestedDimensions.NULLIFIER_SILOING = cappedSize; + const nullifiers = this.previousKernel.end.nullifiers; + if (nullifiers.claimedLength > 0 && nullifiers.array[0].contractAddress.isZero()) { + // Already siloed. 
+ return false; + } + + const numToSilo = nullifiers.claimedLength - this.numTransientData; + this.requestedDimensions.NULLIFIER_SILOING = numToSilo; return numToSilo > 0; } @@ -477,17 +524,17 @@ export class PrivateKernelResetPrivateInputsBuilder { } const privateLogs = this.previousKernel.end.privateLogs; - const numLogs = privateLogs.getActiveItems().filter(l => !l.contractAddress.isZero()).length; + if (privateLogs.claimedLength > 0 && privateLogs.array[0].contractAddress.isZero()) { + // Already siloed. + return false; + } - const noteHashes = this.previousKernel.end.noteHashes; - const squashedNoteHashCounters = this.transientDataSquashingHints - .filter(h => h.noteHashIndex < noteHashes.claimedLength) - .map(h => noteHashes.array[h.noteHashIndex].counter); - const numSquashedLogs = privateLogs - .getActiveItems() - .filter(l => squashedNoteHashCounters.includes(l.inner.noteHashCounter)).length; - - const numToSilo = numLogs - numSquashedLogs; + const numSquashedLogs = countSquashedLogs( + this.previousKernel.end.noteHashes, + privateLogs, + this.transientDataSquashingHints.slice(0, this.numTransientData), + ); + const numToSilo = privateLogs.claimedLength - numSquashedLogs; this.requestedDimensions.PRIVATE_LOG_SILOING = numToSilo; return numToSilo > 0; diff --git a/yarn-project/pxe/src/private_kernel/hints/test_utils.ts b/yarn-project/pxe/src/private_kernel/hints/test_utils.ts new file mode 100644 index 000000000000..be5bf7a66f7c --- /dev/null +++ b/yarn-project/pxe/src/private_kernel/hints/test_utils.ts @@ -0,0 +1,325 @@ +import { + MAX_KEY_VALIDATION_REQUESTS_PER_CALL, + MAX_KEY_VALIDATION_REQUESTS_PER_TX, + MAX_NOTE_HASHES_PER_CALL, + MAX_NOTE_HASHES_PER_TX, + MAX_NOTE_HASH_READ_REQUESTS_PER_CALL, + MAX_NOTE_HASH_READ_REQUESTS_PER_TX, + MAX_NULLIFIERS_PER_CALL, + MAX_NULLIFIERS_PER_TX, + MAX_NULLIFIER_READ_REQUESTS_PER_CALL, + MAX_NULLIFIER_READ_REQUESTS_PER_TX, + MAX_PRIVATE_LOGS_PER_CALL, + MAX_PRIVATE_LOGS_PER_TX, +} from '@aztec/constants'; +import { 
makeTuple } from '@aztec/foundation/array';
+import { Fr } from '@aztec/foundation/curves/bn254';
+import { Point } from '@aztec/foundation/curves/grumpkin';
+import type { Serializable } from '@aztec/foundation/serialize';
+import { AztecAddress } from '@aztec/stdlib/aztec-address';
+import {
+  ClaimedLengthArray,
+  KeyValidationRequest,
+  KeyValidationRequestAndSeparator,
+  NoteHash,
+  Nullifier,
+  PrivateCircuitPublicInputs,
+  PrivateKernelCircuitPublicInputs,
+  type PrivateKernelSimulateOutput,
+  ReadRequest,
+  ScopedKeyValidationRequestAndSeparator,
+  ScopedNoteHash,
+  ScopedNullifier,
+  ScopedReadRequest,
+} from '@aztec/stdlib/kernel';
+import { PrivateLogData, ScopedPrivateLogData } from '@aztec/stdlib/kernel';
+import { PrivateLog } from '@aztec/stdlib/logs';
+import { PrivateCallExecutionResult } from '@aztec/stdlib/tx';
+import { VerificationKeyData } from '@aztec/stdlib/vks';
+
+const DEFAULT_CONTRACT_ADDRESS = AztecAddress.fromBigInt(987654n);
+
+/**
+ * Builds a ClaimedLengthArray from a list of items, padding to the required size.
+ */
+function makeClaimed<T extends Serializable, N extends number>(items: T[], emptyFactory: { empty(): T }, maxSize: N) {
+  const padded = makeTuple(maxSize, i => items[i] ?? emptyFactory.empty());
+  return new ClaimedLengthArray(padded, items.length);
+}
+
+/** Builder for PrivateKernelCircuitPublicInputs with fluent API for adding side effects. 
*/ +export class PrivateKernelCircuitPublicInputsBuilder { + private noteHashes: ScopedNoteHash[] = []; + private nullifiers: ScopedNullifier[] = []; + private noteHashReadRequests: ScopedReadRequest[] = []; + private nullifierReadRequests: ScopedReadRequest[] = []; + private keyValidationRequests: ScopedKeyValidationRequestAndSeparator[] = []; + private privateLogs: ScopedPrivateLogData[] = []; + private nextCounter: number; + + constructor( + private contractAddress: AztecAddress = DEFAULT_CONTRACT_ADDRESS, + startCounter = 1, + ) { + this.nextCounter = startCounter; + } + + private getCounter(sideEffectCounter?: number): number { + if (sideEffectCounter !== undefined) { + this.nextCounter = sideEffectCounter + 1; + return sideEffectCounter; + } + return this.nextCounter++; + } + + /** Adds a note hash to the accumulated data. Defaults are generated randomly. */ + addNoteHash(opts?: { value?: Fr; counter?: number; contractAddress?: AztecAddress }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + const addr = opts?.contractAddress ?? this.contractAddress; + this.noteHashes.push(new NoteHash(value, counter).scope(addr)); + return this; + } + + /** Adds a nullifier to the accumulated data. Defaults are generated randomly. */ + addNullifier(opts?: { value?: Fr; noteHash?: Fr; counter?: number; contractAddress?: AztecAddress }): this { + const value = opts?.value ?? Fr.random(); + const noteHash = opts?.noteHash ?? Fr.ZERO; + const counter = this.getCounter(opts?.counter); + const addr = opts?.contractAddress ?? this.contractAddress; + this.nullifiers.push(new Nullifier(value, noteHash, counter).scope(addr)); + return this; + } + + /** Adds a pending note hash read request (non-empty contract address, can match a pending note hash). */ + addPendingNoteHashReadRequest(opts?: { value?: Fr; counter?: number; contractAddress?: AztecAddress }): this { + const value = opts?.value ?? 
Fr.random(); + const counter = this.getCounter(opts?.counter); + const addr = opts?.contractAddress ?? this.contractAddress; + this.noteHashReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), addr)); + return this; + } + + /** Adds a settled note hash read request (empty contract address, resolved against the note hash tree). */ + addSettledNoteHashReadRequest(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.noteHashReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), AztecAddress.ZERO)); + return this; + } + + /** Adds a pending nullifier read request (non-empty contract address, can match a pending nullifier). */ + addPendingNullifierReadRequest(opts?: { value?: Fr; counter?: number; contractAddress?: AztecAddress }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + const addr = opts?.contractAddress ?? this.contractAddress; + this.nullifierReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), addr)); + return this; + } + + /** Adds a settled nullifier read request (empty contract address, resolved against the nullifier tree). */ + addSettledNullifierReadRequest(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.nullifierReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), AztecAddress.ZERO)); + return this; + } + + /** Adds a key validation request to validation requests. */ + addKeyValidationRequest(opts?: { contractAddress?: AztecAddress }): this { + const addr = opts?.contractAddress ?? 
this.contractAddress; + this.keyValidationRequests.push( + new ScopedKeyValidationRequestAndSeparator( + new KeyValidationRequestAndSeparator( + new KeyValidationRequest(new Point(Fr.random(), Fr.random(), false), Fr.random()), + Fr.random(), + ), + addr, + ), + ); + return this; + } + + /** Adds a private log to the accumulated data. Defaults are generated randomly. */ + addPrivateLog(opts?: { noteHashCounter?: number; counter?: number; contractAddress?: AztecAddress }): this { + const noteHashCounter = opts?.noteHashCounter ?? 0; + const counter = this.getCounter(opts?.counter); + const addr = opts?.contractAddress ?? this.contractAddress; + this.privateLogs.push( + new ScopedPrivateLogData(new PrivateLogData(PrivateLog.empty(), noteHashCounter, counter), addr), + ); + return this; + } + + /** Builds the PrivateKernelCircuitPublicInputs with all added side effects. */ + build(): PrivateKernelCircuitPublicInputs { + const publicInputs = PrivateKernelCircuitPublicInputs.empty(); + publicInputs.end.noteHashes = makeClaimed(this.noteHashes, ScopedNoteHash, MAX_NOTE_HASHES_PER_TX); + publicInputs.end.nullifiers = makeClaimed(this.nullifiers, ScopedNullifier, MAX_NULLIFIERS_PER_TX); + publicInputs.end.privateLogs = makeClaimed(this.privateLogs, ScopedPrivateLogData, MAX_PRIVATE_LOGS_PER_TX); + publicInputs.validationRequests.noteHashReadRequests = makeClaimed( + this.noteHashReadRequests, + ScopedReadRequest, + MAX_NOTE_HASH_READ_REQUESTS_PER_TX, + ); + publicInputs.validationRequests.nullifierReadRequests = makeClaimed( + this.nullifierReadRequests, + ScopedReadRequest, + MAX_NULLIFIER_READ_REQUESTS_PER_TX, + ); + publicInputs.validationRequests.scopedKeyValidationRequestsAndSeparators = makeClaimed( + this.keyValidationRequests, + ScopedKeyValidationRequestAndSeparator, + MAX_KEY_VALIDATION_REQUESTS_PER_TX, + ); + return publicInputs; + } +} + +/** Builder for PrivateCircuitPublicInputs (call-level) with fluent API for adding side effects. 
*/ +export class PrivateCircuitPublicInputsBuilder { + private noteHashes: NoteHash[] = []; + private nullifiers: Nullifier[] = []; + private noteHashReadRequests: ScopedReadRequest[] = []; + private nullifierReadRequests: ScopedReadRequest[] = []; + private keyValidationRequests: KeyValidationRequestAndSeparator[] = []; + private privateLogs: PrivateLogData[] = []; + private nextCounter: number; + + constructor( + private contractAddress: AztecAddress = DEFAULT_CONTRACT_ADDRESS, + startCounter = 1, + ) { + this.nextCounter = startCounter; + } + + private getCounter(sideEffectCounter?: number): number { + if (sideEffectCounter !== undefined) { + this.nextCounter = sideEffectCounter + 1; + return sideEffectCounter; + } + return this.nextCounter++; + } + + /** Adds a note hash. Defaults are generated randomly. */ + addNoteHash(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.noteHashes.push(new NoteHash(value, counter)); + return this; + } + + /** Adds a nullifier. Defaults are generated randomly. */ + addNullifier(opts?: { value?: Fr; noteHash?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const noteHash = opts?.noteHash ?? Fr.ZERO; + const counter = this.getCounter(opts?.counter); + this.nullifiers.push(new Nullifier(value, noteHash, counter)); + return this; + } + + /** Adds a pending note hash read request (non-empty contract address, can match a pending note hash). */ + addPendingNoteHashReadRequest(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.noteHashReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), this.contractAddress)); + return this; + } + + /** Adds a settled note hash read request (empty contract address, resolved against the note hash tree). 
*/ + addSettledNoteHashReadRequest(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.noteHashReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), AztecAddress.ZERO)); + return this; + } + + /** Adds a pending nullifier read request (non-empty contract address, can match a pending nullifier). */ + addPendingNullifierReadRequest(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.nullifierReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), this.contractAddress)); + return this; + } + + /** Adds a settled nullifier read request (empty contract address, resolved against the nullifier tree). */ + addSettledNullifierReadRequest(opts?: { value?: Fr; counter?: number }): this { + const value = opts?.value ?? Fr.random(); + const counter = this.getCounter(opts?.counter); + this.nullifierReadRequests.push(new ScopedReadRequest(new ReadRequest(value, counter), AztecAddress.ZERO)); + return this; + } + + /** Adds a key validation request. */ + addKeyValidationRequest(): this { + this.keyValidationRequests.push( + new KeyValidationRequestAndSeparator( + new KeyValidationRequest(new Point(Fr.random(), Fr.random(), false), Fr.random()), + Fr.random(), + ), + ); + return this; + } + + /** Adds a private log. Defaults are generated randomly. */ + addPrivateLog(opts?: { noteHashCounter?: number; counter?: number }): this { + const noteHashCounter = opts?.noteHashCounter ?? 0; + const counter = this.getCounter(opts?.counter); + this.privateLogs.push(new PrivateLogData(PrivateLog.empty(), noteHashCounter, counter)); + return this; + } + + /** Builds the PrivateCircuitPublicInputs with all added side effects. 
*/
+  build(): PrivateCircuitPublicInputs {
+    const publicInputs = PrivateCircuitPublicInputs.empty();
+    publicInputs.callContext.contractAddress = this.contractAddress;
+    publicInputs.noteHashes = makeClaimed(this.noteHashes, NoteHash, MAX_NOTE_HASHES_PER_CALL);
+    publicInputs.nullifiers = makeClaimed(this.nullifiers, Nullifier, MAX_NULLIFIERS_PER_CALL);
+    publicInputs.privateLogs = makeClaimed(this.privateLogs, PrivateLogData, MAX_PRIVATE_LOGS_PER_CALL);
+    publicInputs.noteHashReadRequests = makeClaimed(
+      this.noteHashReadRequests,
+      ScopedReadRequest,
+      MAX_NOTE_HASH_READ_REQUESTS_PER_CALL,
+    );
+    publicInputs.nullifierReadRequests = makeClaimed(
+      this.nullifierReadRequests,
+      ScopedReadRequest,
+      MAX_NULLIFIER_READ_REQUESTS_PER_CALL,
+    );
+    publicInputs.keyValidationRequestsAndSeparators = makeClaimed(
+      this.keyValidationRequests,
+      KeyValidationRequestAndSeparator,
+      MAX_KEY_VALIDATION_REQUESTS_PER_CALL,
+    );
+    return publicInputs;
+  }
+}
+
+/** Wraps a PrivateKernelCircuitPublicInputs in a PrivateKernelSimulateOutput. */
+export function makeKernelOutput(
+  publicInputs?: PrivateKernelCircuitPublicInputs,
+): PrivateKernelSimulateOutput<PrivateKernelCircuitPublicInputs> {
+  return {
+    publicInputs: publicInputs ?? PrivateKernelCircuitPublicInputs.empty(),
+    verificationKey: VerificationKeyData.empty(),
+    outputWitness: new Map(),
+    bytecode: Buffer.from([]),
+  };
+}
+
+/** Wraps a PrivateCircuitPublicInputs in a PrivateCallExecutionResult. */
+export function makeExecutionResult(publicInputs?: PrivateCircuitPublicInputs): PrivateCallExecutionResult {
+  return new PrivateCallExecutionResult(
+    Buffer.alloc(0),
+    Buffer.alloc(0),
+    new Map(),
+    publicInputs ?? 
PrivateCircuitPublicInputs.empty(), + [], + new Map(), + [], + [], + [], + [], + [], + ); +} diff --git a/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.test.ts b/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.test.ts index b1e73bf2e919..a30455750679 100644 --- a/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.test.ts +++ b/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.test.ts @@ -1,28 +1,30 @@ import { BackendType, BarretenbergSync } from '@aztec/bb.js'; -import { MAX_NOTE_HASHES_PER_CALL, MAX_NOTE_HASHES_PER_TX, MAX_TX_LIFETIME, VK_TREE_HEIGHT } from '@aztec/constants'; -import { padArrayEnd } from '@aztec/foundation/collection'; +import { + MAX_KEY_VALIDATION_REQUESTS_PER_TX, + MAX_NOTE_HASH_READ_REQUESTS_PER_TX, + MAX_TX_LIFETIME, + VK_TREE_HEIGHT, +} from '@aztec/constants'; import { Fr } from '@aztec/foundation/curves/bn254'; import { createLogger } from '@aztec/foundation/log'; import { MembershipWitness } from '@aztec/foundation/trees'; -import { FunctionSelector, NoteSelector } from '@aztec/stdlib/abi'; +import { FunctionSelector } from '@aztec/stdlib/abi'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import type { PrivateKernelProver } from '@aztec/stdlib/interfaces/client'; -import { - ClaimedLengthArray, - NoteHash, - PrivateCircuitPublicInputs, - PrivateKernelCircuitPublicInputs, - PrivateKernelTailCircuitPublicInputs, - ScopedNoteHash, -} from '@aztec/stdlib/kernel'; +import { PrivateCircuitPublicInputs, PrivateKernelTailCircuitPublicInputs } from '@aztec/stdlib/kernel'; import { PublicKeys } from '@aztec/stdlib/keys'; -import { Note } from '@aztec/stdlib/note'; import { makeTxRequest } from '@aztec/stdlib/testing'; -import { NoteAndSlot, PrivateCallExecutionResult, PrivateExecutionResult, type TxRequest } from '@aztec/stdlib/tx'; +import { PrivateCallExecutionResult, PrivateExecutionResult, type TxRequest } from '@aztec/stdlib/tx'; import { 
VerificationKey, VerificationKeyData } from '@aztec/stdlib/vks'; import { mock } from 'jest-mock-extended'; +import times from 'lodash.times'; +import { + PrivateCircuitPublicInputsBuilder, + PrivateKernelCircuitPublicInputsBuilder, + makeKernelOutput, +} from './hints/test_utils.js'; import { PrivateKernelExecutionProver } from './private_kernel_execution_prover.js'; import type { PrivateKernelOracle } from './private_kernel_oracle.js'; @@ -43,105 +45,64 @@ describe('Private Kernel Sequencer', () => { await BarretenbergSync.initSingleton({ backend: BackendType.NativeSharedMemory, logger: logger.debug }); }); - const notesAndSlots: NoteAndSlot[] = Array(10) - .fill(null) - .map(() => - NoteAndSlot.from({ - note: new Note([Fr.random(), Fr.random(), Fr.random()]), - storageSlot: Fr.random(), - randomness: Fr.random(), - noteTypeId: NoteSelector.random(), - }), - ); - - const createFakeSiloedCommitment = (commitment: Fr) => new Fr(commitment.value + 1n); - const generateFakeCommitment = (noteAndSlot: NoteAndSlot) => noteAndSlot.note.items[0]; - const generateFakeSiloedCommitment = (note: NoteAndSlot) => createFakeSiloedCommitment(generateFakeCommitment(note)); - - const createExecutionResult = (fnName: string, newNoteIndices: number[] = []): PrivateExecutionResult => { - return new PrivateExecutionResult(createCallExecutionResult(fnName, newNoteIndices), Fr.zero(), []); + const createExecutionResult = (fnName: string): PrivateExecutionResult => { + return new PrivateExecutionResult(createCallExecutionResult(fnName), Fr.zero(), []); }; - const createCallExecutionResult = (fnName: string, newNoteIndices: number[] = []): PrivateCallExecutionResult => { - const publicInputs = PrivateCircuitPublicInputs.empty(); - publicInputs.noteHashes = new ClaimedLengthArray( - padArrayEnd( - newNoteIndices.map(newNoteIndex => new NoteHash(generateFakeCommitment(notesAndSlots[newNoteIndex]), 0)), - NoteHash.empty(), - MAX_NOTE_HASHES_PER_CALL, - ), - newNoteIndices.length, - ); + const 
createCallExecutionResult = ( + fnName: string, + { + publicInputs, + childPublicInputs = [], + }: { + publicInputs?: PrivateCircuitPublicInputs; + childPublicInputs?: PrivateCircuitPublicInputs[]; + } = {}, + ): PrivateCallExecutionResult => { + if (!publicInputs) { + publicInputs = PrivateCircuitPublicInputs.empty(); + } publicInputs.callContext.functionSelector = new FunctionSelector(fnName.charCodeAt(0)); publicInputs.callContext.contractAddress = contractAddress; + return new PrivateCallExecutionResult( Buffer.alloc(0), VerificationKey.makeFakeMegaHonk(), new Map(), publicInputs, - newNoteIndices.map(idx => notesAndSlots[idx]), + [], new Map(), [], [], [], - (dependencies[fnName] || []).map(name => createCallExecutionResult(name)), + (dependencies[fnName] || []).map((name, i) => + createCallExecutionResult(name, { publicInputs: childPublicInputs[i] }), + ), [], ); }; - const simulateProofOutput = (newNoteIndices: number[]) => { - const publicInputs = PrivateKernelCircuitPublicInputs.empty(); + /** Creates a mock kernel output. Optionally accepts a callback to configure the builder before building. */ + const simulateProofOutput = (configure?: (builder: PrivateKernelCircuitPublicInputsBuilder) => void) => { + const builder = new PrivateKernelCircuitPublicInputsBuilder(contractAddress); + // Every tx has at least one nullifier (the first nullifier), which needs siloing in the final reset. 
+ builder.addNullifier(); + configure?.(builder); + + const publicInputs = builder.build(); publicInputs.constants.anchorBlockHeader.globalVariables.timestamp = blockTimestamp; publicInputs.expirationTimestamp = expirationTimestamp; - publicInputs.end.noteHashes = new ClaimedLengthArray( - padArrayEnd( - newNoteIndices.map(newNoteIndex => - new NoteHash(generateFakeSiloedCommitment(notesAndSlots[newNoteIndex]), 0).scope(contractAddress), - ), - ScopedNoteHash.empty(), - MAX_NOTE_HASHES_PER_TX, - ), - newNoteIndices.length, - ); - - return { - publicInputs, - verificationKey: VerificationKeyData.empty(), - outputWitness: new Map(), - bytecode: Buffer.from([]), - }; - }; - - const simulateProofOutputFinal = (newNoteIndices: number[]) => { - const publicInputs = PrivateKernelTailCircuitPublicInputs.empty(); - publicInputs.forRollup!.end.noteHashes = padArrayEnd( - newNoteIndices.map(newNoteIndex => generateFakeSiloedCommitment(notesAndSlots[newNoteIndex])), - Fr.ZERO, - MAX_NOTE_HASHES_PER_TX, - ); - return { - publicInputs, - outputWitness: new Map(), - verificationKey: VerificationKeyData.empty(), - bytecode: Buffer.from([]), - }; + return makeKernelOutput(publicInputs); }; - const expectExecution = (fns: string[]) => { - const callStackItemsInit = proofCreator.simulateInit.mock.calls.map(args => - String.fromCharCode(args[0].privateCall.publicInputs.callContext.functionSelector.value), - ); - const callStackItemsInner = proofCreator.simulateInner.mock.calls.map(args => - String.fromCharCode(args[0].privateCall.publicInputs.callContext.functionSelector.value), - ); - - expect(proofCreator.simulateInit).toHaveBeenCalledTimes(Math.min(1, fns.length)); - expect(proofCreator.simulateInner).toHaveBeenCalledTimes(Math.max(0, fns.length - 1)); - expect(callStackItemsInit.concat(callStackItemsInner)).toEqual(fns); - proofCreator.simulateInner.mockClear(); - proofCreator.simulateInit.mockClear(); - }; + /** Creates a mock kernel output for the final iteration. 
Returns empty result as we don't care about it in the tests */ + const simulateProofOutputFinal = () => ({ + publicInputs: PrivateKernelTailCircuitPublicInputs.empty(), + outputWitness: new Map(), + verificationKey: VerificationKeyData.empty(), + bytecode: Buffer.from([]), + }); const prove = (executionResult: PrivateExecutionResult) => prover.proveWithKernels(txRequest, executionResult); @@ -149,8 +110,8 @@ describe('Private Kernel Sequencer', () => { txRequest = makeTxRequest(); oracle = mock(); - // TODO(dbanks12): will need to mock oracle.getNoteMembershipWitness() to test non-transient reads oracle.getVkMembershipWitness.mockResolvedValue(MembershipWitness.random(VK_TREE_HEIGHT)); + oracle.getMasterSecretKey.mockResolvedValue(Fr.random() as any); oracle.getContractAddressPreimage.mockResolvedValue({ version: 1 as const, @@ -169,42 +130,215 @@ describe('Private Kernel Sequencer', () => { privateFunctionsRoot: Fr.random(), }); + oracle.getDebugFunctionName.mockImplementation((_, selector) => + Promise.resolve(String.fromCharCode(selector.value)), + ); + proofCreator = mock(); - proofCreator.simulateInit.mockResolvedValue(simulateProofOutput([])); - proofCreator.simulateInner.mockResolvedValue(simulateProofOutput([])); - proofCreator.simulateReset.mockResolvedValue(simulateProofOutput([])); - proofCreator.simulateTail.mockResolvedValue(simulateProofOutputFinal([])); + proofCreator.simulateInit.mockResolvedValue(simulateProofOutput()); + proofCreator.simulateInner.mockResolvedValue(simulateProofOutput()); + proofCreator.simulateReset.mockResolvedValue(simulateProofOutput()); + proofCreator.simulateTail.mockResolvedValue(simulateProofOutputFinal()); prover = new PrivateKernelExecutionProver(oracle, proofCreator, true); }); - it('should create proofs in correct order', async () => { + it('should execute private functions in correct order', async () => { { dependencies = { a: [] }; const executionResult = createExecutionResult('a'); await prove(executionResult); - 
expectExecution(['a']); + + expect(proofCreator.simulateInit).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateInner).not.toHaveBeenCalled(); + proofCreator.simulateInit.mockClear(); } { + // a { + // b { + // c {} + // } + // d {} + // } dependencies = { a: ['b', 'd'], b: ['c'], }; const executionResult = createExecutionResult('a'); await prove(executionResult); - expectExecution(['a', 'b', 'c', 'd']); + + // Init for 'a', inner for 'b', 'c', 'd'. + expect(proofCreator.simulateInit).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateInner).toHaveBeenCalledTimes(3); + proofCreator.simulateInit.mockClear(); + proofCreator.simulateInner.mockClear(); } { + // a { + // b { + // d { + // h {} + // } + // } + // c { + // e {} + // f { + // i {} + // j { + // l { + // n {} + // } + // m {} + // } + // k {} + // } + // g {} + // } dependencies = { - k: ['m', 'o'], - m: ['q'], - o: ['n', 'p', 'r'], + a: ['b', 'c'], + b: ['d'], + d: ['h'], + c: ['e', 'f', 'g'], + f: ['i', 'j', 'k'], + j: ['l', 'm'], + l: ['n'], }; - const executionResult = createExecutionResult('k'); + const executionResult = createExecutionResult('a'); await prove(executionResult); - expectExecution(['k', 'm', 'q', 'o', 'n', 'p', 'r']); + + // Init for 'a', inner for the remaining 13 functions. 
+ expect(proofCreator.simulateInit).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateInner).toHaveBeenCalledTimes(13); } }); + + it('executes init, final reset, and tail for a single function', async () => { + dependencies = { a: [] }; + const executionResult = createExecutionResult('a'); + const result = await prove(executionResult); + + const stepNames = result.executionSteps.map(s => s.functionName); + expect(stepNames).toEqual(['a', 'private_kernel_init', 'private_kernel_reset', 'private_kernel_tail']); + + expect(proofCreator.simulateInit).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateInner).not.toHaveBeenCalled(); + expect(proofCreator.simulateReset).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateTail).toHaveBeenCalledTimes(1); + }); + + it('executes init, inners, final reset, and tail for nested functions', async () => { + // a { + // b { + // c {} + // } + // d {} + // } + dependencies = { a: ['b', 'd'], b: ['c'] }; + + const executionResult = createExecutionResult('a'); + const result = await prove(executionResult); + + const stepNames = result.executionSteps.map(s => s.functionName); + expect(stepNames).toEqual([ + 'a', + 'private_kernel_init', + 'b', + 'private_kernel_inner', + 'c', + 'private_kernel_inner', + 'd', + 'private_kernel_inner', + 'private_kernel_reset', + 'private_kernel_tail', + ]); + }); + + it('runs inner reset before next iteration when key validation requests overflow', async () => { + // Set up: init output has MAX key validation requests. + proofCreator.simulateInit.mockResolvedValue( + simulateProofOutput(b => times(MAX_KEY_VALIDATION_REQUESTS_PER_TX, () => b.addKeyValidationRequest())), + ); + + // Child function b adds 1 key validation request → total exceeds MAX → inner reset needed. 
+ const childBuilder = new PrivateCircuitPublicInputsBuilder(contractAddress); + childBuilder.addKeyValidationRequest(); + const childPublicInputs = childBuilder.build(); + + // a { b {} } + dependencies = { a: ['b'] }; + + const entryExecResult = createCallExecutionResult('a', { childPublicInputs: [childPublicInputs] }); + const executionResult = new PrivateExecutionResult(entryExecResult, Fr.zero(), []); + const result = await prove(executionResult); + + const stepNames = result.executionSteps.map(s => s.functionName); + expect(stepNames).toEqual([ + 'a', + 'private_kernel_init', + // Inner reset to clear key validation requests before processing b. + 'private_kernel_reset', + 'b', + 'private_kernel_inner', + // Final reset for siloing. + 'private_kernel_reset', + 'private_kernel_tail', + ]); + + expect(proofCreator.simulateInit).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateInner).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateReset).toHaveBeenCalledTimes(2); + expect(proofCreator.simulateTail).toHaveBeenCalledTimes(1); + }); + + it('runs two consecutive inner resets when first reset output still overflows', async () => { + // Set up: init output has MAX note hash read requests and key validation requests. + proofCreator.simulateInit.mockResolvedValue( + simulateProofOutput(b => { + times(MAX_NOTE_HASH_READ_REQUESTS_PER_TX, i => { + b.addNoteHash({ value: new Fr(i + 1) }); + b.addPendingNoteHashReadRequest({ value: new Fr(i + 1) }); + }); + times(MAX_KEY_VALIDATION_REQUESTS_PER_TX, () => b.addKeyValidationRequest()); + }), + ); + + // First inner reset clears the note hash read requests, but still returns MAX key validation requests → second inner reset needed. 
+ proofCreator.simulateReset.mockResolvedValueOnce( + simulateProofOutput(b => times(MAX_KEY_VALIDATION_REQUESTS_PER_TX, () => b.addKeyValidationRequest())), + ); + + // Child function b adds 1 note hash read request and 1 key validation request → total exceeds MAX → inner reset triggered. + const childBuilder = new PrivateCircuitPublicInputsBuilder(contractAddress); + childBuilder.addPendingNoteHashReadRequest(); + childBuilder.addKeyValidationRequest(); + const childPublicInputs = childBuilder.build(); + + // a { b {} } + dependencies = { a: ['b'] }; + const entryExecResult = createCallExecutionResult('a', { childPublicInputs: [childPublicInputs] }); + + const executionResult = new PrivateExecutionResult(entryExecResult, Fr.zero(), []); + const result = await prove(executionResult); + + const stepNames = result.executionSteps.map(s => s.functionName); + expect(stepNames).toEqual([ + 'a', + 'private_kernel_init', + // Two consecutive inner resets to clear note hash read requests and key validation requests before processing b. + 'private_kernel_reset', + 'private_kernel_reset', + 'b', + 'private_kernel_inner', + // Final reset for siloing. + 'private_kernel_reset', + 'private_kernel_tail', + ]); + + expect(proofCreator.simulateInit).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateInner).toHaveBeenCalledTimes(1); + expect(proofCreator.simulateReset).toHaveBeenCalledTimes(3); + expect(proofCreator.simulateTail).toHaveBeenCalledTimes(1); + }); }); diff --git a/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.ts b/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.ts index 61825073b921..d4652a36dce5 100644 --- a/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.ts +++ b/yarn-project/pxe/src/private_kernel/private_kernel_execution_prover.ts @@ -116,6 +116,7 @@ export class PrivateKernelExecutionProver { splitCounter, ); while (resetBuilder.needsReset()) { + // Inner reset: without siloing. 
const witgenTimer = new Timer(); const privateInputs = await resetBuilder.build(this.oracle); output = generateWitnesses @@ -216,16 +217,24 @@ export class PrivateKernelExecutionProver { firstIteration = false; } - // Reset. - let resetBuilder = new PrivateKernelResetPrivateInputsBuilder( + // Final reset: include siloing of note hashes, nullifiers and private logs. + const finalResetBuilder = new PrivateKernelResetPrivateInputsBuilder( output, [], noteHashNullifierCounterMap, splitCounter, ); - while (resetBuilder.needsReset()) { + if (!finalResetBuilder.needsReset()) { + // The final reset must be performed exactly once, because each tx has at least one nullifier that requires + // siloing, and siloing cannot be done multiple times. + // While, in theory, it might be possible to silo note hashes first and then run another reset to silo nullifiers + // and/or private logs, we currently don't have standalone dimensions for the arrays that require siloing. As a + // result, all necessary siloing must be done together in a single reset. + // Refer to the possible combinations of dimensions in private_kernel_reset_config.json. + throw new Error('Nothing to reset for the final reset.'); + } else { const witgenTimer = new Timer(); - const privateInputs = await resetBuilder.build(this.oracle); + const privateInputs = await finalResetBuilder.build(this.oracle); output = generateWitnesses ? 
await this.proofCreator.generateResetOutput(privateInputs) : await this.proofCreator.simulateReset(privateInputs); @@ -239,8 +248,6 @@ export class PrivateKernelExecutionProver { witgen: witgenTimer.ms(), }, }); - - resetBuilder = new PrivateKernelResetPrivateInputsBuilder(output, [], noteHashNullifierCounterMap, splitCounter); } if (output.publicInputs.feePayer.isZero() && skipFeeEnforcement) { diff --git a/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.test.ts b/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.test.ts index a19b2ac10470..45d25ecd985a 100644 --- a/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.test.ts +++ b/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.test.ts @@ -17,9 +17,12 @@ describe('buildNoteHashReadRequestHints', () => { const getNoteHashValue = (index: number) => index + 9999; - const makeReadRequest = (value: number, counter = 2) => + const makePendingReadRequest = (value: number, counter = 2) => new ReadRequest(new Fr(value), counter).scope(contractAddress); + const makeSettledReadRequest = (value: number, counter = 2) => + new ReadRequest(new Fr(value), counter).scope(AztecAddress.ZERO); + const makeNoteHash = (value: number, counter = 1) => new NoteHash(new Fr(value), counter).scope(contractAddress); /** * Create fixtures. 
@@ -53,7 +56,7 @@ describe('buildNoteHashReadRequestHints', () => { const readPendingNoteHash = (noteHashIndex: number) => { const readRequestIndex = numReadRequests; const hintIndex = numPendingReads; - noteHashReadRequests[readRequestIndex] = makeReadRequest(getNoteHashValue(noteHashIndex)); + noteHashReadRequests[readRequestIndex] = makePendingReadRequest(getNoteHashValue(noteHashIndex)); expectedHints.readRequestActions[readRequestIndex] = ReadRequestAction.readAsPending(hintIndex); expectedHints.pendingReadHints[hintIndex] = new PendingReadHint(readRequestIndex, noteHashIndex); numReadRequests++; @@ -64,7 +67,7 @@ describe('buildNoteHashReadRequestHints', () => { const readRequestIndex = numReadRequests; const hintIndex = numSettledReads; const value = settledNoteHashes[noteHashIndex]; - noteHashReadRequests[readRequestIndex] = makeReadRequest(settledNoteHashes[noteHashIndex]); + noteHashReadRequests[readRequestIndex] = makeSettledReadRequest(settledNoteHashes[noteHashIndex]); expectedHints.readRequestActions[readRequestIndex] = ReadRequestAction.readAsSettled(hintIndex); expectedHints.settledReadHints[hintIndex] = new SettledReadHint(readRequestIndex, {} as any, new Fr(value)); numReadRequests++; @@ -73,7 +76,7 @@ describe('buildNoteHashReadRequestHints', () => { const readFutureNoteHash = (noteHashIndex: number) => { const readRequestIndex = numReadRequests; - noteHashReadRequests[readRequestIndex] = makeReadRequest(futureNoteHashes[noteHashIndex].value.toNumber()); + noteHashReadRequests[readRequestIndex] = makePendingReadRequest(futureNoteHashes[noteHashIndex].value.toNumber()); numReadRequests++; }; @@ -82,7 +85,6 @@ describe('buildNoteHashReadRequestHints', () => { oracle, new ClaimedLengthArray(noteHashReadRequests, numReadRequests), new ClaimedLengthArray(noteHashes, MAX_NOTE_HASHES_PER_TX), - futureNoteHashes, ); beforeEach(() => { @@ -128,10 +130,10 @@ describe('buildNoteHashReadRequestHints', () => { expect(hints).toEqual(expectedHints); }); - 
it('throws if cannot find a match in pending set and in the tree', async () => { - readPendingNoteHash(2); - // Tweak the value of the read request. - noteHashReadRequests[0].readRequest.value = new Fr(123); + it('throws if settled read request cannot find a match in the tree', async () => { + // A settled read request for a value that the oracle can't find. + noteHashReadRequests[0] = makeSettledReadRequest(456); + numReadRequests = 1; await expect(() => buildHints()).rejects.toThrow('Read request is reading an unknown note hash.'); }); }); diff --git a/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.ts b/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.ts index 9316818471a5..b7209a6bfd7d 100644 --- a/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/build_note_hash_read_request_hints.ts @@ -11,7 +11,6 @@ import type { ScopedNoteHash } from '../note_hash.js'; import { NoteHashReadRequestHintsBuilder } from './note_hash_read_request_hints.js'; import type { ScopedReadRequest } from './read_request.js'; import { PendingReadHint, ReadRequestActionEnum, ReadRequestResetActions } from './read_request_hints.js'; -import { ScopedValueCache } from './scoped_value_cache.js'; export function isValidNoteHashReadRequest(readRequest: ScopedReadRequest, noteHash: ScopedNoteHash) { return ( @@ -24,7 +23,6 @@ export function isValidNoteHashReadRequest(readRequest: ScopedReadRequest, noteH export function getNoteHashReadRequestResetActions( noteHashReadRequests: ClaimedLengthArray, noteHashes: ClaimedLengthArray, - futureNoteHashes: ScopedNoteHash[], ): ReadRequestResetActions { const resetActions = ReadRequestResetActions.empty(MAX_NOTE_HASH_READ_REQUESTS_PER_TX); @@ -36,24 +34,23 @@ export function getNoteHashReadRequestResetActions( noteHashMap.set(value, arr); }); - const futureNoteHashMap = new ScopedValueCache(futureNoteHashes); - for (let i = 0; i < 
noteHashReadRequests.claimedLength; ++i) { const readRequest = noteHashReadRequests.array[i]; - const pendingNoteHash = noteHashMap - .get(readRequest.value.toBigInt()) - ?.find(n => isValidNoteHashReadRequest(readRequest, n.noteHash)); - - if (pendingNoteHash !== undefined) { - resetActions.actions[i] = ReadRequestActionEnum.READ_AS_PENDING; - resetActions.pendingReadHints.push(new PendingReadHint(i, pendingNoteHash.index)); - } else if ( - !futureNoteHashMap - .get(readRequest) - .find(futureNoteHash => isValidNoteHashReadRequest(readRequest, futureNoteHash)) - ) { + if (readRequest.contractAddress.isZero()) { + // Settled read: empty contract address means resolve against the note hash tree. resetActions.actions[i] = ReadRequestActionEnum.READ_AS_SETTLED; + } else { + // Pending read: non-empty contract address means match against a pending note hash. + const pendingNoteHash = noteHashMap + .get(readRequest.value.toBigInt()) + ?.find(n => isValidNoteHashReadRequest(readRequest, n.noteHash)); + + if (pendingNoteHash) { + resetActions.actions[i] = ReadRequestActionEnum.READ_AS_PENDING; + resetActions.pendingReadHints.push(new PendingReadHint(i, pendingNoteHash.index)); + } + // Otherwise, the read request may be resolved by a future note hash. Leave as NOOP. 
} } @@ -115,11 +112,10 @@ export async function buildNoteHashReadRequestHints, noteHashes: ClaimedLengthArray, - futureNoteHashes: ScopedNoteHash[], maxPending: PENDING = MAX_NOTE_HASH_READ_REQUESTS_PER_TX as PENDING, maxSettled: SETTLED = MAX_NOTE_HASH_READ_REQUESTS_PER_TX as SETTLED, ) { - const resetActions = getNoteHashReadRequestResetActions(noteHashReadRequests, noteHashes, futureNoteHashes); + const resetActions = getNoteHashReadRequestResetActions(noteHashReadRequests, noteHashes); return await buildNoteHashReadRequestHintsFromResetActions( oracle, noteHashReadRequests, diff --git a/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.test.ts b/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.test.ts index 3a32f503cd90..7244472f602d 100644 --- a/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.test.ts +++ b/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.test.ts @@ -97,7 +97,6 @@ describe('buildNullifierReadRequestHints', () => { oracle, new ClaimedLengthArray(nullifierReadRequests, numReadRequests), new ClaimedLengthArray(nullifiers, MAX_NULLIFIERS_PER_TX), - futureNullifiers, ); beforeEach(() => { diff --git a/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.ts b/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.ts index 0c28269d409a..26390f9ce226 100644 --- a/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/build_nullifier_read_request_hints.ts @@ -12,7 +12,6 @@ import type { ScopedNullifier } from '../nullifier.js'; import { NullifierReadRequestHintsBuilder } from './nullifier_read_request_hints.js'; import { ScopedReadRequest } from './read_request.js'; import { PendingReadHint, ReadRequestActionEnum, ReadRequestResetActions } from './read_request_hints.js'; -import { ScopedValueCache } from './scoped_value_cache.js'; export function 
isValidNullifierReadRequest(readRequest: ScopedReadRequest, nullifier: ScopedNullifier) { return ( @@ -30,7 +29,6 @@ interface NullifierMembershipWitnessWithPreimage { export function getNullifierReadRequestResetActions( nullifierReadRequests: ClaimedLengthArray, nullifiers: ClaimedLengthArray, - futureNullifiers: ScopedNullifier[], ): ReadRequestResetActions { const resetActions = ReadRequestResetActions.empty(MAX_NULLIFIER_READ_REQUESTS_PER_TX); @@ -42,23 +40,23 @@ export function getNullifierReadRequestResetActions( nullifierMap.set(value, arr); }); - const futureNullifiersMap = new ScopedValueCache(futureNullifiers); - for (let i = 0; i < nullifierReadRequests.claimedLength; ++i) { const readRequest = nullifierReadRequests.array[i]; - const pendingNullifier = nullifierMap - .get(readRequest.value.toBigInt()) - ?.find(({ nullifier }) => isValidNullifierReadRequest(readRequest, nullifier)); - - if (pendingNullifier !== undefined) { - resetActions.actions[i] = ReadRequestActionEnum.READ_AS_PENDING; - resetActions.pendingReadHints.push(new PendingReadHint(i, pendingNullifier.index)); - } else if ( - !futureNullifiersMap - .get(readRequest) - .some(futureNullifier => isValidNullifierReadRequest(readRequest, futureNullifier)) - ) { + + if (readRequest.contractAddress.isZero()) { + // Settled read: empty contract address means resolve against the nullifier tree. resetActions.actions[i] = ReadRequestActionEnum.READ_AS_SETTLED; + } else { + // Pending read: non-empty contract address means match against a pending nullifier. + const pendingNullifier = nullifierMap + .get(readRequest.value.toBigInt()) + ?.find(({ nullifier }) => isValidNullifierReadRequest(readRequest, nullifier)); + + if (pendingNullifier) { + resetActions.actions[i] = ReadRequestActionEnum.READ_AS_PENDING; + resetActions.pendingReadHints.push(new PendingReadHint(i, pendingNullifier.index)); + } + // Otherwise, the read request may be resolved by a future nullifier. Leave as NOOP. 
} } @@ -111,11 +109,10 @@ export async function buildNullifierReadRequestHints, nullifiers: ClaimedLengthArray, - futureNullifiers: ScopedNullifier[], maxPending: PENDING = MAX_NULLIFIER_READ_REQUESTS_PER_TX as PENDING, maxSettled: SETTLED = MAX_NULLIFIER_READ_REQUESTS_PER_TX as SETTLED, ) { - const resetActions = getNullifierReadRequestResetActions(nullifierReadRequests, nullifiers, futureNullifiers); + const resetActions = getNullifierReadRequestResetActions(nullifierReadRequests, nullifiers); return await buildNullifierReadRequestHintsFromResetActions( oracle, nullifierReadRequests, diff --git a/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.test.ts b/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.test.ts index 8925a41d20e9..91a2cbd22bdc 100644 --- a/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.test.ts +++ b/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.test.ts @@ -4,7 +4,8 @@ import { AztecAddress } from '../../aztec-address/index.js'; import { ClaimedLengthArray } from '../claimed_length_array.js'; import { NoteHash, type ScopedNoteHash } from '../note_hash.js'; import { Nullifier, type ScopedNullifier } from '../nullifier.js'; -import { buildTransientDataHints } from './build_transient_data_hints.js'; +import { PrivateLogData, ScopedPrivateLogData } from '../private_log_data.js'; +import { buildTransientDataHints, countSquashedLogs } from './build_transient_data_hints.js'; import { ReadRequest, ScopedReadRequest } from './read_request.js'; import { TransientDataSquashingHint } from './transient_data_squashing_hint.js'; @@ -16,6 +17,7 @@ describe('buildTransientDataHints', () => { let nadaIndexHint: TransientDataSquashingHint; let futureNoteHashReads: ScopedReadRequest[]; let futureNullifierReads: ScopedReadRequest[]; + let futureLogs: PrivateLogData[]; let noteHashNullifierCounterMap: Map; let validationRequestsSplitCounter = 0; @@ -25,12 +27,14 @@ describe('buildTransientDataHints', () => 
{ new ClaimedLengthArray(nullifiers, nullifiers.length), futureNoteHashReads, futureNullifierReads, + futureLogs, noteHashNullifierCounterMap, validationRequestsSplitCounter, ); beforeEach(() => { validationRequestsSplitCounter = 0; + futureLogs = []; noteHashes = [ new NoteHash(new Fr(11), 100).scope(contractAddress), new NoteHash(new Fr(22), 200).scope(contractAddress), @@ -95,4 +99,93 @@ describe('buildTransientDataHints', () => { noteHashNullifierCounterMap.set(noteHashes[0].counter, noteHashes[0].counter - 1); expect(buildHints).toThrow('Hinted nullifier has smaller counter than note hash.'); }); + + it('keeps the pair if a future log is linked to the note hash', () => { + // noteHashes[0] (counter 100) <> nullifiers[3] would normally be squashed. + // Add a future log linked to noteHashes[0]. + const log = PrivateLogData.empty(); + log.noteHashCounter = 100; + futureLogs = [log]; + const { numTransientData, hints } = buildHints(); + expect(numTransientData).toBe(0); + expect(hints).toEqual(Array(nullifiers.length).fill(nadaIndexHint)); + }); + + it('squashes the pair if future log is linked to a different note hash', () => { + // Future log linked to noteHashes[1] (counter 200), not noteHashes[0] (counter 100). + const log = PrivateLogData.empty(); + log.noteHashCounter = 200; + futureLogs = [log]; + const { numTransientData, hints } = buildHints(); + // noteHashes[0] <> nullifiers[3] should still be squashed. + expect(numTransientData).toBe(1); + expect(hints).toEqual([new TransientDataSquashingHint(3, 0), nadaIndexHint, nadaIndexHint, nadaIndexHint]); + }); + + it('ignores future logs with noteHashCounter of 0', () => { + // A log with noteHashCounter = 0 is not linked to any note hash. 
+ const log = PrivateLogData.empty(); + log.noteHashCounter = 0; + futureLogs = [log]; + const { numTransientData, hints } = buildHints(); + expect(numTransientData).toBe(1); + expect(hints).toEqual([new TransientDataSquashingHint(3, 0), nadaIndexHint, nadaIndexHint, nadaIndexHint]); + }); +}); + +describe('countSquashedLogs', () => { + const contractAddress = AztecAddress.fromBigInt(987654n); + + const makeLog = (noteHashCounter: number, counter: number): ScopedPrivateLogData => { + const log = PrivateLogData.empty(); + log.noteHashCounter = noteHashCounter; + log.counter = counter; + return new ScopedPrivateLogData(log, contractAddress); + }; + + it('returns 0 when no hints are provided', () => { + const noteHashes: ScopedNoteHash[] = [new NoteHash(new Fr(11), 100).scope(contractAddress)]; + const logs: ScopedPrivateLogData[] = [makeLog(100, 101)]; + + const result = countSquashedLogs( + new ClaimedLengthArray(noteHashes, noteHashes.length), + new ClaimedLengthArray(logs, logs.length), + [], + ); + expect(result).toBe(0); + }); + + it('returns 0 when no logs are linked to squashed note hashes', () => { + const noteHashes: ScopedNoteHash[] = [ + new NoteHash(new Fr(11), 100).scope(contractAddress), + new NoteHash(new Fr(22), 200).scope(contractAddress), + ]; + const logs: ScopedPrivateLogData[] = [makeLog(0, 300), makeLog(0, 400)]; + const hints = [new TransientDataSquashingHint(0, 0)]; + + const result = countSquashedLogs( + new ClaimedLengthArray(noteHashes, noteHashes.length), + new ClaimedLengthArray(logs, logs.length), + hints, + ); + expect(result).toBe(0); + }); + + it('counts logs linked to squashed note hashes', () => { + const noteHashes: ScopedNoteHash[] = [ + new NoteHash(new Fr(11), 100).scope(contractAddress), + new NoteHash(new Fr(22), 200).scope(contractAddress), + ]; + // Two logs linked to noteHashes[0] (counter 100), one unlinked. 
+ const logs: ScopedPrivateLogData[] = [makeLog(100, 101), makeLog(100, 102), makeLog(0, 300)]; + // Hint squashes noteHashes[0]. + const hints = [new TransientDataSquashingHint(0, 0)]; + + const result = countSquashedLogs( + new ClaimedLengthArray(noteHashes, noteHashes.length), + new ClaimedLengthArray(logs, logs.length), + hints, + ); + expect(result).toBe(2); + }); }); diff --git a/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.ts b/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.ts index c3bb12ceffaa..a03f80912e6c 100644 --- a/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.ts +++ b/yarn-project/stdlib/src/kernel/hints/build_transient_data_hints.ts @@ -4,6 +4,7 @@ import type { Tuple } from '@aztec/foundation/serialize'; import type { ClaimedLengthArray } from '../claimed_length_array.js'; import type { ScopedNoteHash } from '../note_hash.js'; import type { ScopedNullifier } from '../nullifier.js'; +import type { PrivateLogData, ScopedPrivateLogData } from '../private_log_data.js'; import { isValidNoteHashReadRequest } from './build_note_hash_read_request_hints.js'; import { isValidNullifierReadRequest } from './build_nullifier_read_request_hints.js'; import type { ScopedReadRequest } from './read_request.js'; @@ -15,11 +16,13 @@ export function buildTransientDataHints, futureNoteHashReads: ScopedReadRequest[], futureNullifierReads: ScopedReadRequest[], + futureLogs: PrivateLogData[], noteHashNullifierCounterMap: Map, splitCounter: number, ): { numTransientData: number; hints: Tuple } { const futureNoteHashReadsMap = new ScopedValueCache(futureNoteHashReads); const futureNullifierReadsMap = new ScopedValueCache(futureNullifierReads); + const futureLogNoteHashCounters = new Set(futureLogs.filter(l => l.noteHashCounter > 0).map(l => l.noteHashCounter)); const nullifierIndexMap: Map = new Map(); nullifiers.getActiveItems().forEach((n, i) => nullifierIndexMap.set(n.counter, i)); @@ -28,10 +31,12 @@ export function 
buildTransientDataHints isValidNoteHashReadRequest(read, noteHash)) + futureNoteHashReadsMap.get(noteHash).find(read => isValidNoteHashReadRequest(read, noteHash)) || + futureLogNoteHashCounters.has(noteHash.counter) ) { continue; } @@ -77,3 +82,13 @@ export function buildTransientDataHints( + noteHashes: ClaimedLengthArray, + privateLogs: ClaimedLengthArray, + squashingHints: TransientDataSquashingHint[], +): number { + const squashedNoteHashCounters = new Set(squashingHints.map(h => noteHashes.array[h.noteHashIndex].counter)); + return privateLogs.getActiveItems().filter(l => squashedNoteHashCounters.has(l.inner.noteHashCounter)).length; +}