diff --git a/noir/.github/workflows/test-cargo.yml b/noir/.github/workflows/test-cargo.yml deleted file mode 100644 index 8d414daa75b4..000000000000 --- a/noir/.github/workflows/test-cargo.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Test cargo - -on: - pull_request: - merge_group: - push: - branches: - - master - -# This will cancel previous runs when a branch or PR is updated -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.ref || github.run_id }} - cancel-in-progress: true - -jobs: - build: - name: Test cargo - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - name: Get current date - id: date - run: echo "date=$(date +'%Y.%m.%d.%H.%M')" >> $GITHUB_STATE - - name: prepare docker images tags - id: prep - run: | - REGISTRY="ghcr.io" - IMG="${REGISTRY}/${{ github.repository }}" - IMAGE=$(echo "$IMG" | tr '[:upper:]' '[:lower:]') - TAGS="${IMAGE}:${{ github.sha }}" - TAGS="${TAGS},${IMAGE}:latest,${IMAGE}:v${{ steps.date.outputs.date }}" - echo ::set-output name=tags::${TAGS} - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v3 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Test cargo - uses: docker/build-push-action@v5 - with: - context: . 
- file: Dockerfile.ci - tags: ${{ steps.prep.outputs.tags }} - target: test-cargo - cache-from: type=gha - cache-to: type=gha,mode=max \ No newline at end of file diff --git a/noir/.github/workflows/test-js-packages.yml b/noir/.github/workflows/test-js-packages.yml index a298d67a485e..e2ffc5087bef 100644 --- a/noir/.github/workflows/test-js-packages.yml +++ b/noir/.github/workflows/test-js-packages.yml @@ -319,7 +319,7 @@ jobs: run: yarn workspace @noir-lang/noir_wasm test:browser test-noir-codegen: - needs: [build-acvm-js, build-noirc-abi] + needs: [build-acvm-js, build-noirc-abi, build-nargo] name: noir_codegen runs-on: ubuntu-latest timeout-minutes: 30 @@ -328,6 +328,12 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: Download nargo binary + uses: actions/download-artifact@v3 + with: + name: nargo + path: ./nargo + - name: Download acvm_js package artifact uses: actions/download-artifact@v3 with: @@ -339,6 +345,14 @@ jobs: with: name: noirc_abi_wasm path: ./tooling/noirc_abi_wasm + + - name: Set nargo on PATH + run: | + nargo_binary="${{ github.workspace }}/nargo/nargo" + chmod +x $nargo_binary + echo "$(dirname $nargo_binary)" >> $GITHUB_PATH + export PATH="$PATH:$(dirname $nargo_binary)" + nargo -V - name: Install Yarn dependencies uses: ./.github/actions/setup diff --git a/noir/.gitrepo b/noir/.gitrepo index 97b5260ea5d9..053acae1f4f2 100644 --- a/noir/.gitrepo +++ b/noir/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/noir-lang/noir branch = aztec-packages - commit = c0826b58272bbba28d6cd25bc07dd5de3cc12e61 + commit = 114aa6f90f147fe69ff424e881463a65df26c4e6 parent = 9a80008c623a9d26e1b82c9e86561c304ef185f1 method = merge cmdver = 0.4.6 diff --git a/noir/Cargo.lock b/noir/Cargo.lock index 0334ee5f5526..50892d98ff3f 100644 --- a/noir/Cargo.lock +++ b/noir/Cargo.lock @@ -2682,7 +2682,6 @@ dependencies = [ "assert_fs", "async-lsp", "backend-interface", - "bb_abstraction_leaks", "bn254_blackbox_solver", "build-data", "clap", @@ 
-2827,7 +2826,6 @@ dependencies = [ "noirc_printable_type", "owo-colors", "rexpect", - "rustc_version", "serde_json", "tempfile", "test-binary", diff --git a/noir/acvm-repo/acir/src/circuit/black_box_functions.rs b/noir/acvm-repo/acir/src/circuit/black_box_functions.rs index 78bc4121de7d..0dcfb3bc9f1e 100644 --- a/noir/acvm-repo/acir/src/circuit/black_box_functions.rs +++ b/noir/acvm-repo/acir/src/circuit/black_box_functions.rs @@ -74,7 +74,7 @@ impl BlackBoxFunc { BlackBoxFunc::XOR => "xor", BlackBoxFunc::RANGE => "range", BlackBoxFunc::Keccak256 => "keccak256", - BlackBoxFunc::Keccakf1600 => "keccak_f1600", + BlackBoxFunc::Keccakf1600 => "keccakf1600", BlackBoxFunc::RecursiveAggregation => "recursive_aggregation", BlackBoxFunc::EcdsaSecp256r1 => "ecdsa_secp256r1", } diff --git a/noir/acvm-repo/acir/src/circuit/mod.rs b/noir/acvm-repo/acir/src/circuit/mod.rs index 48c6b14ce695..b248b30b1d96 100644 --- a/noir/acvm-repo/acir/src/circuit/mod.rs +++ b/noir/acvm-repo/acir/src/circuit/mod.rs @@ -99,7 +99,7 @@ impl FromStr for OpcodeLocation { let brillig_index = parts[1].parse()?; Ok(OpcodeLocation::Brillig { acir_index, brillig_index }) } - _ => unreachable!(), + _ => unreachable!("`OpcodeLocation` has too many components"), } } diff --git a/noir/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs b/noir/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs index 2ac4d558b68e..7ee4e2498a5f 100644 --- a/noir/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs +++ b/noir/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs @@ -163,8 +163,12 @@ impl BlackBoxFuncCall { vec![*lhs, *rhs] } BlackBoxFuncCall::FixedBaseScalarMul { low, high, .. } => vec![*low, *high], - BlackBoxFuncCall::EmbeddedCurveAdd { input1_x, input1_y, input2_x, input2_y, .. } => vec![*input1_x, *input1_y, *input2_x, *input2_y], - BlackBoxFuncCall::EmbeddedCurveDouble { input_x, input_y, .. 
} => vec![*input_x, *input_y], + BlackBoxFuncCall::EmbeddedCurveAdd { + input1_x, input1_y, input2_x, input2_y, .. + } => vec![*input1_x, *input1_y, *input2_x, *input2_y], + BlackBoxFuncCall::EmbeddedCurveDouble { input_x, input_y, .. } => { + vec![*input_x, *input_y] + } BlackBoxFuncCall::RANGE { input } => vec![*input], BlackBoxFuncCall::SchnorrVerify { public_key_x, @@ -245,8 +249,7 @@ impl BlackBoxFuncCall { | BlackBoxFuncCall::Blake2s { outputs, .. } | BlackBoxFuncCall::Blake3 { outputs, .. } | BlackBoxFuncCall::Keccak256 { outputs, .. } - | BlackBoxFuncCall::Keccakf1600 { outputs, .. } - => outputs.to_vec(), + | BlackBoxFuncCall::Keccakf1600 { outputs, .. } => outputs.to_vec(), BlackBoxFuncCall::AND { output, .. } | BlackBoxFuncCall::XOR { output, .. } | BlackBoxFuncCall::SchnorrVerify { output, .. } @@ -257,7 +260,9 @@ impl BlackBoxFuncCall { | BlackBoxFuncCall::PedersenCommitment { outputs, .. } | BlackBoxFuncCall::EmbeddedCurveAdd { outputs, .. } | BlackBoxFuncCall::EmbeddedCurveDouble { outputs, .. } => vec![outputs.0, outputs.1], - BlackBoxFuncCall::RANGE { .. } | BlackBoxFuncCall::RecursiveAggregation { .. } => vec![], + BlackBoxFuncCall::RANGE { .. } | BlackBoxFuncCall::RecursiveAggregation { .. } => { + vec![] + } BlackBoxFuncCall::Keccak256VariableLength { outputs, .. 
} => outputs.to_vec(), } } diff --git a/noir/acvm-repo/acir/tests/test_program_serialization.rs b/noir/acvm-repo/acir/tests/test_program_serialization.rs index 1f25b6655736..7d3b7b32d35e 100644 --- a/noir/acvm-repo/acir/tests/test_program_serialization.rs +++ b/noir/acvm-repo/acir/tests/test_program_serialization.rs @@ -102,9 +102,9 @@ fn pedersen_circuit() { let bytes = Circuit::serialize_circuit(&circuit); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 138, 9, 10, 0, 64, 8, 2, 103, 15, 250, 255, 139, - 163, 162, 130, 72, 16, 149, 241, 3, 135, 84, 164, 172, 173, 213, 175, 251, 45, 198, 96, - 243, 211, 50, 152, 67, 220, 211, 92, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 138, 9, 10, 0, 64, 8, 2, 103, 15, 232, 255, 31, 142, + 138, 10, 34, 65, 84, 198, 15, 28, 82, 145, 178, 182, 86, 191, 238, 183, 24, 131, 205, 79, + 203, 0, 166, 242, 158, 93, 92, 0, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -145,7 +145,7 @@ fn schnorr_verify_circuit() { let expected_serialization: Vec = vec![ 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 87, 78, 2, 1, 20, 134, 209, 177, 247, 222, 123, 71, 68, 68, 68, 68, 68, 68, 68, 68, 68, 221, 133, 251, 95, 130, 145, 27, 206, 36, 78, 50, - 57, 16, 94, 200, 253, 191, 159, 36, 73, 134, 146, 193, 19, 142, 241, 183, 255, 14, 179, + 57, 16, 94, 200, 253, 191, 159, 36, 73, 134, 146, 193, 19, 142, 243, 183, 255, 14, 179, 233, 247, 145, 254, 59, 217, 127, 71, 57, 198, 113, 78, 48, 125, 167, 56, 205, 25, 206, 114, 142, 243, 92, 224, 34, 151, 184, 204, 21, 174, 114, 141, 235, 220, 224, 38, 183, 184, 205, 29, 238, 114, 143, 251, 60, 224, 33, 143, 120, 204, 19, 158, 242, 140, 25, 158, 51, @@ -158,7 +158,7 @@ fn schnorr_verify_circuit() { 91, 159, 218, 56, 99, 219, 172, 77, 115, 182, 204, 219, 176, 96, 187, 162, 205, 74, 182, 42, 219, 168, 98, 155, 170, 77, 106, 182, 168, 219, 160, 225, 246, 77, 55, 111, 185, 113, 219, 109, 59, 110, 218, 117, 203, 158, 27, 166, 55, 75, 239, 150, 184, 101, 250, 252, 1, - 19, 
89, 159, 101, 220, 3, 0, 0, + 55, 204, 92, 74, 220, 3, 0, 0, ]; assert_eq!(bytes, expected_serialization) diff --git a/noir/acvm-repo/acvm/src/compiler/transformers/mod.rs b/noir/acvm-repo/acvm/src/compiler/transformers/mod.rs index bb4dd9e66b2b..99f1ca23b5bc 100644 --- a/noir/acvm-repo/acvm/src/compiler/transformers/mod.rs +++ b/noir/acvm-repo/acvm/src/compiler/transformers/mod.rs @@ -104,7 +104,8 @@ pub(super) fn transform_internal( | acir::circuit::opcodes::BlackBoxFuncCall::XOR { output, .. } => { transformer.mark_solvable(*output); } - acir::circuit::opcodes::BlackBoxFuncCall::RANGE { .. } | acir::circuit::opcodes::BlackBoxFuncCall::RecursiveAggregation { .. } => (), + acir::circuit::opcodes::BlackBoxFuncCall::RANGE { .. } + | acir::circuit::opcodes::BlackBoxFuncCall::RecursiveAggregation { .. } => (), acir::circuit::opcodes::BlackBoxFuncCall::SHA256 { outputs, .. } | acir::circuit::opcodes::BlackBoxFuncCall::Keccak256 { outputs, .. } | acir::circuit::opcodes::BlackBoxFuncCall::Keccak256VariableLength { diff --git a/noir/acvm-repo/acvm/src/pwg/blackbox/mod.rs b/noir/acvm-repo/acvm/src/pwg/blackbox/mod.rs index db58795c8815..eb16c984b00a 100644 --- a/noir/acvm-repo/acvm/src/pwg/blackbox/mod.rs +++ b/noir/acvm-repo/acvm/src/pwg/blackbox/mod.rs @@ -177,10 +177,10 @@ pub(crate) fn solve( BlackBoxFuncCall::FixedBaseScalarMul { low, high, outputs } => { fixed_base_scalar_mul(backend, initial_witness, *low, *high, *outputs) } - BlackBoxFuncCall::EmbeddedCurveAdd { outputs, input1_x, input1_y, input2_x, input2_y } => { + BlackBoxFuncCall::EmbeddedCurveAdd { .. } => { todo!(); } - BlackBoxFuncCall::EmbeddedCurveDouble { outputs, input_x, input_y } => { + BlackBoxFuncCall::EmbeddedCurveDouble { .. 
} => { todo!(); } // Recursive aggregation will be entirely handled by the backend and is not solved by the ACVM diff --git a/noir/acvm-repo/acvm_js/src/execute.rs b/noir/acvm-repo/acvm_js/src/execute.rs index 439a929cc981..3f691e1abf29 100644 --- a/noir/acvm-repo/acvm_js/src/execute.rs +++ b/noir/acvm-repo/acvm_js/src/execute.rs @@ -61,8 +61,8 @@ pub async fn execute_circuit_with_black_box_solver( foreign_call_handler: ForeignCallHandler, ) -> Result { console_error_panic_hook::set_once(); - let circuit: Circuit = - Circuit::deserialize_circuit(&circuit).expect("Failed to deserialize circuit"); + let circuit: Circuit = Circuit::deserialize_circuit(&circuit) + .map_err(|_| JsExecutionError::new("Failed to deserialize circuit. This is likely due to differing serialization formats between ACVM_JS and your compiler".to_string(), None))?; let mut acvm = ACVM::new(&solver.0, &circuit.opcodes, initial_witness.into()); diff --git a/noir/acvm-repo/acvm_js/test/shared/pedersen.ts b/noir/acvm-repo/acvm_js/test/shared/pedersen.ts index 668ee2b510b8..e35893fc3552 100644 --- a/noir/acvm-repo/acvm_js/test/shared/pedersen.ts +++ b/noir/acvm-repo/acvm_js/test/shared/pedersen.ts @@ -1,7 +1,7 @@ // See `pedersen_circuit` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 138, 9, 10, 0, 64, 8, 2, 103, 15, 250, 255, 139, 163, 162, 130, 72, 16, 149, - 241, 3, 135, 84, 164, 172, 173, 213, 175, 251, 45, 198, 96, 243, 211, 50, 152, 67, 220, 211, 92, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 138, 9, 10, 0, 64, 8, 2, 103, 15, 232, 255, 31, 142, 138, 10, 34, 65, 84, 198, + 15, 28, 82, 145, 178, 182, 86, 191, 238, 183, 24, 131, 205, 79, 203, 0, 166, 242, 158, 93, 92, 0, 0, 0, ]); export const initialWitnessMap = new Map([[1, '0x0000000000000000000000000000000000000000000000000000000000000001']]); diff --git a/noir/acvm-repo/acvm_js/test/shared/schnorr_verify.ts b/noir/acvm-repo/acvm_js/test/shared/schnorr_verify.ts index f88a70ba4a10..5716cbd30f84 100644 --- a/noir/acvm-repo/acvm_js/test/shared/schnorr_verify.ts +++ b/noir/acvm-repo/acvm_js/test/shared/schnorr_verify.ts @@ -2,7 +2,7 @@ export const bytecode = Uint8Array.from([ 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 87, 78, 2, 1, 20, 134, 209, 177, 247, 222, 123, 71, 68, 68, 68, 68, 68, 68, 68, 68, 68, 221, 133, 251, 95, 130, 145, 27, 206, 36, 78, 50, 57, 16, 94, 200, 253, 191, 159, 36, 73, 134, 146, - 193, 19, 142, 241, 183, 255, 14, 179, 233, 247, 145, 254, 59, 217, 127, 71, 57, 198, 113, 78, 48, 125, 167, 56, 205, + 193, 19, 142, 243, 183, 255, 14, 179, 233, 247, 145, 254, 59, 217, 127, 71, 57, 198, 113, 78, 48, 125, 167, 56, 205, 25, 206, 114, 142, 243, 92, 224, 34, 151, 184, 204, 21, 174, 114, 141, 235, 220, 224, 38, 183, 184, 205, 29, 238, 114, 143, 251, 60, 224, 33, 143, 120, 204, 19, 158, 242, 140, 25, 158, 51, 203, 11, 230, 120, 201, 60, 175, 88, 224, 53, 139, 188, 97, 137, 183, 44, 243, 142, 21, 222, 179, 202, 7, 214, 248, 200, 58, 159, 216, 224, 51, 155, 124, 97, 235, @@ -11,7 +11,7 @@ export const bytecode = Uint8Array.from([ 162, 149, 232, 36, 26, 137, 62, 162, 141, 232, 34, 154, 136, 30, 162, 133, 232, 32, 26, 136, 253, 99, 251, 195, 100, 176, 121, 236, 29, 91, 159, 218, 56, 99, 219, 
172, 77, 115, 182, 204, 219, 176, 96, 187, 162, 205, 74, 182, 42, 219, 168, 98, 155, 170, 77, 106, 182, 168, 219, 160, 225, 246, 77, 55, 111, 185, 113, 219, 109, 59, 110, 218, 117, 203, - 158, 27, 166, 55, 75, 239, 150, 184, 101, 250, 252, 1, 19, 89, 159, 101, 220, 3, 0, 0, + 158, 27, 166, 55, 75, 239, 150, 184, 101, 250, 252, 1, 55, 204, 92, 74, 220, 3, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/noir/acvm-repo/brillig_vm/src/black_box.rs b/noir/acvm-repo/brillig_vm/src/black_box.rs index 1b67e52f9b0b..012d527e44ed 100644 --- a/noir/acvm-repo/brillig_vm/src/black_box.rs +++ b/noir/acvm-repo/brillig_vm/src/black_box.rs @@ -78,11 +78,7 @@ pub(crate) fn evaluate_black_box( signature, result: result_register, } => { - let bb_func = match op { - BlackBoxOp::EcdsaSecp256k1 { .. } => BlackBoxFunc::EcdsaSecp256k1, - BlackBoxOp::EcdsaSecp256r1 { .. } => BlackBoxFunc::EcdsaSecp256r1, - _ => unreachable!(), - }; + let bb_func = black_box_function_from_op(op); let public_key_x: [u8; 32] = to_u8_vec(read_heap_array( memory, @@ -117,7 +113,7 @@ pub(crate) fn evaluate_black_box( BlackBoxOp::EcdsaSecp256r1 { .. } => { ecdsa_secp256r1_verify(&hashed_msg, &public_key_x, &public_key_y, &signature)? } - _ => unreachable!(), + _ => unreachable!("`BlackBoxOp` is guarded against being a non-ecdsa operation"), }; registers.set(*result_register, result.into()); @@ -187,6 +183,22 @@ pub(crate) fn evaluate_black_box( } } +fn black_box_function_from_op(op: &BlackBoxOp) -> BlackBoxFunc { + match op { + BlackBoxOp::Sha256 { .. } => BlackBoxFunc::SHA256, + BlackBoxOp::Blake2s { .. } => BlackBoxFunc::Blake2s, + BlackBoxOp::Keccak256 { .. } => BlackBoxFunc::Keccak256, + BlackBoxOp::EcdsaSecp256k1 { .. } => BlackBoxFunc::EcdsaSecp256k1, + BlackBoxOp::EcdsaSecp256r1 { .. } => BlackBoxFunc::EcdsaSecp256r1, + BlackBoxOp::SchnorrVerify { .. } => BlackBoxFunc::SchnorrVerify, + BlackBoxOp::PedersenCommitment { .. 
} => BlackBoxFunc::PedersenCommitment, + BlackBoxOp::PedersenHash { .. } => BlackBoxFunc::PedersenHash, + BlackBoxOp::FixedBaseScalarMul { .. } => BlackBoxFunc::FixedBaseScalarMul, + BlackBoxOp::EmbeddedCurveAdd { .. } => BlackBoxFunc::EmbeddedCurveAdd, + BlackBoxOp::EmbeddedCurveDouble { .. } => BlackBoxFunc::EmbeddedCurveDouble, + } +} + #[cfg(test)] mod test { use acir::brillig::BlackBoxOp; diff --git a/noir/compiler/integration-tests/circuits/recursion/src/main.nr b/noir/compiler/integration-tests/circuits/recursion/src/main.nr index e60e4e0b61a1..173207766fb9 100644 --- a/noir/compiler/integration-tests/circuits/recursion/src/main.nr +++ b/noir/compiler/integration-tests/circuits/recursion/src/main.nr @@ -1,17 +1,15 @@ use dep::std; fn main( - verification_key : [Field; 114], - proof : [Field; 94], - public_inputs : [Field; 1], - key_hash : Field, -) -> pub [Field;16]{ - let input_aggregation_object = [0; 16]; + verification_key: [Field; 114], + proof: [Field; 93], + public_inputs: [Field; 1], + key_hash: Field +) { std::verify_proof( - verification_key.as_slice(), - proof.as_slice(), - public_inputs.as_slice(), - key_hash, - input_aggregation_object + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), + key_hash ) } diff --git a/noir/compiler/integration-tests/test/browser/recursion.test.ts b/noir/compiler/integration-tests/test/browser/recursion.test.ts index faa317b2c3ce..2097164ebcb8 100644 --- a/noir/compiler/integration-tests/test/browser/recursion.test.ts +++ b/noir/compiler/integration-tests/test/browser/recursion.test.ts @@ -79,7 +79,6 @@ describe('It compiles noir program code, receiving circuit bytes and abi object. 
proof: proofAsFields, public_inputs: [main_inputs.y as Field], key_hash: vkHash, - input_aggregation_object: ['0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0'], }; logger.debug('recursion_inputs', recursion_inputs); diff --git a/noir/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts b/noir/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts index 6c20d44882b6..7a25669d22a4 100644 --- a/noir/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts +++ b/noir/compiler/integration-tests/test/node/onchain_recursive_verification.test.ts @@ -12,7 +12,7 @@ import { PathToFileSourceMap, } from '@noir-lang/noir_wasm'; import { Noir } from '@noir-lang/noir_js'; -import { BarretenbergBackend, flattenPublicInputs } from '@noir-lang/backend_barretenberg'; +import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; import { Field, InputMap } from '@noir-lang/noirc_abi'; compilerLogLevel('INFO'); @@ -71,10 +71,7 @@ it(`smart contract can verify a recursive proof`, async () => { const contract = await ethers.deployContract('contracts/recursion.sol:UltraVerifier', []); - const result = await contract.verify.staticCall( - recursion_proof.proof, - flattenPublicInputs(recursion_proof.publicInputs), - ); + const result = await contract.verify.staticCall(recursion_proof.proof, recursion_proof.publicInputs); expect(result).to.be.true; }); diff --git a/noir/compiler/integration-tests/test/node/smart_contract_verifier.test.ts b/noir/compiler/integration-tests/test/node/smart_contract_verifier.test.ts index 5b3d0e2d3370..a93a2fc15088 100644 --- a/noir/compiler/integration-tests/test/node/smart_contract_verifier.test.ts +++ b/noir/compiler/integration-tests/test/node/smart_contract_verifier.test.ts @@ -7,7 +7,7 @@ import toml from 'toml'; import { PathToFileSourceMap, compile, init_log_level as compilerLogLevel } from '@noir-lang/noir_wasm'; import { Noir } from 
'@noir-lang/noir_js'; -import { BarretenbergBackend, flattenPublicInputs } from '@noir-lang/backend_barretenberg'; +import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; compilerLogLevel('INFO'); @@ -61,7 +61,7 @@ test_cases.forEach((testInfo) => { const contract = await ethers.deployContract(testInfo.compiled, []); - const result = await contract.verify(proofData.proof, flattenPublicInputs(proofData.publicInputs)); + const result = await contract.verify(proofData.proof, proofData.publicInputs); expect(result).to.be.true; }); diff --git a/noir/compiler/noirc_driver/src/abi_gen.rs b/noir/compiler/noirc_driver/src/abi_gen.rs index 9d0d64b63006..e546cd822b70 100644 --- a/noir/compiler/noirc_driver/src/abi_gen.rs +++ b/noir/compiler/noirc_driver/src/abi_gen.rs @@ -33,7 +33,7 @@ pub(super) fn compute_function_abi( ) -> (Vec, Option) { let func_meta = context.def_interner.function_meta(func_id); - let (parameters, return_type) = func_meta.into_function_signature(); + let (parameters, return_type) = func_meta.function_signature(); let parameters = into_abi_params(context, parameters); let return_type = return_type.map(|typ| AbiType::from_type(context, &typ)); (parameters, return_type) diff --git a/noir/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/noir/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs index 04152127660e..336e88da48a0 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs @@ -238,14 +238,12 @@ impl GeneratedAcir { BlackBoxFunc::Keccakf1600 => { BlackBoxFuncCall::Keccakf1600 { inputs: inputs[0].clone(), outputs } } - BlackBoxFunc::RecursiveAggregation => { - BlackBoxFuncCall::RecursiveAggregation { - verification_key: inputs[0].clone(), - proof: inputs[1].clone(), - public_inputs: inputs[2].clone(), - key_hash: inputs[3][0], - } - } + BlackBoxFunc::RecursiveAggregation => 
BlackBoxFuncCall::RecursiveAggregation { + verification_key: inputs[0].clone(), + proof: inputs[1].clone(), + public_inputs: inputs[2].clone(), + key_hash: inputs[3][0], + }, }; self.push_opcode(AcirOpcode::BlackBoxFuncCall(black_box_func_call)); diff --git a/noir/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/noir/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index edf0461430f5..a1c96a3cd232 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -472,9 +472,9 @@ impl Context { self.acir_context.assert_eq_var(lhs, rhs, assert_message.clone())?; } } - Instruction::Cast(value_id, typ) => { - let result_acir_var = self.convert_ssa_cast(value_id, typ, dfg)?; - self.define_result_var(dfg, instruction_id, result_acir_var); + Instruction::Cast(value_id, _) => { + let acir_var = self.convert_numeric_value(*value_id, dfg)?; + self.define_result_var(dfg, instruction_id, acir_var); } Instruction::Call { func, arguments } => { let result_ids = dfg.instruction_results(instruction_id); @@ -1636,41 +1636,6 @@ impl Context { } } - /// Returns an `AcirVar` that is constrained to fit in the target type by truncating the input. - /// If the target cast is to a `NativeField`, no truncation is required so the cast becomes a - /// no-op. 
- fn convert_ssa_cast( - &mut self, - value_id: &ValueId, - typ: &Type, - dfg: &DataFlowGraph, - ) -> Result { - let (variable, incoming_type) = match self.convert_value(*value_id, dfg) { - AcirValue::Var(variable, typ) => (variable, typ), - AcirValue::DynamicArray(_) | AcirValue::Array(_) => { - unreachable!("Cast is only applied to numerics") - } - }; - let target_numeric = match typ { - Type::Numeric(numeric) => numeric, - _ => unreachable!("Can only cast to a numeric"), - }; - match target_numeric { - NumericType::NativeField => { - // Casting into a Field as a no-op - Ok(variable) - } - NumericType::Unsigned { bit_size } | NumericType::Signed { bit_size } => { - let max_bit_size = incoming_type.bit_size(); - if max_bit_size <= *bit_size { - // Incoming variable already fits into target bit size - this is a no-op - return Ok(variable); - } - self.acir_context.truncate_var(variable, *bit_size, max_bit_size) - } - } - } - /// Returns an `AcirVar`that is constrained to be result of the truncation. 
fn convert_ssa_truncate( &mut self, diff --git a/noir/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs b/noir/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs index 7f3370893215..58d9e0f6d157 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/function_builder/data_bus.rs @@ -84,7 +84,6 @@ impl FunctionBuilder { databus.values.push_back(value); databus.index += 1; } - Type::Reference(_) => unreachable!(), Type::Array(typ, len) => { assert!(typ.len() == 1, "unsupported composite type"); databus.map.insert(value, databus.index); @@ -98,8 +97,11 @@ impl FunctionBuilder { self.add_to_data_bus(element, databus); } } - Type::Slice(_) => unreachable!(), - Type::Function => unreachable!(), + Type::Reference(_) => { + unreachable!("Attempted to add invalid type (reference) to databus") + } + Type::Slice(_) => unreachable!("Attempted to add invalid type (slice) to databus"), + Type::Function => unreachable!("Attempted to add invalid type (function) to databus"), } } diff --git a/noir/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/noir/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index b972afa29903..2871f149b41d 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -255,6 +255,19 @@ impl FunctionBuilder { self.insert_instruction(Instruction::Constrain(lhs, rhs, assert_message), None); } + /// Insert a [`Instruction::RangeCheck`] instruction at the end of the current block. + pub(crate) fn insert_range_check( + &mut self, + value: ValueId, + max_bit_size: u32, + assert_message: Option, + ) { + self.insert_instruction( + Instruction::RangeCheck { value, max_bit_size, assert_message }, + None, + ); + } + /// Insert a call instruction at the end of the current block and return /// the results of the call. 
pub(crate) fn insert_call( diff --git a/noir/compiler/noirc_evaluator/src/ssa/ir/dfg.rs b/noir/compiler/noirc_evaluator/src/ssa/ir/dfg.rs index abddbfb74c7d..931aee9d079c 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/ir/dfg.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/ir/dfg.rs @@ -160,7 +160,7 @@ impl DataFlowGraph { call_stack: CallStack, ) -> InsertInstructionResult { use InsertInstructionResult::*; - match instruction.simplify(self, block, ctrl_typevars.clone()) { + match instruction.simplify(self, block, ctrl_typevars.clone(), &call_stack) { SimplifyResult::SimplifiedTo(simplification) => SimplifiedTo(simplification), SimplifyResult::SimplifiedToMultiple(simplification) => { SimplifiedToMultiple(simplification) diff --git a/noir/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/noir/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 628ad638e64f..9691017f04bb 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -236,10 +236,7 @@ impl Instruction { // In ACIR, a division with a false predicate outputs (0,0), so it cannot replace another instruction unless they have the same predicate bin.operator != BinaryOp::Div } - Cast(_, _) | Not(_) | ArrayGet { .. } | ArraySet { .. } => true, - - // Unclear why this instruction causes problems. - Truncate { .. } => false, + Cast(_, _) | Truncate { .. } | Not(_) | ArrayGet { .. } | ArraySet { .. } => true, // These either have side-effects or interact with memory Constrain(..) 
@@ -408,6 +405,7 @@ impl Instruction { dfg: &mut DataFlowGraph, block: BasicBlockId, ctrl_typevars: Option>, + call_stack: &CallStack, ) -> SimplifyResult { use SimplifyResult::*; match self { @@ -551,7 +549,7 @@ impl Instruction { } } Instruction::Call { func, arguments } => { - simplify_call(*func, arguments, dfg, block, ctrl_typevars) + simplify_call(*func, arguments, dfg, block, ctrl_typevars, call_stack) } Instruction::EnableSideEffects { condition } => { if let Some(last) = dfg[block].instructions().last().copied() { diff --git a/noir/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs b/noir/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs index 5b3a1669bfff..86e855b2cc05 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/ir/instruction/call.rs @@ -31,6 +31,7 @@ pub(super) fn simplify_call( dfg: &mut DataFlowGraph, block: BasicBlockId, ctrl_typevars: Option>, + call_stack: &CallStack, ) -> SimplifyResult { let intrinsic = match &dfg[func] { Value::Intrinsic(intrinsic) => *intrinsic, @@ -242,7 +243,24 @@ pub(super) fn simplify_call( SimplifyResult::SimplifiedToInstruction(instruction) } Intrinsic::FromField => { - let instruction = Instruction::Cast(arguments[0], ctrl_typevars.unwrap().remove(0)); + let incoming_type = Type::field(); + let target_type = ctrl_typevars.unwrap().remove(0); + + let truncate = Instruction::Truncate { + value: arguments[0], + bit_size: target_type.bit_size(), + max_bit_size: incoming_type.bit_size(), + }; + let truncated_value = dfg + .insert_instruction_and_results( + truncate, + block, + Some(vec![incoming_type]), + call_stack.clone(), + ) + .first(); + + let instruction = Instruction::Cast(truncated_value, target_type); SimplifyResult::SimplifiedToInstruction(instruction) } } diff --git a/noir/compiler/noirc_evaluator/src/ssa/ir/types.rs b/noir/compiler/noirc_evaluator/src/ssa/ir/types.rs index bae06a805d05..ae53c7705c25 100644 --- 
a/noir/compiler/noirc_evaluator/src/ssa/ir/types.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/ir/types.rs @@ -18,6 +18,28 @@ pub enum NumericType { NativeField, } +impl NumericType { + /// Returns the bit size of the provided numeric type. + pub(crate) fn bit_size(self: &NumericType) -> u32 { + match self { + NumericType::NativeField => FieldElement::max_num_bits(), + NumericType::Unsigned { bit_size } | NumericType::Signed { bit_size } => *bit_size, + } + } + + /// Returns true if the given Field value is within the numeric limits + /// for the current NumericType. + pub(crate) fn value_is_within_limits(self, field: FieldElement) -> bool { + match self { + NumericType::Signed { bit_size } | NumericType::Unsigned { bit_size } => { + let max = 2u128.pow(bit_size) - 1; + field <= max.into() + } + NumericType::NativeField => true, + } + } +} + /// All types representable in the IR. #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub(crate) enum Type { @@ -68,6 +90,18 @@ impl Type { Type::Numeric(NumericType::NativeField) } + /// Returns the bit size of the provided numeric type. + /// + /// # Panics + /// + /// Panics if `self` is not a [`Type::Numeric`] + pub(crate) fn bit_size(&self) -> u32 { + match self { + Type::Numeric(numeric_type) => numeric_type.bit_size(), + other => panic!("bit_size: Expected numeric type, found {other}"), + } + } + /// Returns the size of the element type for this array/slice. /// The size of a type is defined as representing how many Fields are needed /// to represent the type. This is 1 for every primitive type, and is the number of fields @@ -122,20 +156,6 @@ impl Type { } } -impl NumericType { - /// Returns true if the given Field value is within the numeric limits - /// for the current NumericType. 
- pub(crate) fn value_is_within_limits(self, field: FieldElement) -> bool { - match self { - NumericType::Signed { bit_size } | NumericType::Unsigned { bit_size } => { - let max = 2u128.pow(bit_size) - 1; - field <= max.into() - } - NumericType::NativeField => true, - } - } -} - /// Composite Types are essentially flattened struct or tuple types. /// Array types may have these as elements where each flattened field is /// included in the array sequentially. diff --git a/noir/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/noir/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index e944d7d99d88..7d345a9a4abd 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -187,7 +187,7 @@ mod test { instruction::{BinaryOp, Instruction, TerminatorInstruction}, map::Id, types::Type, - value::{Value, ValueId}, + value::Value, }, }; @@ -280,11 +280,11 @@ mod test { let return_value_id = match entry_block.unwrap_terminator() { TerminatorInstruction::Return { return_values, .. } => return_values[0], - _ => unreachable!(), + _ => unreachable!("Should have terminator instruction"), }; let return_element = match &main.dfg[return_value_id] { Value::Array { array, .. } => array[0], - _ => unreachable!(), + _ => unreachable!("Return type should be array"), }; // The return element is expected to refer to the new add instruction result. 
assert_eq!(main.dfg.resolve(new_add_instr_result), main.dfg.resolve(return_element)); @@ -293,7 +293,7 @@ mod test { #[test] fn instruction_deduplication() { // fn main f0 { - // b0(v0: Field): + // b0(v0: u16): // v1 = cast v0 as u32 // v2 = cast v0 as u32 // constrain v1 v2 @@ -308,7 +308,7 @@ mod test { // Compiling main let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir); - let v0 = builder.add_parameter(Type::field()); + let v0 = builder.add_parameter(Type::unsigned(16)); let v1 = builder.insert_cast(v0, Type::unsigned(32)); let v2 = builder.insert_cast(v0, Type::unsigned(32)); @@ -322,7 +322,7 @@ mod test { // Expected output: // // fn main f0 { - // b0(v0: Field): + // b0(v0: u16): // v1 = cast v0 as u32 // } let ssa = ssa.fold_constants(); @@ -332,6 +332,6 @@ mod test { assert_eq!(instructions.len(), 1); let instruction = &main.dfg[instructions[0]]; - assert_eq!(instruction, &Instruction::Cast(ValueId::test_new(0), Type::unsigned(32))); + assert_eq!(instruction, &Instruction::Cast(v0, Type::unsigned(32))); } } diff --git a/noir/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs b/noir/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs index fdd7c66684cb..6bdf2ab1c0af 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/opt/flatten_cfg.rs @@ -1102,7 +1102,7 @@ mod test { let main = ssa.main(); let ret = match main.dfg[main.entry_block()].terminator() { Some(TerminatorInstruction::Return { return_values, .. 
}) => return_values[0], - _ => unreachable!(), + _ => unreachable!("Should have terminator instruction"), }; let merged_values = get_all_constants_reachable_from_instruction(&main.dfg, ret); @@ -1473,7 +1473,7 @@ mod test { None => unreachable!("Expected constant 200 for return value"), } } - _ => unreachable!(), + _ => unreachable!("Should have terminator instruction"), } } } diff --git a/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs b/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs index 5724bf56e8e8..b34b667c31a7 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/context.rs @@ -275,6 +275,8 @@ impl<'a> FunctionContext<'a> { let bit_width = self.builder.numeric_constant(FieldElement::from(2_i128.pow(bit_size)), Type::field()); let sign_not = self.builder.insert_binary(one, BinaryOp::Sub, sign); + + // We use unsafe casts here, this is fine as we're casting to a `field` type. let as_field = self.builder.insert_cast(input, Type::field()); let sign_field = self.builder.insert_cast(sign, Type::field()); let positive_predicate = self.builder.insert_binary(sign_field, BinaryOp::Mul, as_field); @@ -310,12 +312,12 @@ impl<'a> FunctionContext<'a> { match operator { BinaryOpKind::Add | BinaryOpKind::Subtract => { // Result is computed modulo the bit size - let mut result = - self.builder.insert_truncate(result, bit_size, bit_size + 1); - result = self.builder.insert_cast(result, Type::unsigned(bit_size)); + let result = self.builder.insert_truncate(result, bit_size, bit_size + 1); + let result = + self.insert_safe_cast(result, Type::unsigned(bit_size), location); self.check_signed_overflow(result, lhs, rhs, operator, bit_size, location); - self.builder.insert_cast(result, result_type) + self.insert_safe_cast(result, result_type, location) } BinaryOpKind::Multiply => { // Result is computed modulo the bit size @@ -324,7 +326,7 @@ impl<'a> FunctionContext<'a> { result = 
self.builder.insert_truncate(result, bit_size, 2 * bit_size); self.check_signed_overflow(result, lhs, rhs, operator, bit_size, location); - self.builder.insert_cast(result, result_type) + self.insert_safe_cast(result, result_type, location) } BinaryOpKind::ShiftLeft | BinaryOpKind::ShiftRight => { self.check_shift_overflow(result, rhs, bit_size, location, true) @@ -349,12 +351,11 @@ impl<'a> FunctionContext<'a> { self.check_shift_overflow(result, rhs, bit_size, location, false) } else { let message = format!("attempt to {} with overflow", op_name); - let range_constraint = Instruction::RangeCheck { - value: result, - max_bit_size: bit_size, - assert_message: Some(message), - }; - self.builder.set_location(location).insert_instruction(range_constraint, None); + self.builder.set_location(location).insert_range_check( + result, + bit_size, + Some(message), + ); result } } @@ -375,8 +376,11 @@ impl<'a> FunctionContext<'a> { is_signed: bool, ) -> ValueId { let one = self.builder.numeric_constant(FieldElement::one(), Type::bool()); - let rhs = - if is_signed { self.builder.insert_cast(rhs, Type::unsigned(bit_size)) } else { rhs }; + let rhs = if is_signed { + self.insert_safe_cast(rhs, Type::unsigned(bit_size), location) + } else { + rhs + }; // Bit-shift with a negative number is an overflow if is_signed { // We compute the sign of rhs. @@ -432,8 +436,8 @@ impl<'a> FunctionContext<'a> { Type::unsigned(bit_size), ); // We compute the sign of the operands. 
The overflow checks for signed integers depends on these signs - let lhs_as_unsigned = self.builder.insert_cast(lhs, Type::unsigned(bit_size)); - let rhs_as_unsigned = self.builder.insert_cast(rhs, Type::unsigned(bit_size)); + let lhs_as_unsigned = self.insert_safe_cast(lhs, Type::unsigned(bit_size), location); + let rhs_as_unsigned = self.insert_safe_cast(rhs, Type::unsigned(bit_size), location); let lhs_sign = self.builder.insert_binary(lhs_as_unsigned, BinaryOp::Lt, half_width); let mut rhs_sign = self.builder.insert_binary(rhs_as_unsigned, BinaryOp::Lt, half_width); let message = if is_sub { @@ -464,18 +468,17 @@ impl<'a> FunctionContext<'a> { let product_field = self.builder.insert_binary(lhs_abs, BinaryOp::Mul, rhs_abs); // It must not already overflow the bit_size let message = "attempt to multiply with overflow".to_string(); - let size_overflow = Instruction::RangeCheck { - value: product_field, - max_bit_size: bit_size, - assert_message: Some(message.clone()), - }; - self.builder.set_location(location).insert_instruction(size_overflow, None); + self.builder.set_location(location).insert_range_check( + product_field, + bit_size, + Some(message.clone()), + ); let product = self.builder.insert_cast(product_field, Type::unsigned(bit_size)); // Then we check the signed product fits in a signed integer of bit_size-bits let not_same = self.builder.insert_binary(one, BinaryOp::Sub, same_sign); let not_same_sign_field = - self.builder.insert_cast(not_same, Type::unsigned(bit_size)); + self.insert_safe_cast(not_same, Type::unsigned(bit_size), location); let positive_maximum_with_offset = self.builder.insert_binary(half_width, BinaryOp::Add, not_same_sign_field); let product_overflow_check = @@ -665,6 +668,29 @@ impl<'a> FunctionContext<'a> { reshaped_return_values } + /// Inserts a cast instruction at the end of the current block and returns the results + /// of the cast. 
+ /// + /// Compared to `self.builder.insert_cast`, this version will automatically truncate `value` to be a valid `typ`. + pub(super) fn insert_safe_cast( + &mut self, + mut value: ValueId, + typ: Type, + location: Location, + ) -> ValueId { + self.builder.set_location(location); + + // To ensure that `value` is a valid `typ`, we insert an `Instruction::Truncate` instruction beforehand if + // we're narrowing the type size. + let incoming_type_size = self.builder.type_of_value(value).bit_size(); + let target_type_size = typ.bit_size(); + if target_type_size < incoming_type_size { + value = self.builder.insert_truncate(value, target_type_size, incoming_type_size); + } + + self.builder.insert_cast(value, typ) + } + /// Create a const offset of an address for an array load or store pub(super) fn make_offset(&mut self, mut address: ValueId, offset: u128) -> ValueId { if offset != 0 { diff --git a/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index c00fbbbcb405..0b8c3a37ef96 100644 --- a/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/noir/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -434,8 +434,8 @@ impl<'a> FunctionContext<'a> { fn codegen_cast(&mut self, cast: &ast::Cast) -> Result { let lhs = self.codegen_non_tuple_expression(&cast.lhs)?; let typ = Self::convert_non_tuple_type(&cast.r#type); - self.builder.set_location(cast.location); - Ok(self.builder.insert_cast(lhs, typ).into()) + + Ok(self.insert_safe_cast(lhs, typ, cast.location).into()) } /// Codegens a for loop, creating three new blocks in the process. 
diff --git a/noir/compiler/noirc_frontend/src/ast/mod.rs b/noir/compiler/noirc_frontend/src/ast/mod.rs index 5c10d3fe8f04..d9af5893024e 100644 --- a/noir/compiler/noirc_frontend/src/ast/mod.rs +++ b/noir/compiler/noirc_frontend/src/ast/mod.rs @@ -257,7 +257,20 @@ impl UnresolvedTypeExpression { BinaryOpKind::Multiply => BinaryTypeOperator::Multiplication, BinaryOpKind::Divide => BinaryTypeOperator::Division, BinaryOpKind::Modulo => BinaryTypeOperator::Modulo, - _ => unreachable!(), // impossible via operator_allowed check + + BinaryOpKind::Equal + | BinaryOpKind::NotEqual + | BinaryOpKind::Less + | BinaryOpKind::LessEqual + | BinaryOpKind::Greater + | BinaryOpKind::GreaterEqual + | BinaryOpKind::And + | BinaryOpKind::Or + | BinaryOpKind::Xor + | BinaryOpKind::ShiftRight + | BinaryOpKind::ShiftLeft => { + unreachable!("impossible via `operator_allowed` check") + } }; Ok(UnresolvedTypeExpression::BinaryOperation(lhs, op, rhs, expr.span)) } diff --git a/noir/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs b/noir/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs index 8ada3faf756f..b1e559895b83 100644 --- a/noir/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/noir/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -474,18 +474,18 @@ pub(crate) fn check_methods_signatures( let trait_methods = std::mem::take(&mut the_trait.methods); for (file_id, func_id) in impl_methods { - let impl_method = resolver.interner.function_meta(func_id); let func_name = resolver.interner.function_name(func_id).to_owned(); - let mut typecheck_errors = Vec::new(); - // This is None in the case where the impl block has a method that's not part of the trait. // If that's the case, a `MethodNotInTrait` error has already been thrown, and we can ignore // the impl method, since there's nothing in the trait to match its signature against. 
if let Some(trait_method) = trait_methods.iter().find(|method| method.name.0.contents == func_name) { - let impl_function_type = impl_method.typ.instantiate(resolver.interner); + let mut typecheck_errors = Vec::new(); + let impl_method = resolver.interner.function_meta(func_id); + + let (impl_function_type, _) = impl_method.typ.instantiate(resolver.interner); let impl_method_generic_count = impl_method.typ.generic_count() - trait_impl_generic_count; @@ -505,7 +505,7 @@ pub(crate) fn check_methods_signatures( errors.push((error.into(), *file_id)); } - if let Type::Function(impl_params, _, _) = impl_function_type.0 { + if let Type::Function(impl_params, _, _) = impl_function_type { if trait_method.arguments().len() == impl_params.len() { // Check the parameters of the impl method against the parameters of the trait method let args = trait_method.arguments().iter(); @@ -542,6 +542,7 @@ pub(crate) fn check_methods_signatures( // TODO: This is not right since it may bind generic return types trait_method.return_type().unify(&resolved_return_type, &mut typecheck_errors, || { + let impl_method = resolver.interner.function_meta(func_id); let ret_type_span = impl_method.return_type.get_type().span; let expr_span = ret_type_span.expect("return type must always have a span"); diff --git a/noir/compiler/noirc_frontend/src/hir/def_map/mod.rs b/noir/compiler/noirc_frontend/src/hir/def_map/mod.rs index 20f05532ce41..9c46ef35854f 100644 --- a/noir/compiler/noirc_frontend/src/hir/def_map/mod.rs +++ b/noir/compiler/noirc_frontend/src/hir/def_map/mod.rs @@ -173,6 +173,29 @@ impl CrateDefMap { }) }) } + + /// Go through all modules in this crate, and find all functions in + /// each module with the #[export] attribute + pub fn get_all_exported_functions<'a>( + &'a self, + interner: &'a NodeInterner, + ) -> impl Iterator + 'a { + self.modules.iter().flat_map(|(_, module)| { + module.value_definitions().filter_map(|id| { + if let Some(func_id) = id.as_function() { + let attributes = 
interner.function_attributes(&func_id); + if attributes.secondary.contains(&SecondaryAttribute::Export) { + Some(func_id) + } else { + None + } + } else { + None + } + }) + }) + } + /// Go through all modules in this crate, find all `contract ... { ... }` declarations, /// and collect them all into a Vec. pub fn get_all_contracts(&self, interner: &NodeInterner) -> Vec { diff --git a/noir/compiler/noirc_frontend/src/hir/mod.rs b/noir/compiler/noirc_frontend/src/hir/mod.rs index 3683b17a27cc..c62f357167f0 100644 --- a/noir/compiler/noirc_frontend/src/hir/mod.rs +++ b/noir/compiler/noirc_frontend/src/hir/mod.rs @@ -152,7 +152,7 @@ impl Context<'_> { None } - pub fn function_meta(&self, func_id: &FuncId) -> FuncMeta { + pub fn function_meta(&self, func_id: &FuncId) -> &FuncMeta { self.def_interner.function_meta(func_id) } @@ -194,6 +194,19 @@ impl Context<'_> { .collect() } + pub fn get_all_exported_functions_in_crate(&self, crate_id: &CrateId) -> Vec<(String, FuncId)> { + let interner = &self.def_interner; + let def_map = self.def_map(crate_id).expect("The local crate should be analyzed already"); + + def_map + .get_all_exported_functions(interner) + .map(|function_id| { + let function_name = self.function_name(&function_id).to_owned(); + (function_name, function_id) + }) + .collect() + } + /// Return a Vec of all `contract` declarations in the source code and the functions they contain pub fn get_all_contracts(&self, crate_id: &CrateId) -> Vec { self.def_map(crate_id) diff --git a/noir/compiler/noirc_frontend/src/hir/type_check/expr.rs b/noir/compiler/noirc_frontend/src/hir/type_check/expr.rs index caa778525602..50ed98a794ad 100644 --- a/noir/compiler/noirc_frontend/src/hir/type_check/expr.rs +++ b/noir/compiler/noirc_frontend/src/hir/type_check/expr.rs @@ -190,10 +190,11 @@ impl<'interner> TypeChecker<'interner> { // Automatically add `&mut` if the method expects a mutable reference and // the object is not already one. 
if *func_id != FuncId::dummy_id() { - let func_meta = self.interner.function_meta(func_id); + let function_type = + self.interner.function_meta(func_id).typ.clone(); self.try_add_mutable_reference_to_object( &mut method_call, - &func_meta.typ, + &function_type, &mut args, ); } @@ -561,7 +562,7 @@ impl<'interner> TypeChecker<'interner> { let func_meta = self.interner.function_meta(&func_id); let param_len = func_meta.parameters.len(); - (func_meta.typ, param_len) + (func_meta.typ.clone(), param_len) } HirMethodReference::TraitMethodId(method) => { let the_trait = self.interner.get_trait(method.trait_id); @@ -916,7 +917,7 @@ impl<'interner> TypeChecker<'interner> { &self.current_function.expect("unexpected method outside a function"), ); - for constraint in func_meta.trait_constraints { + for constraint in &func_meta.trait_constraints { if *object_type == constraint.typ { if let Some(the_trait) = self.interner.try_get_trait(constraint.trait_id) { for (method_index, method) in the_trait.methods.iter().enumerate() { diff --git a/noir/compiler/noirc_frontend/src/hir/type_check/mod.rs b/noir/compiler/noirc_frontend/src/hir/type_check/mod.rs index 092e8631f1b4..c05c233fe34a 100644 --- a/noir/compiler/noirc_frontend/src/hir/type_check/mod.rs +++ b/noir/compiler/noirc_frontend/src/hir/type_check/mod.rs @@ -50,10 +50,15 @@ pub fn type_check_func(interner: &mut NodeInterner, func_id: FuncId) -> Vec Vec Vec Vec Vec Vec TypeChecker<'interner> { // Must push new lvalue to the interner, we've resolved any field indices self.interner.update_statement(stmt_id, |stmt| match stmt { HirStatement::Assign(assign) => assign.lvalue = new_lvalue, - _ => unreachable!(), + _ => unreachable!("statement is known to be assignment"), }); let span = self.interner.expr_span(&assign_stmt.expression); diff --git a/noir/compiler/noirc_frontend/src/hir_def/function.rs b/noir/compiler/noirc_frontend/src/hir_def/function.rs index 085bda107e38..9fff301f5f76 100644 --- 
a/noir/compiler/noirc_frontend/src/hir_def/function.rs +++ b/noir/compiler/noirc_frontend/src/hir_def/function.rs @@ -131,22 +131,12 @@ impl FuncMeta { } } - pub fn into_function_signature(self) -> FunctionSignature { - // Doesn't use `self.return_type()` so we aren't working with references and don't need a `clone()` - let return_type = match self.typ { - Type::Function(_, ret, _env) => *ret, - Type::Forall(_, typ) => match *typ { - Type::Function(_, ret, _env) => *ret, - _ => unreachable!(), - }, - _ => unreachable!(), - }; - let return_type = match return_type { + pub fn function_signature(&self) -> FunctionSignature { + let return_type = match self.return_type() { Type::Unit => None, - typ => Some(typ), + typ => Some(typ.clone()), }; - - (self.parameters.0, return_type) + (self.parameters.0.clone(), return_type) } /// Gives the (uninstantiated) return type of this function. diff --git a/noir/compiler/noirc_frontend/src/hir_def/traits.rs b/noir/compiler/noirc_frontend/src/hir_def/traits.rs index ea9c2e2928cc..bbe4e1743d28 100644 --- a/noir/compiler/noirc_frontend/src/hir_def/traits.rs +++ b/noir/compiler/noirc_frontend/src/hir_def/traits.rs @@ -6,7 +6,7 @@ use crate::{ Generics, Ident, NoirFunction, Type, TypeVariable, TypeVariableId, }; use fm::FileId; -use noirc_errors::Span; +use noirc_errors::{Location, Span}; #[derive(Clone, Debug, PartialEq, Eq)] pub struct TraitFunction { @@ -56,7 +56,7 @@ pub struct Trait { pub name: Ident, pub generics: Generics, - pub span: Span, + pub location: Location, /// When resolving the types of Trait elements, all references to `Self` resolve /// to this TypeVariable. 
Then when we check if the types of trait impl elements @@ -65,6 +65,7 @@ pub struct Trait { pub self_type_typevar_id: TypeVariableId, pub self_type_typevar: TypeVariable, } + #[derive(Debug)] pub struct TraitImpl { pub ident: Ident, diff --git a/noir/compiler/noirc_frontend/src/hir_def/types.rs b/noir/compiler/noirc_frontend/src/hir_def/types.rs index 977293ec678e..9a9c0c047d1b 100644 --- a/noir/compiler/noirc_frontend/src/hir_def/types.rs +++ b/noir/compiler/noirc_frontend/src/hir_def/types.rs @@ -1,4 +1,5 @@ use std::{ + borrow::Cow, cell::RefCell, collections::{BTreeSet, HashMap}, rc::Rc, @@ -299,7 +300,7 @@ impl std::fmt::Display for TypeAliasType { write!(f, "{}", self.name)?; if !self.generics.is_empty() { - let generics = vecmap(&self.generics, |(_, binding)| binding.0.borrow().to_string()); + let generics = vecmap(&self.generics, |(_, binding)| binding.borrow().to_string()); write!(f, "{}", generics.join(", "))?; } @@ -415,11 +416,11 @@ pub enum TypeVariableKind { /// A TypeVariable is a mutable reference that is either /// bound to some type, or unbound with a given TypeVariableId. #[derive(Debug, PartialEq, Eq, Clone, Hash)] -pub struct TypeVariable(Shared); +pub struct TypeVariable(TypeVariableId, Shared); impl TypeVariable { pub fn unbound(id: TypeVariableId) -> Self { - TypeVariable(Shared::new(TypeBinding::Unbound(id))) + TypeVariable(id, Shared::new(TypeBinding::Unbound(id))) } /// Bind this type variable to a value. @@ -428,7 +429,7 @@ impl TypeVariable { /// Also Panics if the ID of this TypeVariable occurs within the given /// binding, as that would cause an infinitely recursive type. 
pub fn bind(&self, typ: Type) { - let id = match &*self.0.borrow() { + let id = match &*self.1.borrow() { TypeBinding::Bound(binding) => { unreachable!("TypeVariable::bind, cannot bind bound var {} to {}", binding, typ) } @@ -436,11 +437,11 @@ impl TypeVariable { }; assert!(!typ.occurs(id)); - *self.0.borrow_mut() = TypeBinding::Bound(typ); + *self.1.borrow_mut() = TypeBinding::Bound(typ); } pub fn try_bind(&self, binding: Type, span: Span) -> Result<(), TypeCheckError> { - let id = match &*self.0.borrow() { + let id = match &*self.1.borrow() { TypeBinding::Bound(binding) => { unreachable!("Expected unbound, found bound to {binding}") } @@ -450,28 +451,28 @@ impl TypeVariable { if binding.occurs(id) { Err(TypeCheckError::TypeAnnotationsNeeded { span }) } else { - *self.0.borrow_mut() = TypeBinding::Bound(binding); + *self.1.borrow_mut() = TypeBinding::Bound(binding); Ok(()) } } /// Borrows this TypeVariable to (e.g.) manually match on the inner TypeBinding. pub fn borrow(&self) -> std::cell::Ref { - self.0.borrow() + self.1.borrow() } /// Unbind this type variable, setting it to Unbound(id). /// /// This is generally a logic error to use outside of monomorphization. pub fn unbind(&self, id: TypeVariableId) { - *self.0.borrow_mut() = TypeBinding::Unbound(id); + *self.1.borrow_mut() = TypeBinding::Unbound(id); } /// Forcibly bind a type variable to a new type - even if the type /// variable is already bound to a different type. This generally /// a logic error to use outside of monomorphization. 
pub fn force_bind(&self, typ: Type) { - *self.0.borrow_mut() = TypeBinding::Bound(typ); + *self.1.borrow_mut() = TypeBinding::Bound(typ); } } @@ -503,7 +504,7 @@ impl Type { } pub fn type_variable(id: TypeVariableId) -> Type { - let var = TypeVariable(Shared::new(TypeBinding::Unbound(id))); + let var = TypeVariable::unbound(id); Type::TypeVariable(var, TypeVariableKind::Normal) } @@ -512,14 +513,14 @@ impl Type { pub fn constant_variable(length: u64, interner: &mut NodeInterner) -> Type { let id = interner.next_type_variable_id(); let kind = TypeVariableKind::Constant(length); - let var = TypeVariable(Shared::new(TypeBinding::Unbound(id))); + let var = TypeVariable::unbound(id); Type::TypeVariable(var, kind) } pub fn polymorphic_integer(interner: &mut NodeInterner) -> Type { let id = interner.next_type_variable_id(); let kind = TypeVariableKind::IntegerOrField; - let var = TypeVariable(Shared::new(TypeBinding::Unbound(id))); + let var = TypeVariable::unbound(id); Type::TypeVariable(var, kind) } @@ -529,7 +530,7 @@ impl Type { /// they shouldn't be bound over until monomorphization. 
pub fn is_bindable(&self) -> bool { match self { - Type::TypeVariable(binding, _) => match &*binding.0.borrow() { + Type::TypeVariable(binding, _) => match &*binding.borrow() { TypeBinding::Bound(binding) => binding.is_bindable(), TypeBinding::Unbound(_) => true, }, @@ -553,7 +554,7 @@ impl Type { // True if the given type is a NamedGeneric with the target_id let named_generic_id_matches_target = |typ: &Type| { if let Type::NamedGeneric(type_variable, _) = typ { - match &*type_variable.0.borrow() { + match &*type_variable.borrow() { TypeBinding::Bound(_) => { unreachable!("Named generics should not be bound until monomorphization") } @@ -653,7 +654,7 @@ impl Type { match self { Type::Forall(generics, _) => generics.len(), Type::TypeVariable(type_variable, _) | Type::NamedGeneric(type_variable, _) => { - match &*type_variable.0.borrow() { + match &*type_variable.borrow() { TypeBinding::Bound(binding) => binding.generic_count(), TypeBinding::Unbound(_) => 0, } @@ -668,6 +669,25 @@ impl Type { let polymorphic_type_vars = vecmap(type_bindings, |(id, (type_var, _))| (id, type_var)); Type::Forall(polymorphic_type_vars, Box::new(self)) } + + /// Return this type as a monomorphic type - without a `Type::Forall` if there is one. + /// This is only a shallow check since Noir's type system prohibits `Type::Forall` anywhere + /// inside other types. + pub fn as_monotype(&self) -> &Type { + match self { + Type::Forall(_, typ) => typ.as_ref(), + other => other, + } + } + + /// Return the generics and type within this `Type::Forall`. 
+ /// Panics if `self` is not `Type::Forall` + pub fn unwrap_forall(&self) -> (Cow, &Type) { + match self { + Type::Forall(generics, typ) => (Cow::Borrowed(generics), typ.as_ref()), + other => (Cow::Owned(Generics::new()), other), + } + } } impl std::fmt::Display for Type { @@ -687,23 +707,23 @@ impl std::fmt::Display for Type { Signedness::Signed => write!(f, "i{num_bits}"), Signedness::Unsigned => write!(f, "u{num_bits}"), }, - Type::TypeVariable(var, TypeVariableKind::Normal) => write!(f, "{}", var.0.borrow()), + Type::TypeVariable(var, TypeVariableKind::Normal) => write!(f, "{}", var.borrow()), Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) => { - if let TypeBinding::Unbound(_) = &*binding.0.borrow() { + if let TypeBinding::Unbound(_) = &*binding.borrow() { // Show a Field by default if this TypeVariableKind::IntegerOrField is unbound, since that is // what they bind to by default anyway. It is less confusing than displaying it // as a generic. write!(f, "Field") } else { - write!(f, "{}", binding.0.borrow()) + write!(f, "{}", binding.borrow()) } } Type::TypeVariable(binding, TypeVariableKind::Constant(n)) => { - if let TypeBinding::Unbound(_) = &*binding.0.borrow() { + if let TypeBinding::Unbound(_) = &*binding.borrow() { // TypeVariableKind::Constant(n) binds to Type::Constant(n) by default, so just show that. 
write!(f, "{n}") } else { - write!(f, "{}", binding.0.borrow()) + write!(f, "{}", binding.borrow()) } } Type::Struct(s, args) => { @@ -728,7 +748,7 @@ impl std::fmt::Display for Type { } Type::Unit => write!(f, "()"), Type::Error => write!(f, "error"), - Type::NamedGeneric(binding, name) => match &*binding.0.borrow() { + Type::NamedGeneric(binding, name) => match &*binding.borrow() { TypeBinding::Bound(binding) => binding.fmt(f), TypeBinding::Unbound(_) if name.is_empty() => write!(f, "_"), TypeBinding::Unbound(_) => write!(f, "{name}"), @@ -795,7 +815,7 @@ impl Type { target_length: u64, bindings: &mut TypeBindings, ) -> Result<(), UnificationError> { - let target_id = match &*var.0.borrow() { + let target_id = match &*var.borrow() { TypeBinding::Bound(_) => unreachable!(), TypeBinding::Unbound(id) => *id, }; @@ -814,7 +834,7 @@ impl Type { // A TypeVariable is less specific than a MaybeConstant, so we bind // to the other type variable instead. Type::TypeVariable(new_var, kind) => { - let borrow = new_var.0.borrow(); + let borrow = new_var.borrow(); match &*borrow { TypeBinding::Bound(typ) => { typ.try_bind_to_maybe_constant(var, target_length, bindings) @@ -862,7 +882,7 @@ impl Type { var: &TypeVariable, bindings: &mut TypeBindings, ) -> Result<(), UnificationError> { - let target_id = match &*var.0.borrow() { + let target_id = match &*var.borrow() { TypeBinding::Bound(_) => unreachable!(), TypeBinding::Unbound(id) => *id, }; @@ -875,7 +895,7 @@ impl Type { Ok(()) } Type::TypeVariable(self_var, TypeVariableKind::IntegerOrField) => { - let borrow = self_var.0.borrow(); + let borrow = self_var.borrow(); match &*borrow { TypeBinding::Bound(typ) => typ.try_bind_to_polymorphic_int(var, bindings), // Avoid infinitely recursive bindings @@ -887,7 +907,7 @@ impl Type { } } Type::TypeVariable(binding, TypeVariableKind::Normal) => { - let borrow = binding.0.borrow(); + let borrow = binding.borrow(); match &*borrow { TypeBinding::Bound(typ) => 
typ.try_bind_to_polymorphic_int(var, bindings), // Avoid infinitely recursive bindings @@ -917,7 +937,7 @@ impl Type { var: &TypeVariable, bindings: &mut TypeBindings, ) -> Result<(), UnificationError> { - let target_id = match &*var.0.borrow() { + let target_id = match &*var.borrow() { TypeBinding::Bound(_) => unreachable!(), TypeBinding::Unbound(id) => *id, }; @@ -945,7 +965,7 @@ impl Type { fn get_inner_type_variable(&self) -> Option> { match self { - Type::TypeVariable(var, _) | Type::NamedGeneric(var, _) => Some(var.0.clone()), + Type::TypeVariable(var, _) | Type::NamedGeneric(var, _) => Some(var.1.clone()), _ => None, } } @@ -1041,9 +1061,9 @@ impl Type { } (NamedGeneric(binding, _), other) | (other, NamedGeneric(binding, _)) - if !binding.0.borrow().is_unbound() => + if !binding.borrow().is_unbound() => { - if let TypeBinding::Bound(link) = &*binding.0.borrow() { + if let TypeBinding::Bound(link) = &*binding.borrow() { link.try_unify(other, bindings) } else { unreachable!("If guard ensures binding is bound") @@ -1052,8 +1072,8 @@ impl Type { (NamedGeneric(binding_a, name_a), NamedGeneric(binding_b, name_b)) => { // Bound NamedGenerics are caught by the check above - assert!(binding_a.0.borrow().is_unbound()); - assert!(binding_b.0.borrow().is_unbound()); + assert!(binding_a.borrow().is_unbound()); + assert!(binding_b.borrow().is_unbound()); if name_a == name_b { Ok(()) @@ -1101,7 +1121,7 @@ impl Type { // bind to the given type or not. 
bind_variable: impl FnOnce(&mut TypeBindings) -> Result<(), UnificationError>, ) -> Result<(), UnificationError> { - match &*type_variable.0.borrow() { + match &*type_variable.borrow() { // If it is already bound, unify against what it is bound to TypeBinding::Bound(link) => link.try_unify(self, bindings), TypeBinding::Unbound(id) => { @@ -1245,7 +1265,7 @@ impl Type { }) .collect(); - let instantiated = typ.substitute(&replacements); + let instantiated = typ.force_substitute(&replacements); (instantiated, replacements) } other => (other.clone(), HashMap::new()), @@ -1314,7 +1334,7 @@ impl Type { } Type::Forall(_, typ) => typ.find_all_unbound_type_variables(type_variables), Type::TypeVariable(type_variable, _) | Type::NamedGeneric(type_variable, _) => { - match &*type_variable.0.borrow() { + match &*type_variable.borrow() { TypeBinding::Bound(binding) => { binding.find_all_unbound_type_variables(type_variables); } @@ -1332,64 +1352,106 @@ impl Type { /// given bindings if found. If a type variable is not found within /// the given TypeBindings, it is unchanged. pub fn substitute(&self, type_bindings: &TypeBindings) -> Type { + self.substitute_helper(type_bindings, false) + } + + /// Forcibly substitute any type variables found within this type with the + /// given bindings if found. If a type variable is not found within + /// the given TypeBindings, it is unchanged. + /// + /// Compared to `substitute`, this function will also substitute any type variables + /// from type_bindings, even if they are bound in `self`. Since this can undo previous + /// bindings, this function should be avoided unless necessary. Currently, it is only + /// needed when handling bindings between trait methods and their corresponding impl + /// method during monomorphization. 
+ pub fn force_substitute(&self, type_bindings: &TypeBindings) -> Type { + self.substitute_helper(type_bindings, true) + } + + /// This helper function only differs in the additional parameter which, if set, + /// allows substitutions on already-bound type variables. This should be `false` + /// for most uses, but is currently needed during monomorphization when instantiating + /// trait functions to shed any previous bindings from recursive parent calls to the + /// same trait. + fn substitute_helper( + &self, + type_bindings: &TypeBindings, + substitute_bound_typevars: bool, + ) -> Type { if type_bindings.is_empty() { return self.clone(); } - let substitute_binding = |binding: &TypeVariable| match &*binding.0.borrow() { - TypeBinding::Bound(binding) => binding.substitute(type_bindings), - TypeBinding::Unbound(id) => match type_bindings.get(id) { - Some((_, binding)) => binding.clone(), - None => self.clone(), - }, + let substitute_binding = |binding: &TypeVariable| { + // Check the id first to allow substituting to + // type variables that have already been bound over. + // This is needed for monomorphizing trait impl methods.
+ match type_bindings.get(&binding.0) { + Some((_, binding)) if substitute_bound_typevars => binding.clone(), + _ => match &*binding.borrow() { + TypeBinding::Bound(binding) => { + binding.substitute_helper(type_bindings, substitute_bound_typevars) + } + TypeBinding::Unbound(id) => match type_bindings.get(id) { + Some((_, binding)) => binding.clone(), + None => self.clone(), + }, + }, + } }; match self { Type::Array(size, element) => { - let size = Box::new(size.substitute(type_bindings)); - let element = Box::new(element.substitute(type_bindings)); - Type::Array(size, element) + let size = size.substitute_helper(type_bindings, substitute_bound_typevars); + let element = element.substitute_helper(type_bindings, substitute_bound_typevars); + Type::Array(Box::new(size), Box::new(element)) } Type::String(size) => { - let size = Box::new(size.substitute(type_bindings)); - Type::String(size) + let size = size.substitute_helper(type_bindings, substitute_bound_typevars); + Type::String(Box::new(size)) } Type::FmtString(size, fields) => { - let size = Box::new(size.substitute(type_bindings)); - let fields = Box::new(fields.substitute(type_bindings)); - Type::FmtString(size, fields) + let size = size.substitute_helper(type_bindings, substitute_bound_typevars); + let fields = fields.substitute_helper(type_bindings, substitute_bound_typevars); + Type::FmtString(Box::new(size), Box::new(fields)) } Type::NamedGeneric(binding, _) | Type::TypeVariable(binding, _) => { substitute_binding(binding) } - // Do not substitute fields, it can lead to infinite recursion + // Do not substitute fields, it can lead to infinite recursion // and we should not match fields when type checking anyway.
Type::Struct(fields, args) => { - let args = vecmap(args, |arg| arg.substitute(type_bindings)); + let args = vecmap(args, |arg| { + arg.substitute_helper(type_bindings, substitute_bound_typevars) + }); Type::Struct(fields.clone(), args) } Type::Tuple(fields) => { - let fields = vecmap(fields, |field| field.substitute(type_bindings)); + let fields = vecmap(fields, |field| { + field.substitute_helper(type_bindings, substitute_bound_typevars) + }); Type::Tuple(fields) } Type::Forall(typevars, typ) => { - // Trying to substitute a variable defined within a nested Forall + // Trying to substitute a variable defined within a nested Forall // is usually impossible and indicative of an error in the type checker somewhere. for (var, _) in typevars { assert!(!type_bindings.contains_key(var)); } - let typ = Box::new(typ.substitute(type_bindings)); + let typ = Box::new(typ.substitute_helper(type_bindings, substitute_bound_typevars)); Type::Forall(typevars.clone(), typ) } Type::Function(args, ret, env) => { - let args = vecmap(args, |arg| arg.substitute(type_bindings)); - let ret = Box::new(ret.substitute(type_bindings)); - let env = Box::new(env.substitute(type_bindings)); + let args = vecmap(args, |arg| { + arg.substitute_helper(type_bindings, substitute_bound_typevars) + }); + let ret = Box::new(ret.substitute_helper(type_bindings, substitute_bound_typevars)); + let env = Box::new(env.substitute_helper(type_bindings, substitute_bound_typevars)); Type::Function(args, ret, env) } - Type::MutableReference(element) => { - Type::MutableReference(Box::new(element.substitute(type_bindings))) - } + Type::MutableReference(element) => Type::MutableReference(Box::new( + element.substitute_helper(type_bindings, substitute_bound_typevars), + )), Type::FieldElement | Type::Integer(_, _) @@ -1415,7 +1477,7 @@ impl Type { Type::Struct(_, generic_args) => generic_args.iter().any(|arg| arg.occurs(target_id)), Type::Tuple(fields) => fields.iter().any(|field|
field.occurs(target_id)), Type::NamedGeneric(binding, _) | Type::TypeVariable(binding, _) => { - match &*binding.0.borrow() { + match &*binding.borrow() { TypeBinding::Bound(binding) => binding.occurs(target_id), TypeBinding::Unbound(id) => *id == target_id, } @@ -1465,7 +1527,7 @@ impl Type { } Tuple(args) => Tuple(vecmap(args, |arg| arg.follow_bindings())), TypeVariable(var, _) | NamedGeneric(var, _) => { - if let TypeBinding::Bound(typ) = &*var.0.borrow() { + if let TypeBinding::Bound(typ) = &*var.borrow() { return typ.follow_bindings(); } self.clone() @@ -1570,7 +1632,7 @@ impl From<&Type> for PrintableType { Signedness::Signed => PrintableType::SignedInteger { width: *bit_width }, }, Type::TypeVariable(binding, TypeVariableKind::IntegerOrField) => { - match &*binding.0.borrow() { + match &*binding.borrow() { TypeBinding::Bound(typ) => typ.into(), TypeBinding::Unbound(_) => Type::default_int_type().into(), } diff --git a/noir/compiler/noirc_frontend/src/lexer/token.rs b/noir/compiler/noirc_frontend/src/lexer/token.rs index e6542c643ad9..ab131ccd8803 100644 --- a/noir/compiler/noirc_frontend/src/lexer/token.rs +++ b/noir/compiler/noirc_frontend/src/lexer/token.rs @@ -510,6 +510,7 @@ impl Attribute { Attribute::Secondary(SecondaryAttribute::ContractLibraryMethod) } ["event"] => Attribute::Secondary(SecondaryAttribute::Event), + ["export"] => Attribute::Secondary(SecondaryAttribute::Export), ["deprecated", name] => { if !name.starts_with('"') && !name.ends_with('"') { return Err(LexerErrorKind::MalformedFuncAttribute { @@ -588,6 +589,7 @@ pub enum SecondaryAttribute { // the entry point. 
ContractLibraryMethod, Event, + Export, Field(String), Custom(String), } @@ -602,6 +604,7 @@ impl fmt::Display for SecondaryAttribute { SecondaryAttribute::Custom(ref k) => write!(f, "#[{k}]"), SecondaryAttribute::ContractLibraryMethod => write!(f, "#[contract_library_method]"), SecondaryAttribute::Event => write!(f, "#[event]"), + SecondaryAttribute::Export => write!(f, "#[export]"), SecondaryAttribute::Field(ref k) => write!(f, "#[field({k})]"), } } @@ -625,7 +628,7 @@ impl AsRef for SecondaryAttribute { SecondaryAttribute::Deprecated(None) => "", SecondaryAttribute::Custom(string) | SecondaryAttribute::Field(string) => string, SecondaryAttribute::ContractLibraryMethod => "", - SecondaryAttribute::Event => "", + SecondaryAttribute::Event | SecondaryAttribute::Export => "", } } } diff --git a/noir/compiler/noirc_frontend/src/monomorphization/mod.rs b/noir/compiler/noirc_frontend/src/monomorphization/mod.rs index bb0972987e45..3510475e881a 100644 --- a/noir/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/noir/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -20,14 +20,14 @@ use std::{ use crate::{ hir_def::{ expr::*, - function::{FuncMeta, FunctionSignature, Parameters}, + function::{FunctionSignature, Parameters}, stmt::{HirAssignStatement, HirLValue, HirLetStatement, HirPattern, HirStatement}, types, }, node_interner::{self, DefinitionKind, NodeInterner, StmtId, TraitImplKind, TraitMethodId}, token::FunctionAttribute, - ContractFunctionType, FunctionKind, Type, TypeBinding, TypeBindings, TypeVariableKind, UnaryOp, - Visibility, + ContractFunctionType, FunctionKind, Type, TypeBinding, TypeBindings, TypeVariable, + TypeVariableId, TypeVariableKind, UnaryOp, Visibility, }; use self::ast::{Definition, FuncId, Function, LocalId, Program}; @@ -58,8 +58,9 @@ struct Monomorphizer<'interner> { /// confuse users. 
locals: HashMap, - /// Queue of functions to monomorphize next - queue: VecDeque<(node_interner::FuncId, FuncId, TypeBindings)>, + /// Queue of functions to monomorphize next each item in the queue is a tuple of: + /// (old_id, new_monomorphized_id, any type bindings to apply, the trait method if old_id is from a trait impl) + queue: VecDeque<(node_interner::FuncId, FuncId, TypeBindings, Option)>, /// When a function finishes being monomorphized, the monomorphized ast::Function is /// stored here along with its FuncId. @@ -97,23 +98,25 @@ pub fn monomorphize(main: node_interner::FuncId, interner: &NodeInterner) -> Pro let function_sig = monomorphizer.compile_main(main); while !monomorphizer.queue.is_empty() { - let (next_fn_id, new_id, bindings) = monomorphizer.queue.pop_front().unwrap(); + let (next_fn_id, new_id, bindings, trait_method) = monomorphizer.queue.pop_front().unwrap(); monomorphizer.locals.clear(); perform_instantiation_bindings(&bindings); + let impl_bindings = monomorphizer.perform_impl_bindings(trait_method, next_fn_id); monomorphizer.function(next_fn_id, new_id); + undo_instantiation_bindings(impl_bindings); undo_instantiation_bindings(bindings); } let functions = vecmap(monomorphizer.finished_functions, |(_, f)| f); - let FuncMeta { return_distinctness, return_visibility, .. 
} = interner.function_meta(&main); + let meta = interner.function_meta(&main); Program::new( functions, function_sig, - return_distinctness, + meta.return_distinctness, monomorphizer.return_location, - return_visibility, + meta.return_visibility, ) } @@ -154,6 +157,7 @@ impl<'interner> Monomorphizer<'interner> { id: node_interner::FuncId, expr_id: node_interner::ExprId, typ: &HirType, + trait_method: Option, ) -> Definition { let typ = typ.follow_bindings(); match self.globals.get(&id).and_then(|inner_map| inner_map.get(&typ)) { @@ -177,7 +181,7 @@ impl<'interner> Monomorphizer<'interner> { Definition::Builtin(opcode) } FunctionKind::Normal => { - let id = self.queue_function(id, expr_id, typ); + let id = self.queue_function(id, expr_id, typ, trait_method); Definition::Function(id) } FunctionKind::Oracle => { @@ -217,7 +221,7 @@ impl<'interner> Monomorphizer<'interner> { }, ); let main_meta = self.interner.function_meta(&main_id); - main_meta.into_function_signature() + main_meta.function_signature() } fn function(&mut self, f: node_interner::FuncId, id: FuncId) { @@ -237,7 +241,7 @@ impl<'interner> Monomorphizer<'interner> { _ => meta.return_type(), }); - let parameters = self.parameters(meta.parameters); + let parameters = self.parameters(&meta.parameters); let body = self.expr(body_expr_id); let unconstrained = modifiers.is_unconstrained @@ -254,17 +258,17 @@ impl<'interner> Monomorphizer<'interner> { /// Monomorphize each parameter, expanding tuple/struct patterns into multiple parameters /// and binding any generic types found. 
- fn parameters(&mut self, params: Parameters) -> Vec<(ast::LocalId, bool, String, ast::Type)> { + fn parameters(&mut self, params: &Parameters) -> Vec<(ast::LocalId, bool, String, ast::Type)> { let mut new_params = Vec::with_capacity(params.len()); - for parameter in params { - self.parameter(parameter.0, &parameter.1, &mut new_params); + for (parameter, typ, _) in &params.0 { + self.parameter(parameter, typ, &mut new_params); } new_params } fn parameter( &mut self, - param: HirPattern, + param: &HirPattern, typ: &HirType, new_params: &mut Vec<(ast::LocalId, bool, String, ast::Type)>, ) { @@ -276,11 +280,11 @@ impl<'interner> Monomorphizer<'interner> { new_params.push((new_id, definition.mutable, name, self.convert_type(typ))); self.define_local(ident.id, new_id); } - HirPattern::Mutable(pattern, _) => self.parameter(*pattern, typ, new_params), + HirPattern::Mutable(pattern, _) => self.parameter(pattern, typ, new_params), HirPattern::Tuple(fields, _) => { let tuple_field_types = unwrap_tuple_type(typ); - for (field, typ) in fields.into_iter().zip(tuple_field_types) { + for (field, typ) in fields.iter().zip(tuple_field_types) { self.parameter(field, &typ, new_params); } } @@ -288,7 +292,7 @@ impl<'interner> Monomorphizer<'interner> { let struct_field_types = unwrap_struct_type(typ); assert_eq!(struct_field_types.len(), fields.len()); - let mut fields = btree_map(fields, |(name, field)| (name.0.contents, field)); + let mut fields = + btree_map(fields, |(name, field)| (name.0.contents.clone(), field)); // Iterate over `struct_field_types` since `unwrap_struct_type` will always // return the fields in the order defined by the struct type.
@@ -684,7 +689,7 @@ impl<'interner> Monomorphizer<'interner> { let location = Some(ident.location); let name = definition.name.clone(); let typ = self.interner.id_type(expr_id); - let definition = self.lookup_function(*func_id, expr_id, &typ); + let definition = self.lookup_function(*func_id, expr_id, &typ, None); let typ = self.convert_type(&typ); let ident = ast::Ident { location, mutable, definition, name, typ: typ.clone() }; let ident_expression = ast::Expression::Ident(ident); @@ -856,7 +861,7 @@ impl<'interner> Monomorphizer<'interner> { .get_selected_impl_for_expression(expr_id) .expect("ICE: missing trait impl - should be caught during type checking"); - let hir_func_id = match trait_impl { + let func_id = match trait_impl { node_interner::TraitImplKind::Normal(impl_id) => { self.interner.get_trait_implementation(impl_id).borrow().methods [method.method_index] @@ -884,7 +889,7 @@ impl<'interner> Monomorphizer<'interner> { } }; - let func_def = self.lookup_function(hir_func_id, expr_id, &function_type); + let func_def = self.lookup_function(func_id, expr_id, &function_type, Some(method)); let func_id = match func_def { Definition::Function(func_id) => func_id, _ => unreachable!(), @@ -1106,14 +1111,14 @@ impl<'interner> Monomorphizer<'interner> { id: node_interner::FuncId, expr_id: node_interner::ExprId, function_type: HirType, + trait_method: Option, ) -> FuncId { let new_id = self.next_function_id(); self.define_global(id, function_type.clone(), new_id); let bindings = self.interner.get_instantiation_bindings(expr_id); let bindings = self.follow_bindings(bindings); - - self.queue.push_back((id, new_id, bindings)); + self.queue.push_back((id, new_id, bindings, trait_method)); new_id } @@ -1183,7 +1188,7 @@ impl<'interner> Monomorphizer<'interner> { let parameters = vecmap(lambda.parameters, |(pattern, typ)| (pattern, typ, Visibility::Private)).into(); - let parameters = self.parameters(parameters); + let parameters = self.parameters(&parameters); let body =
self.expr(lambda.body); let id = self.next_function_id(); @@ -1234,7 +1239,7 @@ impl<'interner> Monomorphizer<'interner> { let parameters = vecmap(lambda.parameters, |(pattern, typ)| (pattern, typ, Visibility::Private)).into(); - let mut converted_parameters = self.parameters(parameters); + let mut converted_parameters = self.parameters(&parameters); let id = self.next_function_id(); let name = lambda_name.to_owned(); @@ -1502,6 +1507,46 @@ impl<'interner> Monomorphizer<'interner> { result } + + /// Call sites are instantiated against the trait method, but when an impl is later selected, + /// the corresponding method in the impl will have a different set of generics. `perform_impl_bindings` + /// is needed to apply the generics from the trait method to the impl method. Without this, + /// static method references to generic impls (e.g. `Eq::eq` for `[T; N]`) will fail to re-apply + /// the correct type bindings during monomorphization. + fn perform_impl_bindings( + &self, + trait_method: Option, + impl_method: node_interner::FuncId, + ) -> TypeBindings { + let mut bindings = TypeBindings::new(); + + if let Some(trait_method) = trait_method { + let the_trait = self.interner.get_trait(trait_method.trait_id); + + let trait_method_type = the_trait.methods[trait_method.method_index].typ.as_monotype(); + + // Make each NamedGeneric in this type bindable by replacing it with a TypeVariable + // with the same internal id and binding.
+ let (generics, impl_method_type) = + self.interner.function_meta(&impl_method).typ.unwrap_forall(); + + let replace_type_variable = |(id, var): &(TypeVariableId, TypeVariable)| { + (*id, (var.clone(), Type::TypeVariable(var.clone(), TypeVariableKind::Normal))) + }; + + // Replace each NamedGeneric with a TypeVariable containing the same internal type variable + let type_bindings = generics.iter().map(replace_type_variable).collect(); + let impl_method_type = impl_method_type.force_substitute(&type_bindings); + + trait_method_type.try_unify(&impl_method_type, &mut bindings).unwrap_or_else(|_| { + unreachable!("Impl method type {} does not unify with trait method type {} during monomorphization", impl_method_type, trait_method_type) + }); + + perform_instantiation_bindings(&bindings); + } + + bindings + } } fn unwrap_tuple_type(typ: &HirType) -> Vec { diff --git a/noir/compiler/noirc_frontend/src/node_interner.rs b/noir/compiler/noirc_frontend/src/node_interner.rs index 9082df1bcd52..e0170ec914fa 100644 --- a/noir/compiler/noirc_frontend/src/node_interner.rs +++ b/noir/compiler/noirc_frontend/src/node_interner.rs @@ -468,6 +468,7 @@ impl NodeInterner { /// The [Location] may not necessarily point to the beginning of the item /// so we check if the location's span is contained within the start or end /// of each items [Span] + #[tracing::instrument(skip(self))] pub fn find_location_index(&self, location: Location) -> Option> { let mut location_candidate: Option<(&Index, &Location)> = None; @@ -504,7 +505,7 @@ impl NodeInterner { id: type_id, name: unresolved_trait.trait_def.name.clone(), crate_id: unresolved_trait.crate_id, - span: unresolved_trait.trait_def.span, + location: Location::new(unresolved_trait.trait_def.span, unresolved_trait.file_id), generics: vecmap(&unresolved_trait.trait_def.generics, |_| { // Temporary type variable ids before the trait is resolved to its actual ids. 
// This lets us record how many arguments the type expects so that other types @@ -767,12 +768,12 @@ impl NodeInterner { } /// Returns the interned meta data corresponding to `func_id` - pub fn function_meta(&self, func_id: &FuncId) -> FuncMeta { - self.func_meta.get(func_id).cloned().expect("ice: all function ids should have metadata") + pub fn function_meta(&self, func_id: &FuncId) -> &FuncMeta { + self.func_meta.get(func_id).expect("ice: all function ids should have metadata") } - pub fn try_function_meta(&self, func_id: &FuncId) -> Option { - self.func_meta.get(func_id).cloned() + pub fn try_function_meta(&self, func_id: &FuncId) -> Option<&FuncMeta> { + self.func_meta.get(func_id) } pub fn function_ident(&self, func_id: &FuncId) -> crate::Ident { @@ -1103,7 +1104,10 @@ impl NodeInterner { recursion_limit: u32, ) -> Result<(), Vec> { for constraint in where_clause { - let constraint_type = constraint.typ.substitute(instantiation_bindings); + // Instantiation bindings are generally safe to force substitute into the same type. + // This is needed here to undo any bindings done to trait methods by monomorphization. + // Otherwise, an impl for (A, B) could get narrowed to only an impl for e.g. (u8, u16). + let constraint_type = constraint.typ.force_substitute(instantiation_bindings); let constraint_type = constraint_type.substitute(type_bindings); self.lookup_trait_implementation_helper( @@ -1142,6 +1146,7 @@ impl NodeInterner { } /// Adds a trait implementation to the list of known implementations. + #[tracing::instrument(skip(self))] pub fn add_trait_implementation( &mut self, object_type: Type, @@ -1274,7 +1279,9 @@ impl NodeInterner { /// Returns the [Location] of the definition of the given Ident found at [Span] of the given [FileId]. /// Returns [None] when definition is not found. 
pub fn get_definition_location_from(&self, location: Location) -> Option { - self.find_location_index(location).and_then(|index| self.resolve_location(index)) + self.find_location_index(location) + .and_then(|index| self.resolve_location(index)) + .or_else(|| self.try_resolve_trait_impl_location(location)) } /// For a given [Index] we return [Location] to which we resolved to @@ -1429,6 +1436,24 @@ impl NodeInterner { pub(crate) fn ordering_type(&self) -> Type { self.ordering_type.clone().expect("Expected ordering_type to be set in the NodeInterner") } + + /// Attempts to resolve [Location] of [Trait] based on [Location] of [TraitImpl] + /// This is used by LSP to resolve the location of a trait based on the location of a trait impl. + /// + /// Example: + /// impl Foo for Bar { ... } -> trait Foo { ... } + fn try_resolve_trait_impl_location(&self, location: Location) -> Option { + self.trait_implementations + .iter() + .find(|shared_trait_impl| { + let trait_impl = shared_trait_impl.borrow(); + trait_impl.file == location.file && trait_impl.ident.span().contains(&location.span) + }) + .and_then(|shared_trait_impl| { + let trait_impl = shared_trait_impl.borrow(); + self.traits.get(&trait_impl.trait_id).map(|trait_| trait_.location) + }) + } } impl Methods { diff --git a/noir/compiler/noirc_frontend/src/parser/parser.rs b/noir/compiler/noirc_frontend/src/parser/parser.rs index 660c85759b97..b149eb24f073 100644 --- a/noir/compiler/noirc_frontend/src/parser/parser.rs +++ b/noir/compiler/noirc_frontend/src/parser/parser.rs @@ -312,7 +312,7 @@ fn function_return_type() -> impl NoirParser<((Distinctness, Visibility), Functi fn attribute() -> impl NoirParser { token_kind(TokenKind::Attribute).map(|token| match token { Token::Attribute(attribute) => attribute, - _ => unreachable!(), + _ => unreachable!("Parser should have already errored due to token not being an attribute"), }) } @@ -369,7 +369,7 @@ fn function_parameters<'a>(allow_self: bool) -> impl NoirParser> + 'a /// 
This parser always parses no input and fails fn nothing() -> impl NoirParser { - one_of([]).map(|_| unreachable!()) + one_of([]).map(|_| unreachable!("parser should always error")) } fn self_parameter() -> impl NoirParser { diff --git a/noir/compiler/wasm/src/compile.rs b/noir/compiler/wasm/src/compile.rs index c6ae0ae1f424..54fdccf13693 100644 --- a/noir/compiler/wasm/src/compile.rs +++ b/noir/compiler/wasm/src/compile.rs @@ -2,9 +2,9 @@ use fm::FileManager; use gloo_utils::format::JsValueSerdeExt; use js_sys::{JsString, Object}; use nargo::artifacts::{ - contract::{PreprocessedContract, PreprocessedContractFunction}, + contract::{ContractArtifact, ContractFunctionArtifact}, debug::DebugArtifact, - program::PreprocessedProgram, + program::ProgramArtifact, }; use noirc_driver::{ add_dep, compile_contract, compile_main, file_manager_with_stdlib, prepare_crate, @@ -148,8 +148,8 @@ impl PathToFileSourceMap { } pub enum CompileResult { - Contract { contract: PreprocessedContract, debug: DebugArtifact }, - Program { program: PreprocessedProgram, debug: DebugArtifact }, + Contract { contract: ContractArtifact, debug: DebugArtifact }, + Program { program: ProgramArtifact, debug: DebugArtifact }, } #[wasm_bindgen] @@ -195,7 +195,7 @@ pub fn compile( let optimized_contract = nargo::ops::optimize_contract(compiled_contract, expression_width); - let compile_output = preprocess_contract(optimized_contract); + let compile_output = generate_contract_artifact(optimized_contract); Ok(JsCompileResult::new(compile_output)) } else { let compiled_program = compile_main(&mut context, crate_id, &compile_options, None, true) @@ -210,7 +210,7 @@ pub fn compile( let optimized_program = nargo::ops::optimize_program(compiled_program, expression_width); - let compile_output = preprocess_program(optimized_program); + let compile_output = generate_program_artifact(optimized_program); Ok(JsCompileResult::new(compile_output)) } } @@ -272,50 +272,32 @@ fn add_noir_lib(context: &mut Context, 
library_name: &CrateName) -> CrateId { prepare_dependency(context, &path_to_lib) } -pub(crate) fn preprocess_program(program: CompiledProgram) -> CompileResult { +pub(crate) fn generate_program_artifact(program: CompiledProgram) -> CompileResult { let debug_artifact = DebugArtifact { - debug_symbols: vec![program.debug], - file_map: program.file_map, - warnings: program.warnings, + debug_symbols: vec![program.debug.clone()], + file_map: program.file_map.clone(), + warnings: program.warnings.clone(), }; - let preprocessed_program = PreprocessedProgram { - hash: program.hash, - abi: program.abi, - noir_version: NOIR_ARTIFACT_VERSION_STRING.to_string(), - bytecode: program.circuit, - }; - - CompileResult::Program { program: preprocessed_program, debug: debug_artifact } + CompileResult::Program { program: program.into(), debug: debug_artifact } } -// TODO: This method should not be doing so much, most of this should be done in nargo or the driver -pub(crate) fn preprocess_contract(contract: CompiledContract) -> CompileResult { +pub(crate) fn generate_contract_artifact(contract: CompiledContract) -> CompileResult { let debug_artifact = DebugArtifact { debug_symbols: contract.functions.iter().map(|function| function.debug.clone()).collect(), file_map: contract.file_map, warnings: contract.warnings, }; - let preprocessed_functions = contract - .functions - .into_iter() - .map(|func| PreprocessedContractFunction { - name: func.name, - function_type: func.function_type, - is_internal: func.is_internal, - abi: func.abi, - bytecode: func.bytecode, - }) - .collect(); - - let preprocessed_contract = PreprocessedContract { + let functions = contract.functions.into_iter().map(ContractFunctionArtifact::from).collect(); + + let contract_artifact = ContractArtifact { noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), name: contract.name, - functions: preprocessed_functions, + functions, events: contract.events, }; - CompileResult::Contract { contract: preprocessed_contract, 
debug: debug_artifact } + CompileResult::Contract { contract: contract_artifact, debug: debug_artifact } } #[cfg(test)] diff --git a/noir/compiler/wasm/src/compile_new.rs b/noir/compiler/wasm/src/compile_new.rs index 0cd1a2c50e59..9ac080ffcaef 100644 --- a/noir/compiler/wasm/src/compile_new.rs +++ b/noir/compiler/wasm/src/compile_new.rs @@ -1,6 +1,6 @@ use crate::compile::{ - file_manager_with_source_map, preprocess_contract, preprocess_program, JsCompileResult, - PathToFileSourceMap, + file_manager_with_source_map, generate_contract_artifact, generate_program_artifact, + JsCompileResult, PathToFileSourceMap, }; use crate::errors::{CompileError, JsCompileError}; use noirc_driver::{ @@ -79,11 +79,12 @@ impl CompilerContext { crate_name: String, from: &CrateIDWrapper, to: &CrateIDWrapper, - ) { - let parsed_crate_name: CrateName = crate_name - .parse() - .unwrap_or_else(|_| panic!("Failed to parse crate name {}", crate_name)); + ) -> Result<(), JsCompileError> { + let parsed_crate_name: CrateName = + crate_name.parse().map_err(|err_string| JsCompileError::new(err_string, Vec::new()))?; + add_dep(&mut self.context, from.0, to.0, parsed_crate_name); + Ok(()) } pub fn compile_program( @@ -108,7 +109,7 @@ impl CompilerContext { let optimized_program = nargo::ops::optimize_program(compiled_program, np_language); - let compile_output = preprocess_program(optimized_program); + let compile_output = generate_program_artifact(optimized_program); Ok(JsCompileResult::new(compile_output)) } @@ -133,7 +134,7 @@ impl CompilerContext { let optimized_contract = nargo::ops::optimize_contract(compiled_contract, np_language); - let compile_output = preprocess_contract(optimized_contract); + let compile_output = generate_contract_artifact(optimized_contract); Ok(JsCompileResult::new(compile_output)) } } @@ -188,7 +189,7 @@ pub fn compile_( crate_names.insert(lib_name.clone(), crate_id); // Add the dependency edges - compiler_context.add_dependency_edge(lib_name_string, &root_id, 
&crate_id); + compiler_context.add_dependency_edge(lib_name_string, &root_id, &crate_id)?; } // Process the transitive dependencies of the root @@ -207,7 +208,11 @@ pub fn compile_( .entry(dependency_name.clone()) .or_insert_with(|| add_noir_lib(&mut compiler_context, dependency_name)); - compiler_context.add_dependency_edge(dependency_name_string, &crate_id, dep_crate_id); + compiler_context.add_dependency_edge( + dependency_name_string, + &crate_id, + dep_crate_id, + )?; } } @@ -278,8 +283,8 @@ mod test { let lib1_crate_id = context.process_dependency_crate("lib1/lib.nr".to_string()); let root_crate_id = context.root_crate_id(); - context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); - context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id).unwrap(); + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id).unwrap(); assert_eq!(context.crate_graph().number_of_crates(), 3); } @@ -303,9 +308,9 @@ mod test { let lib3_crate_id = context.process_dependency_crate("lib3/lib.nr".to_string()); let root_crate_id = context.root_crate_id(); - context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id); - context.add_dependency_edge("lib2".to_string(), &lib1_crate_id, &lib2_crate_id); - context.add_dependency_edge("lib3".to_string(), &lib2_crate_id, &lib3_crate_id); + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id).unwrap(); + context.add_dependency_edge("lib2".to_string(), &lib1_crate_id, &lib2_crate_id).unwrap(); + context.add_dependency_edge("lib3".to_string(), &lib2_crate_id, &lib3_crate_id).unwrap(); assert_eq!(context.crate_graph().number_of_crates(), 5); } @@ -328,8 +333,8 @@ mod test { let lib3_crate_id = context.process_dependency_crate("lib3/lib.nr".to_string()); let root_crate_id = context.root_crate_id(); - context.add_dependency_edge("lib1".to_string(), 
&root_crate_id, &lib1_crate_id); - context.add_dependency_edge("lib3".to_string(), &lib2_crate_id, &lib3_crate_id); + context.add_dependency_edge("lib1".to_string(), &root_crate_id, &lib1_crate_id).unwrap(); + context.add_dependency_edge("lib3".to_string(), &lib2_crate_id, &lib3_crate_id).unwrap(); assert_eq!(context.crate_graph().number_of_crates(), 5); } diff --git a/noir/cspell.json b/noir/cspell.json index 94449a68a773..8e3f248acfae 100644 --- a/noir/cspell.json +++ b/noir/cspell.json @@ -11,12 +11,13 @@ "arity", "arkworks", "arraysort", + "barebones", "barretenberg", "bincode", "bindgen", "bitand", - "bitxor", "bitor", + "bitxor", "blackbox", "bridgekeeper", "brillig", @@ -29,6 +30,7 @@ "chumsky", "clippy", "codegen", + "codegenned", "codegens", "Codespaces", "codespan", @@ -81,6 +83,7 @@ "jmpif", "jmpifs", "jmps", + "jsdoc", "Jubjub", "keccak", "krate", @@ -96,6 +99,7 @@ "montcurve", "nand", "nargo", + "neovim", "newtype", "nightlies", "nixpkgs", diff --git a/noir/docs/docs/noir/concepts/data_types/strings.md b/noir/docs/docs/noir/concepts/data_types/strings.md index 8d76d4ca654c..311dfd644168 100644 --- a/noir/docs/docs/noir/concepts/data_types/strings.md +++ b/noir/docs/docs/noir/concepts/data_types/strings.md @@ -17,13 +17,13 @@ sidebar_position: 3 The string type is a fixed length value defined with `str`. You can use strings in `assert()` functions or print them with -`std::println()`. See more about [Logging](../../standard_library/logging). +`println()`. See more about [Logging](../../standard_library/logging). 
```rust use dep::std; fn main(message : pub str<11>, hex_as_string : str<4>) { - std::println(message); + println(message); assert(message == "hello world"); assert(hex_as_string == "0x41"); } diff --git a/noir/docs/docs/noir/concepts/data_types/vectors.mdx b/noir/docs/docs/noir/concepts/data_types/vectors.mdx index 10e35711b74b..aed13183719e 100644 --- a/noir/docs/docs/noir/concepts/data_types/vectors.mdx +++ b/noir/docs/docs/noir/concepts/data_types/vectors.mdx @@ -14,8 +14,6 @@ A vector is a collection type similar to Rust's Vector type. It's convenient way Example: ```rust -use dep::std::collections::vec::Vec; - let mut vector: Vec = Vec::new(); for i in 0..5 { vector.push(i); diff --git a/noir/docs/docs/noir/concepts/generics.md b/noir/docs/docs/noir/concepts/generics.md index 443ca2b45a5a..ec2db95839ab 100644 --- a/noir/docs/docs/noir/concepts/generics.md +++ b/noir/docs/docs/noir/concepts/generics.md @@ -30,25 +30,15 @@ struct RepeatedValue { } impl RepeatedValue { - fn new(value: T) -> Self { - Self { value, count: 1 } - } - - fn increment(mut repeated: Self) -> Self { - repeated.count += 1; - repeated - } - fn print(self) { for _i in 0 .. self.count { - dep::std::println(self.value); + println(self.value); } } } fn main() { - let mut repeated = RepeatedValue::new("Hello!"); - repeated = repeated.increment(); + let repeated = RepeatedValue { value: "Hello!", count: 2 }; repeated.print(); } ``` @@ -80,35 +70,37 @@ impl BigInt { ## Calling functions on generic parameters -Unlike Rust, Noir does not have traits, so how can one translate the equivalent of a trait bound in -Rust into Noir? That is, how can we write a function that is generic over some type `T`, while also -requiring there is a function like `eq: fn(T, T) -> bool` that works on the type? +Since a generic type `T` can represent any type, how can we call functions on the underlying type? +In other words, how can we go from "any type `T`" to "any type `T` that has certain methods available?" 
-The answer is that we can translate this by passing in the function manually. Here's an example of -implementing array equality in Noir: +This is what [traits](../concepts/traits) are for in Noir. Here's an example of a function generic over +any type `T` that implements the `Eq` trait for equality: ```rust -fn array_eq(array1: [T; N], array2: [T; N], elem_eq: fn(T, T) -> bool) -> bool { - if array1.len() != array2.len() { - false +fn first_element_is_equal(array1: [T; N], array2: [T; N]) -> bool + where T: Eq +{ + if (array1.len() == 0) | (array2.len() == 0) { + true } else { - let mut result = true; - for i in 0 .. array1.len() { - result &= elem_eq(array1[i], array2[i]); - } - result + array1[0] == array2[0] } } fn main() { - assert(array_eq([1, 2, 3], [1, 2, 3], |a, b| a == b)); + assert(first_element_is_equal([1, 2, 3], [1, 5, 6])); - // We can use array_eq even for arrays of structs, as long as we have - // an equality function for these structs we can pass in + // We can use first_element_is_equal for arrays of any type + // as long as we have an Eq impl for the types we pass in let array = [MyStruct::new(), MyStruct::new()]; assert(array_eq(array, array, MyStruct::eq)); } + +impl Eq for MyStruct { + fn eq(self, other: MyStruct) -> bool { + self.foo == other.foo + } +} ``` -You can see an example of generics in the tests -[here](https://github.com/noir-lang/noir/blob/master/tooling/nargo_cli/tests/execution_success/generics/src/main.nr). +You can find more details on traits and trait implementations on the [traits page](../concepts/traits). 
diff --git a/noir/docs/docs/noir/standard_library/cryptographic_primitives/scalar.mdx b/noir/docs/docs/noir/standard_library/cryptographic_primitives/scalar.mdx index 1e686303c182..aa4fb8cbaedb 100644 --- a/noir/docs/docs/noir/standard_library/cryptographic_primitives/scalar.mdx +++ b/noir/docs/docs/noir/standard_library/cryptographic_primitives/scalar.mdx @@ -21,7 +21,7 @@ example ```rust fn main(x : Field) { let scal = std::scalar_mul::fixed_base_embedded_curve(x); - std::println(scal); + println(scal); } ``` diff --git a/noir/docs/docs/noir/standard_library/logging.md b/noir/docs/docs/noir/standard_library/logging.md index 2e163b52ab32..db75ef9f86fa 100644 --- a/noir/docs/docs/noir/standard_library/logging.md +++ b/noir/docs/docs/noir/standard_library/logging.md @@ -27,18 +27,19 @@ It is recommended to use `nargo execute` if you want to debug failing constraint Both `print` and `println` are generic functions which can work on integers, fields, strings, and even structs or expressions. Note however, that slices are currently unsupported. 
For example: ```rust -use dep::std; - struct Person { - age : Field, - height : Field, + age: Field, + height: Field, } -fn main(age : Field, height : Field) { - let person = Person { age : age, height : height }; - std::println(person); - std::println(age + height); - std::println("Hello world!"); +fn main(age: Field, height: Field) { + let person = Person { + age: age, + height: height, + }; + println(person); + println(age + height); + println("Hello world!"); } ``` @@ -46,22 +47,22 @@ You can print different types in the same statement (including strings) with a t ```rust let fmt_str = f"i: {i}, j: {j}"; - std::println(fmt_str); + println(fmt_str); let s = myStruct { y: x, x: y }; - std::println(s); + println(s); - std::println(f"i: {i}, s: {s}"); + println(f"i: {i}, s: {s}"); - std::println(x); - std::println([x, y]); + println(x); + println([x, y]); let foo = fooStruct { my_struct: s, foo: 15 }; - std::println(f"s: {s}, foo: {foo}"); + println(f"s: {s}, foo: {foo}"); - std::println(15); // prints 0x0f, implicit Field - std::println(-1 as u8); // prints 255 - std::println(-1 as i8); // prints -1 + println(15); // prints 0x0f, implicit Field + println(-1 as u8); // prints 255 + println(-1 as i8); // prints -1 ``` Examples shown above are interchangeable between the two `print` statements: @@ -69,9 +70,9 @@ Examples shown above are interchangeable between the two `print` statements: ```rust let person = Person { age : age, height : height }; -std::println(person); -std::print(person); +println(person); +print(person); -std::println("Hello world!"); // Prints with a newline at the end of the input -std::print("Hello world!"); // Prints the input and keeps cursor on the same line +println("Hello world!"); // Prints with a newline at the end of the input +print("Hello world!"); // Prints the input and keeps cursor on the same line ``` diff --git a/noir/docs/docs/noir/standard_library/merkle_trees.md b/noir/docs/docs/noir/standard_library/merkle_trees.md index 
5b45617812ac..fa4886778840 100644 --- a/noir/docs/docs/noir/standard_library/merkle_trees.md +++ b/noir/docs/docs/noir/standard_library/merkle_trees.md @@ -45,7 +45,7 @@ fn main(index: Field, priv_key: Field, secret: Field, note_hash_path: [Field; 3] let note_commitment = std::hash::pedersen([pubkey_x, pubkey_y, secret]); let root = std::merkle::compute_merkle_root(note_commitment[0], index, note_hash_path); - std::println(root); + println(root); } ``` diff --git a/noir/docs/docs/noir/standard_library/options.md b/noir/docs/docs/noir/standard_library/options.md index 3d3139fb98b8..970c9cfbf11d 100644 --- a/noir/docs/docs/noir/standard_library/options.md +++ b/noir/docs/docs/noir/standard_library/options.md @@ -11,11 +11,9 @@ struct Option { } ``` -You can import the Option type into your Noir program like so: +The `Option` type, already imported into your Noir program, can be used directly: ```rust -use dep::std::option::Option; - fn main() { let none = Option::none(); let some = Option::some(3); diff --git a/noir/docs/docs/tutorials/noirjs_app.md b/noir/docs/docs/tutorials/noirjs_app.md index 302ee4aeadea..4293aa053fcc 100644 --- a/noir/docs/docs/tutorials/noirjs_app.md +++ b/noir/docs/docs/tutorials/noirjs_app.md @@ -1,37 +1,63 @@ --- -title: Tiny NoirJS app -description: Learn how to setup a new app that uses Noir to generate and verify zero-knowledge SNARK proofs in a typescript or javascript environment -keywords: [how to, guide, javascript, typescript, noir, barretenberg, zero-knowledge, proofs] +title: Building a web app with NoirJS +description: Learn how to setup a new app that uses Noir to generate and verify zero-knowledge SNARK proofs in a typescript or javascript environment. +keywords: [how to, guide, javascript, typescript, noir, barretenberg, zero-knowledge, proofs, app] sidebar_position: 0 --- -NoirJS works both on the browser and on the server, and works for both ESM and CJS module systems. 
In this page, we will learn how can we write a simple test and a simple web app to verify the standard Noir example. +NoirJS is a set of packages meant to work both in a browser and a server environment. In this tutorial, we will build a simple web app using them. From here, you should get an idea on how to proceed with your own Noir projects! You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app). -## Before we start +## Setup :::note -Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.18.x matches `noir_js@0.18.x`, etc. +Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.19.x matches `noir_js@0.19.x`, etc. -In this guide, we will be pinned to 0.17.0. +In this guide, we will be pinned to 0.19.4. ::: -Make sure you have Node installed on your machine by opening a terminal and executing `node --version`. If you don't see a version, you should install [node](https://github.com/nvm-sh/nvm). You can also use `yarn` if you prefer that package manager over npm (which comes with node). +Before we start, we want to make sure we have Node and Nargo installed. -First of all, follow the the [Nargo guide](../getting_started/installation/index.md) to install nargo version 0.17.0 and create a new project with `nargo new circuit`. Once there, `cd` into the `circuit` folder. You should then be able to compile your circuit into `json` format and see it inside the `target` folder: +We start by opening a terminal and executing `node --version`. If we don't get an output like `v20.10.0`, that means node is not installed. Let's do that by following the handy [nvm guide](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script). -```bash -nargo compile +As for `Nargo`, we can follow the the [Nargo guide](../getting_started/installation/index.md) to install it. 
If you're lazy, just paste this on a terminal and run `noirup`: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash ``` -Your folder structure should look like: +Easy enough. Onwards! + +## Our project + +ZK is a powerful technology. An app that doesn't reveal one of the inputs to *anyone* is almost unbelievable, yet Noir makes it as easy as a single line of code. + +In fact, it's so simple that it comes nicely packaged in `nargo`. Let's do that! + +### Nargo + +Run: + +```nargo new circuit``` + +And... That's about it. Your program is ready to be compiled and run. + +To compile, let's `cd` into the `circuit` folder to enter our project, and call: + +```nargo compile``` + +This compiles our circuit into `json` format and add it to a new `target` folder. + +:::info + +At this point in the tutorial, your folder structure should look like this: ```tree . -└── circuit +└── circuit <---- our working directory ├── Nargo.toml ├── src │ └── main.nr @@ -39,84 +65,46 @@ Your folder structure should look like: └── circuit.json ``` -## Starting a new project +::: -Go back to the previous folder and start a new project by running run `npm init`. You can configure your project or just leave the defaults, and see a `package.json` appear in your root folder. +### Node and Vite -## Installing dependencies +If you want to explore Nargo, feel free to go on a side-quest now and follow the steps in the +[getting started](../getting_started/create_a_project) guide. However, we want our app to run on the browser, so we need Vite. -We'll need two `npm` packages. These packages will provide us the methods we need to run and verify proofs: +Vite is a powerful tool to generate static websites. While it provides all kinds of features, let's just go barebones with some good old vanilla JS. 
-```bash -npm i @noir-lang/backend_barretenberg@^0.17.0 @noir-lang/noir_js@^0.17.0 -``` +To do this this, go back to the previous folder (`cd ..`) and create a new vite project by running `npm create vite` and choosing "Vanilla" and "Javascript". -To serve our page, we can use a build tool such as `vite`. Because we're gonna use some `wasm` files, we need to install a plugin as well. Run: +You should see `vite-project` appear in your root folder. This seems like a good time to `cd` into it and install our NoirJS packages: ```bash -npm i --save-dev vite rollup-plugin-copy +npm i @noir-lang/backend_barretenberg@0.19.4 @noir-lang/noir_js@0.19.4 ``` -Since we're on the dependency world, we may as well define a nice starting script. Vite makes it easy. Just open `package.json`, find the block "scripts" and add this just below the line with `"test" : "echo......."`: - -```json - "start": "vite --open" -``` +:::info -If you want do build a static website, you can also add some build and preview scripts: +At this point in the tutorial, your folder structure should look like this: -```json - "build": "vite build", - "preview": "vite preview" +```tree +. +└── circuit + └── ...etc... +└── vite-project <---- our working directory + └── ...etc... ``` -## Vite plugins +::: -Vite is great, but support from `wasm` doesn't work out-of-the-box. We're gonna write a quick plugin and use another one. Just copy and paste this into a file named `vite.config.js`. You don't need to understand it, just trust me bro. 
+#### Some cleanup -```js -import { defineConfig } from 'vite'; -import copy from 'rollup-plugin-copy'; -import fs from 'fs'; -import path from 'path'; - -const wasmContentTypePlugin = { - name: 'wasm-content-type-plugin', - configureServer(server) { - server.middlewares.use(async (req, res, next) => { - if (req.url.endsWith('.wasm')) { - res.setHeader('Content-Type', 'application/wasm'); - const newPath = req.url.replace('deps', 'dist'); - const targetPath = path.join(__dirname, newPath); - const wasmContent = fs.readFileSync(targetPath); - return res.end(wasmContent); - } - next(); - }); - }, -}; - -export default defineConfig(({ command }) => { - if (command === 'serve') { - return { - plugins: [ - copy({ - targets: [{ src: 'node_modules/**/*.wasm', dest: 'node_modules/.vite/dist' }], - copySync: true, - hook: 'buildStart', - }), - command === 'serve' ? wasmContentTypePlugin : [], - ], - }; - } +`npx create vite` is amazing but it creates a bunch of files we don't really need for our simple example. Actually, let's just delete everything except for `index.html`, `main.js` and `package.json`. I feel lighter already. - return {}; -}); -``` +![my heart is ready for you, noir.js](../../static/img/memes/titanic.jpeg) ## HTML -Here's the simplest HTML with some terrible UI. Create a file called `index.html` and paste this: +Our app won't run like this, of course. We need some working HTML, at least. Let's open our broken-hearted `index.html` and replace everything with this code snippet: ```html @@ -136,8 +124,12 @@ Here's the simplest HTML with some terrible UI. Create a file called `index.html - -

Very basic Noir app

+ +

Noir app

+
+ + +

Logs

Proof

@@ -146,14 +138,25 @@ Here's the simplest HTML with some terrible UI. Create a file called `index.html ``` +It *could* be a beautiful UI... Depending on which universe you live in. + ## Some good old vanilla Javascript -Create a new file `app.js`, which is where our javascript code will live. Let's start with this code inside: +Our love for Noir needs undivided attention, so let's just open `main.js` and delete everything (this is where the romantic scenery becomes a bit creepy). + +Start by pasting in this boilerplate code: ```js -document.addEventListener('DOMContentLoaded', async () => { - // here's where the magic happens -}); +const setup = async () => { + await Promise.all([ + import("@noir-lang/noirc_abi").then(module => + module.default(new URL("@noir-lang/noirc_abi/web/noirc_abi_wasm_bg.wasm", import.meta.url).toString()) + ), + import("@noir-lang/acvm_js").then(module => + module.default(new URL("@noir-lang/acvm_js/web/acvm_js_bg.wasm", import.meta.url).toString()) + ) + ]); +} function display(container, msg) { const c = document.getElementById(container); @@ -161,73 +164,83 @@ function display(container, msg) { p.textContent = msg; c.appendChild(p); } + +document.getElementById('submitGuess').addEventListener('click', async () => { + try { + // here's where love happens + } catch(err) { + display("logs", "Oh 💔 Wrong guess") + } +}); + ``` -We can manipulate our website with this little function, so we can see our website working. +The display function doesn't do much. We're simply manipulating our website to see stuff happening. For example, if the proof fails, it will simply log a broken heart 😢 + +As for the `setup` function, it's just a sad reminder that dealing with `wasm` on the browser is not as easy as it should. Just copy, paste, and forget. 
-## Adding Noir +:::info -If you come from the previous page, your folder structure should look like this: +At this point in the tutorial, your folder structure should look like this: ```tree -├── app.js -├── circuit -│ ├── Nargo.toml -│ ├── src -│ │ └── main.nr -│ └── target -│ └── circuit.json -├── index.html -├── package.json -└── vite.config.js +. +└── circuit + └── ...same as above +└── vite-project + ├── main.js + ├── package.json + └── index.html ``` -You'll see other files and folders showing up (like `package-lock.json`, `yarn.lock`, `node_modules`) but you shouldn't have to care about those. +You'll see other files and folders showing up (like `package-lock.json`, `node_modules`) but you shouldn't have to care about those. -## Importing our dependencies +::: + +## Some NoirJS -We're starting with the good stuff now. At the top of the new javascript file, import the packages: +We're starting with the good stuff now. If you've compiled the circuit as described above, you should have a `json` file we want to import at the very top of our `main.js` file: ```ts +import circuit from '../circuit/target/circuit.json'; +``` + +[Noir is backend-agnostic](../index.md#whats-new-about-noir). We write Noir, but we also need a proving backend. That's why we need to import and instantiate the two dependencies we installed above: `BarretenbergBackend` and `Noir`. Let's import them right below: + +```js import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; import { Noir } from '@noir-lang/noir_js'; ``` -We also need to import the `circuit` JSON file we created. 
If you have the suggested folder structure, you can add this line: +And instantiate them inside our try-catch block: ```ts -import circuit from './circuit/target/circuit.json'; +// try { +const backend = new BarretenbergBackend(circuit); +const noir = new Noir(circuit, backend); +// } ``` -## Write code - :::note -We're gonna be adding code inside the `document.addEventListener...etc` block: - -```js -// forget stuff here -document.addEventListener('DOMContentLoaded', async () => { - // here's where the magic happens -}); -// forget stuff here -``` +For the remainder of the tutorial, everything will be happening inside the `try` block ::: -Our dependencies exported two classes: `BarretenbergBackend` and `Noir`. Let's `init` them and add some logs, just to flex: +## Our app -```ts -const backend = new BarretenbergBackend(circuit); -const noir = new Noir(circuit, backend); -``` +Now for the app itself. We're capturing whatever is in the input when people press the submit button. Just add this: -## Proving +```js +const x = parseInt(document.getElementById('guessInput').value); +const input = { x, y: 2 }; +``` Now we're ready to prove stuff! Let's feed some inputs to our circuit and calculate the proof: ```js -const input = { x: 1, y: 2 }; +await setup(); // let's squeeze our wasm inits here + display('logs', 'Generating proof... ⌛'); const proof = await noir.generateFinalProof(input); display('logs', 'Generating proof... ✅'); @@ -236,13 +249,17 @@ display('results', proof.proof); You're probably eager to see stuff happening, so go and run your app now! -From your terminal, run `npm start` (or `yarn start`). If it doesn't open a browser for you, just visit `localhost:5173`. On a modern laptop, proof will generate in less than 100ms, and you'll see this: +From your terminal, run `npm run dev`. If it doesn't open a browser for you, just visit `localhost:5173`. You should now see the worst UI ever, with an ugly input. 
![Getting Started 0](@site/static/img/noir_getting_started_1.png) -If you're human, you shouldn't be able to understand anything on the "proof" box. That's OK. We like you, human. +Now, our circuit says `fn main(x: Field, y: pub Field)`. This means only the `y` value is public, and it's hardcoded above: `input = { x, y: 2 }`. In other words, you won't need to send your secret`x` to the verifier! -In any case, this means your proof was generated! But you shouldn't trust me just yet. Add these lines to see it being verified: +By inputting any number other than 2 in the input box and clicking "submit", you should get a valid proof. Otherwise the proof won't even generate correctly. By the way, if you're human, you shouldn't be able to understand anything on the "proof" box. That's OK. We like you, human ❤️. + +## Verifying + +Time to celebrate, yes! But we shouldn't trust machines so blindly. Let's add these lines to see our proof being verified: ```js display('logs', 'Verifying proof... ⌛'); @@ -250,9 +267,9 @@ const verification = await noir.verifyFinalProof(proof); if (verification) display('logs', 'Verifying proof... ✅'); ``` -By saving, your app will refresh and here's our complete Tiny Noir App! +You have successfully generated a client-side Noir web app! -You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app). 
+![coded app without math knowledge](../../static/img/memes/flextape.jpeg) ## Further Reading diff --git a/noir/docs/static/img/memes/flextape.jpeg b/noir/docs/static/img/memes/flextape.jpeg new file mode 100644 index 000000000000..65a8992e9853 Binary files /dev/null and b/noir/docs/static/img/memes/flextape.jpeg differ diff --git a/noir/docs/static/img/memes/titanic.jpeg b/noir/docs/static/img/memes/titanic.jpeg new file mode 100644 index 000000000000..a29f06ffb93b Binary files /dev/null and b/noir/docs/static/img/memes/titanic.jpeg differ diff --git a/noir/docs/static/img/noir_getting_started_1.png b/noir/docs/static/img/noir_getting_started_1.png index 9de33296e916..beaee6fd3c93 100644 Binary files a/noir/docs/static/img/noir_getting_started_1.png and b/noir/docs/static/img/noir_getting_started_1.png differ diff --git a/noir/docs/versioned_docs/version-v0.19.4/language_concepts/data_types/01_integers.md b/noir/docs/versioned_docs/version-v0.19.4/language_concepts/data_types/01_integers.md index b1e7ad11bfd2..1814365800af 100644 --- a/noir/docs/versioned_docs/version-v0.19.4/language_concepts/data_types/01_integers.md +++ b/noir/docs/versioned_docs/version-v0.19.4/language_concepts/data_types/01_integers.md @@ -107,6 +107,6 @@ Example of how it is used: use dep::std; fn main(x: u8, y: u8) -> pub u8 { - std::wrapping_add(x + y) + std::wrapping_add(x, y) } ``` diff --git a/noir/test_programs/compile_success_empty/regression_3964/Nargo.toml b/noir/test_programs/compile_success_empty/regression_3964/Nargo.toml new file mode 100644 index 000000000000..a3fd040bcc26 --- /dev/null +++ b/noir/test_programs/compile_success_empty/regression_3964/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "regression_3964" +type = "bin" +authors = [""] +compiler_version = ">=0.20.0" + +[dependencies] diff --git a/noir/test_programs/compile_success_empty/regression_3964/src/main.nr b/noir/test_programs/compile_success_empty/regression_3964/src/main.nr new file mode 100644 index 
000000000000..0600a4281a08 --- /dev/null +++ b/noir/test_programs/compile_success_empty/regression_3964/src/main.nr @@ -0,0 +1,5 @@ +fn main() { + let one: u8 = 1; + let p = ((one, 2), (3, 4)); + assert(p == p); +} diff --git a/noir/tooling/nargo_cli/tests/execution_success/prelude/Nargo.toml b/noir/test_programs/execution_success/prelude/Nargo.toml similarity index 100% rename from noir/tooling/nargo_cli/tests/execution_success/prelude/Nargo.toml rename to noir/test_programs/execution_success/prelude/Nargo.toml diff --git a/noir/tooling/nargo_cli/tests/execution_success/prelude/src/main.nr b/noir/test_programs/execution_success/prelude/src/main.nr similarity index 59% rename from noir/tooling/nargo_cli/tests/execution_success/prelude/src/main.nr rename to noir/test_programs/execution_success/prelude/src/main.nr index 9bf2ec18f3a4..c9ae448c4867 100644 --- a/noir/tooling/nargo_cli/tests/execution_success/prelude/src/main.nr +++ b/noir/test_programs/execution_success/prelude/src/main.nr @@ -1,6 +1,6 @@ -fn main(x: Field, y: pub Field) { - let xs = Vec::new(); - let option = Option::none(); +fn main() { + let _xs = Vec::new(); + let _option = Option::none(); print("42\n"); println("42"); @@ -10,11 +10,10 @@ mod a { // We don't want to give an error due to re-importing elements that are already in the prelude. 
use dep::std::collections::vec::Vec; use dep::std::option::Option; - use dep::{print, println}; fn main() { - let xs = Vec::new(); - let option = Option::none(); + let _xs = Vec::new(); + let _option = Option::none(); print("42\n"); println("42"); @@ -23,8 +22,8 @@ mod a { mod b { fn main() { - let xs = Vec::new(); - let option = Option::none(); + let _xs = Vec::new(); + let _option = Option::none(); print("42\n"); println("42"); diff --git a/noir/tooling/bb_abstraction_leaks/build.rs b/noir/tooling/bb_abstraction_leaks/build.rs index 166e61a5a97f..965a57747f97 100644 --- a/noir/tooling/bb_abstraction_leaks/build.rs +++ b/noir/tooling/bb_abstraction_leaks/build.rs @@ -10,7 +10,7 @@ use const_format::formatcp; const USERNAME: &str = "AztecProtocol"; const REPO: &str = "aztec-packages"; -const VERSION: &str = "0.16.0"; +const VERSION: &str = "0.17.0"; const TAG: &str = formatcp!("aztec-packages-v{}", VERSION); const API_URL: &str = diff --git a/noir/tooling/bb_abstraction_leaks/src/contract.sol b/noir/tooling/bb_abstraction_leaks/src/contract.sol deleted file mode 100644 index 814c81d235e7..000000000000 --- a/noir/tooling/bb_abstraction_leaks/src/contract.sol +++ /dev/null @@ -1,2575 +0,0 @@ -/** - * @title Ultra Plonk proof verification contract - * @dev Top level Plonk proof verification contract, which allows Plonk proof to be verified - */ -abstract contract BaseUltraVerifier { - // VERIFICATION KEY MEMORY LOCATIONS - uint256 internal constant N_LOC = 0x380; - uint256 internal constant NUM_INPUTS_LOC = 0x3a0; - uint256 internal constant OMEGA_LOC = 0x3c0; - uint256 internal constant DOMAIN_INVERSE_LOC = 0x3e0; - uint256 internal constant Q1_X_LOC = 0x400; - uint256 internal constant Q1_Y_LOC = 0x420; - uint256 internal constant Q2_X_LOC = 0x440; - uint256 internal constant Q2_Y_LOC = 0x460; - uint256 internal constant Q3_X_LOC = 0x480; - uint256 internal constant Q3_Y_LOC = 0x4a0; - uint256 internal constant Q4_X_LOC = 0x4c0; - uint256 internal constant Q4_Y_LOC 
= 0x4e0; - uint256 internal constant QM_X_LOC = 0x500; - uint256 internal constant QM_Y_LOC = 0x520; - uint256 internal constant QC_X_LOC = 0x540; - uint256 internal constant QC_Y_LOC = 0x560; - uint256 internal constant QARITH_X_LOC = 0x580; - uint256 internal constant QARITH_Y_LOC = 0x5a0; - uint256 internal constant QSORT_X_LOC = 0x5c0; - uint256 internal constant QSORT_Y_LOC = 0x5e0; - uint256 internal constant QELLIPTIC_X_LOC = 0x600; - uint256 internal constant QELLIPTIC_Y_LOC = 0x620; - uint256 internal constant QAUX_X_LOC = 0x640; - uint256 internal constant QAUX_Y_LOC = 0x660; - uint256 internal constant SIGMA1_X_LOC = 0x680; - uint256 internal constant SIGMA1_Y_LOC = 0x6a0; - uint256 internal constant SIGMA2_X_LOC = 0x6c0; - uint256 internal constant SIGMA2_Y_LOC = 0x6e0; - uint256 internal constant SIGMA3_X_LOC = 0x700; - uint256 internal constant SIGMA3_Y_LOC = 0x720; - uint256 internal constant SIGMA4_X_LOC = 0x740; - uint256 internal constant SIGMA4_Y_LOC = 0x760; - uint256 internal constant TABLE1_X_LOC = 0x780; - uint256 internal constant TABLE1_Y_LOC = 0x7a0; - uint256 internal constant TABLE2_X_LOC = 0x7c0; - uint256 internal constant TABLE2_Y_LOC = 0x7e0; - uint256 internal constant TABLE3_X_LOC = 0x800; - uint256 internal constant TABLE3_Y_LOC = 0x820; - uint256 internal constant TABLE4_X_LOC = 0x840; - uint256 internal constant TABLE4_Y_LOC = 0x860; - uint256 internal constant TABLE_TYPE_X_LOC = 0x880; - uint256 internal constant TABLE_TYPE_Y_LOC = 0x8a0; - uint256 internal constant ID1_X_LOC = 0x8c0; - uint256 internal constant ID1_Y_LOC = 0x8e0; - uint256 internal constant ID2_X_LOC = 0x900; - uint256 internal constant ID2_Y_LOC = 0x920; - uint256 internal constant ID3_X_LOC = 0x940; - uint256 internal constant ID3_Y_LOC = 0x960; - uint256 internal constant ID4_X_LOC = 0x980; - uint256 internal constant ID4_Y_LOC = 0x9a0; - uint256 internal constant CONTAINS_RECURSIVE_PROOF_LOC = 0x9c0; - uint256 internal constant 
RECURSIVE_PROOF_PUBLIC_INPUT_INDICES_LOC = 0x9e0; - uint256 internal constant G2X_X0_LOC = 0xa00; - uint256 internal constant G2X_X1_LOC = 0xa20; - uint256 internal constant G2X_Y0_LOC = 0xa40; - uint256 internal constant G2X_Y1_LOC = 0xa60; - - // ### PROOF DATA MEMORY LOCATIONS - uint256 internal constant W1_X_LOC = 0x1200; - uint256 internal constant W1_Y_LOC = 0x1220; - uint256 internal constant W2_X_LOC = 0x1240; - uint256 internal constant W2_Y_LOC = 0x1260; - uint256 internal constant W3_X_LOC = 0x1280; - uint256 internal constant W3_Y_LOC = 0x12a0; - uint256 internal constant W4_X_LOC = 0x12c0; - uint256 internal constant W4_Y_LOC = 0x12e0; - uint256 internal constant S_X_LOC = 0x1300; - uint256 internal constant S_Y_LOC = 0x1320; - uint256 internal constant Z_X_LOC = 0x1340; - uint256 internal constant Z_Y_LOC = 0x1360; - uint256 internal constant Z_LOOKUP_X_LOC = 0x1380; - uint256 internal constant Z_LOOKUP_Y_LOC = 0x13a0; - uint256 internal constant T1_X_LOC = 0x13c0; - uint256 internal constant T1_Y_LOC = 0x13e0; - uint256 internal constant T2_X_LOC = 0x1400; - uint256 internal constant T2_Y_LOC = 0x1420; - uint256 internal constant T3_X_LOC = 0x1440; - uint256 internal constant T3_Y_LOC = 0x1460; - uint256 internal constant T4_X_LOC = 0x1480; - uint256 internal constant T4_Y_LOC = 0x14a0; - - uint256 internal constant W1_EVAL_LOC = 0x1600; - uint256 internal constant W2_EVAL_LOC = 0x1620; - uint256 internal constant W3_EVAL_LOC = 0x1640; - uint256 internal constant W4_EVAL_LOC = 0x1660; - uint256 internal constant S_EVAL_LOC = 0x1680; - uint256 internal constant Z_EVAL_LOC = 0x16a0; - uint256 internal constant Z_LOOKUP_EVAL_LOC = 0x16c0; - uint256 internal constant Q1_EVAL_LOC = 0x16e0; - uint256 internal constant Q2_EVAL_LOC = 0x1700; - uint256 internal constant Q3_EVAL_LOC = 0x1720; - uint256 internal constant Q4_EVAL_LOC = 0x1740; - uint256 internal constant QM_EVAL_LOC = 0x1760; - uint256 internal constant QC_EVAL_LOC = 0x1780; - uint256 internal 
constant QARITH_EVAL_LOC = 0x17a0; - uint256 internal constant QSORT_EVAL_LOC = 0x17c0; - uint256 internal constant QELLIPTIC_EVAL_LOC = 0x17e0; - uint256 internal constant QAUX_EVAL_LOC = 0x1800; - uint256 internal constant TABLE1_EVAL_LOC = 0x1840; - uint256 internal constant TABLE2_EVAL_LOC = 0x1860; - uint256 internal constant TABLE3_EVAL_LOC = 0x1880; - uint256 internal constant TABLE4_EVAL_LOC = 0x18a0; - uint256 internal constant TABLE_TYPE_EVAL_LOC = 0x18c0; - uint256 internal constant ID1_EVAL_LOC = 0x18e0; - uint256 internal constant ID2_EVAL_LOC = 0x1900; - uint256 internal constant ID3_EVAL_LOC = 0x1920; - uint256 internal constant ID4_EVAL_LOC = 0x1940; - uint256 internal constant SIGMA1_EVAL_LOC = 0x1960; - uint256 internal constant SIGMA2_EVAL_LOC = 0x1980; - uint256 internal constant SIGMA3_EVAL_LOC = 0x19a0; - uint256 internal constant SIGMA4_EVAL_LOC = 0x19c0; - uint256 internal constant W1_OMEGA_EVAL_LOC = 0x19e0; - uint256 internal constant W2_OMEGA_EVAL_LOC = 0x2000; - uint256 internal constant W3_OMEGA_EVAL_LOC = 0x2020; - uint256 internal constant W4_OMEGA_EVAL_LOC = 0x2040; - uint256 internal constant S_OMEGA_EVAL_LOC = 0x2060; - uint256 internal constant Z_OMEGA_EVAL_LOC = 0x2080; - uint256 internal constant Z_LOOKUP_OMEGA_EVAL_LOC = 0x20a0; - uint256 internal constant TABLE1_OMEGA_EVAL_LOC = 0x20c0; - uint256 internal constant TABLE2_OMEGA_EVAL_LOC = 0x20e0; - uint256 internal constant TABLE3_OMEGA_EVAL_LOC = 0x2100; - uint256 internal constant TABLE4_OMEGA_EVAL_LOC = 0x2120; - - uint256 internal constant PI_Z_X_LOC = 0x2300; - uint256 internal constant PI_Z_Y_LOC = 0x2320; - uint256 internal constant PI_Z_OMEGA_X_LOC = 0x2340; - uint256 internal constant PI_Z_OMEGA_Y_LOC = 0x2360; - - // Used for elliptic widget. 
These are alias names for wire + shifted wire evaluations - uint256 internal constant X1_EVAL_LOC = W2_EVAL_LOC; - uint256 internal constant X2_EVAL_LOC = W1_OMEGA_EVAL_LOC; - uint256 internal constant X3_EVAL_LOC = W2_OMEGA_EVAL_LOC; - uint256 internal constant Y1_EVAL_LOC = W3_EVAL_LOC; - uint256 internal constant Y2_EVAL_LOC = W4_OMEGA_EVAL_LOC; - uint256 internal constant Y3_EVAL_LOC = W3_OMEGA_EVAL_LOC; - uint256 internal constant QBETA_LOC = Q3_EVAL_LOC; - uint256 internal constant QBETA_SQR_LOC = Q4_EVAL_LOC; - uint256 internal constant QSIGN_LOC = Q1_EVAL_LOC; - - // ### CHALLENGES MEMORY OFFSETS - - uint256 internal constant C_BETA_LOC = 0x2600; - uint256 internal constant C_GAMMA_LOC = 0x2620; - uint256 internal constant C_ALPHA_LOC = 0x2640; - uint256 internal constant C_ETA_LOC = 0x2660; - uint256 internal constant C_ETA_SQR_LOC = 0x2680; - uint256 internal constant C_ETA_CUBE_LOC = 0x26a0; - - uint256 internal constant C_ZETA_LOC = 0x26c0; - uint256 internal constant C_CURRENT_LOC = 0x26e0; - uint256 internal constant C_V0_LOC = 0x2700; - uint256 internal constant C_V1_LOC = 0x2720; - uint256 internal constant C_V2_LOC = 0x2740; - uint256 internal constant C_V3_LOC = 0x2760; - uint256 internal constant C_V4_LOC = 0x2780; - uint256 internal constant C_V5_LOC = 0x27a0; - uint256 internal constant C_V6_LOC = 0x27c0; - uint256 internal constant C_V7_LOC = 0x27e0; - uint256 internal constant C_V8_LOC = 0x2800; - uint256 internal constant C_V9_LOC = 0x2820; - uint256 internal constant C_V10_LOC = 0x2840; - uint256 internal constant C_V11_LOC = 0x2860; - uint256 internal constant C_V12_LOC = 0x2880; - uint256 internal constant C_V13_LOC = 0x28a0; - uint256 internal constant C_V14_LOC = 0x28c0; - uint256 internal constant C_V15_LOC = 0x28e0; - uint256 internal constant C_V16_LOC = 0x2900; - uint256 internal constant C_V17_LOC = 0x2920; - uint256 internal constant C_V18_LOC = 0x2940; - uint256 internal constant C_V19_LOC = 0x2960; - uint256 internal constant 
C_V20_LOC = 0x2980; - uint256 internal constant C_V21_LOC = 0x29a0; - uint256 internal constant C_V22_LOC = 0x29c0; - uint256 internal constant C_V23_LOC = 0x29e0; - uint256 internal constant C_V24_LOC = 0x2a00; - uint256 internal constant C_V25_LOC = 0x2a20; - uint256 internal constant C_V26_LOC = 0x2a40; - uint256 internal constant C_V27_LOC = 0x2a60; - uint256 internal constant C_V28_LOC = 0x2a80; - uint256 internal constant C_V29_LOC = 0x2aa0; - uint256 internal constant C_V30_LOC = 0x2ac0; - - uint256 internal constant C_U_LOC = 0x2b00; - - // ### LOCAL VARIABLES MEMORY OFFSETS - uint256 internal constant DELTA_NUMERATOR_LOC = 0x3000; - uint256 internal constant DELTA_DENOMINATOR_LOC = 0x3020; - uint256 internal constant ZETA_POW_N_LOC = 0x3040; - uint256 internal constant PUBLIC_INPUT_DELTA_LOC = 0x3060; - uint256 internal constant ZERO_POLY_LOC = 0x3080; - uint256 internal constant L_START_LOC = 0x30a0; - uint256 internal constant L_END_LOC = 0x30c0; - uint256 internal constant R_ZERO_EVAL_LOC = 0x30e0; - - uint256 internal constant PLOOKUP_DELTA_NUMERATOR_LOC = 0x3100; - uint256 internal constant PLOOKUP_DELTA_DENOMINATOR_LOC = 0x3120; - uint256 internal constant PLOOKUP_DELTA_LOC = 0x3140; - - uint256 internal constant ACCUMULATOR_X_LOC = 0x3160; - uint256 internal constant ACCUMULATOR_Y_LOC = 0x3180; - uint256 internal constant ACCUMULATOR2_X_LOC = 0x31a0; - uint256 internal constant ACCUMULATOR2_Y_LOC = 0x31c0; - uint256 internal constant PAIRING_LHS_X_LOC = 0x31e0; - uint256 internal constant PAIRING_LHS_Y_LOC = 0x3200; - uint256 internal constant PAIRING_RHS_X_LOC = 0x3220; - uint256 internal constant PAIRING_RHS_Y_LOC = 0x3240; - - // ### SUCCESS FLAG MEMORY LOCATIONS - uint256 internal constant GRAND_PRODUCT_SUCCESS_FLAG = 0x3300; - uint256 internal constant ARITHMETIC_TERM_SUCCESS_FLAG = 0x3020; - uint256 internal constant BATCH_OPENING_SUCCESS_FLAG = 0x3340; - uint256 internal constant OPENING_COMMITMENT_SUCCESS_FLAG = 0x3360; - uint256 internal 
constant PAIRING_PREAMBLE_SUCCESS_FLAG = 0x3380; - uint256 internal constant PAIRING_SUCCESS_FLAG = 0x33a0; - uint256 internal constant RESULT_FLAG = 0x33c0; - - // misc stuff - uint256 internal constant OMEGA_INVERSE_LOC = 0x3400; - uint256 internal constant C_ALPHA_SQR_LOC = 0x3420; - uint256 internal constant C_ALPHA_CUBE_LOC = 0x3440; - uint256 internal constant C_ALPHA_QUAD_LOC = 0x3460; - uint256 internal constant C_ALPHA_BASE_LOC = 0x3480; - - // ### RECURSION VARIABLE MEMORY LOCATIONS - uint256 internal constant RECURSIVE_P1_X_LOC = 0x3500; - uint256 internal constant RECURSIVE_P1_Y_LOC = 0x3520; - uint256 internal constant RECURSIVE_P2_X_LOC = 0x3540; - uint256 internal constant RECURSIVE_P2_Y_LOC = 0x3560; - - uint256 internal constant PUBLIC_INPUTS_HASH_LOCATION = 0x3580; - - // sub-identity storage - uint256 internal constant PERMUTATION_IDENTITY = 0x3600; - uint256 internal constant PLOOKUP_IDENTITY = 0x3620; - uint256 internal constant ARITHMETIC_IDENTITY = 0x3640; - uint256 internal constant SORT_IDENTITY = 0x3660; - uint256 internal constant ELLIPTIC_IDENTITY = 0x3680; - uint256 internal constant AUX_IDENTITY = 0x36a0; - uint256 internal constant AUX_NON_NATIVE_FIELD_EVALUATION = 0x36c0; - uint256 internal constant AUX_LIMB_ACCUMULATOR_EVALUATION = 0x36e0; - uint256 internal constant AUX_RAM_CONSISTENCY_EVALUATION = 0x3700; - uint256 internal constant AUX_ROM_CONSISTENCY_EVALUATION = 0x3720; - uint256 internal constant AUX_MEMORY_EVALUATION = 0x3740; - - uint256 internal constant QUOTIENT_EVAL_LOC = 0x3760; - uint256 internal constant ZERO_POLY_INVERSE_LOC = 0x3780; - - // when hashing public inputs we use memory at NU_CHALLENGE_INPUT_LOC_A, as the hash input size is unknown at compile time - uint256 internal constant NU_CHALLENGE_INPUT_LOC_A = 0x37a0; - uint256 internal constant NU_CHALLENGE_INPUT_LOC_B = 0x37c0; - uint256 internal constant NU_CHALLENGE_INPUT_LOC_C = 0x37e0; - - bytes4 internal constant PUBLIC_INPUT_INVALID_BN128_G1_POINT_SELECTOR 
= 0xeba9f4a6; - bytes4 internal constant PUBLIC_INPUT_GE_P_SELECTOR = 0x374a972f; - bytes4 internal constant MOD_EXP_FAILURE_SELECTOR = 0xf894a7bc; - bytes4 internal constant EC_SCALAR_MUL_FAILURE_SELECTOR = 0xf755f369; - bytes4 internal constant PROOF_FAILURE_SELECTOR = 0x0711fcec; - - uint256 internal constant ETA_INPUT_LENGTH = 0xc0; // W1, W2, W3 = 6 * 0x20 bytes - - // We need to hash 41 field elements when generating the NU challenge - // w1, w2, w3, w4, s, z, z_lookup, q1, q2, q3, q4, qm, qc, qarith (14) - // qsort, qelliptic, qaux, sigma1, sigma2, sigma, sigma4, (7) - // table1, table2, table3, table4, tabletype, id1, id2, id3, id4, (9) - // w1_omega, w2_omega, w3_omega, w4_omega, s_omega, z_omega, z_lookup_omega, (7) - // table1_omega, table2_omega, table3_omega, table4_omega (4) - uint256 internal constant NU_INPUT_LENGTH = 0x520; // 0x520 = 41 * 0x20 - - // There are ELEVEN G1 group elements added into the transcript in the `beta` round, that we need to skip over - // W1, W2, W3, W4, S, Z, Z_LOOKUP, T1, T2, T3, T4 - uint256 internal constant NU_CALLDATA_SKIP_LENGTH = 0x2c0; // 11 * 0x40 = 0x2c0 - - uint256 internal constant NEGATIVE_INVERSE_OF_2_MODULO_P = - 0x183227397098d014dc2822db40c0ac2e9419f4243cdcb848a1f0fac9f8000000; - uint256 internal constant LIMB_SIZE = 0x100000000000000000; // 2<<68 - uint256 internal constant SUBLIMB_SHIFT = 0x4000; // 2<<14 - - // y^2 = x^3 + ax + b - // for Grumpkin, a = 0 and b = -17. 
We use b in a custom gate relation that evaluates elliptic curve arithmetic - uint256 internal constant GRUMPKIN_CURVE_B_PARAMETER_NEGATED = 17; - error PUBLIC_INPUT_COUNT_INVALID(uint256 expected, uint256 actual); - error PUBLIC_INPUT_INVALID_BN128_G1_POINT(); - error PUBLIC_INPUT_GE_P(); - error MOD_EXP_FAILURE(); - error EC_SCALAR_MUL_FAILURE(); - error PROOF_FAILURE(); - - function getVerificationKeyHash() public pure virtual returns (bytes32); - - function loadVerificationKey(uint256 _vk, uint256 _omegaInverseLoc) internal pure virtual; - - /** - * @notice Verify a Ultra Plonk proof - * @param _proof - The serialized proof - * @param _publicInputs - An array of the public inputs - * @return True if proof is valid, reverts otherwise - */ - function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool) { - loadVerificationKey(N_LOC, OMEGA_INVERSE_LOC); - - uint256 requiredPublicInputCount; - assembly { - requiredPublicInputCount := mload(NUM_INPUTS_LOC) - } - if (requiredPublicInputCount != _publicInputs.length) { - revert PUBLIC_INPUT_COUNT_INVALID(requiredPublicInputCount, _publicInputs.length); - } - - assembly { - let q := 21888242871839275222246405745257275088696311157297823662689037894645226208583 // EC group order - let p := 21888242871839275222246405745257275088548364400416034343698204186575808495617 // Prime field order - - /** - * LOAD PROOF FROM CALLDATA - */ - { - let data_ptr := add(calldataload(0x04), 0x24) - - mstore(W1_Y_LOC, mod(calldataload(data_ptr), q)) - mstore(W1_X_LOC, mod(calldataload(add(data_ptr, 0x20)), q)) - - mstore(W2_Y_LOC, mod(calldataload(add(data_ptr, 0x40)), q)) - mstore(W2_X_LOC, mod(calldataload(add(data_ptr, 0x60)), q)) - - mstore(W3_Y_LOC, mod(calldataload(add(data_ptr, 0x80)), q)) - mstore(W3_X_LOC, mod(calldataload(add(data_ptr, 0xa0)), q)) - - mstore(W4_Y_LOC, mod(calldataload(add(data_ptr, 0xc0)), q)) - mstore(W4_X_LOC, mod(calldataload(add(data_ptr, 0xe0)), q)) - - mstore(S_Y_LOC, 
mod(calldataload(add(data_ptr, 0x100)), q)) - mstore(S_X_LOC, mod(calldataload(add(data_ptr, 0x120)), q)) - mstore(Z_Y_LOC, mod(calldataload(add(data_ptr, 0x140)), q)) - mstore(Z_X_LOC, mod(calldataload(add(data_ptr, 0x160)), q)) - mstore(Z_LOOKUP_Y_LOC, mod(calldataload(add(data_ptr, 0x180)), q)) - mstore(Z_LOOKUP_X_LOC, mod(calldataload(add(data_ptr, 0x1a0)), q)) - mstore(T1_Y_LOC, mod(calldataload(add(data_ptr, 0x1c0)), q)) - mstore(T1_X_LOC, mod(calldataload(add(data_ptr, 0x1e0)), q)) - - mstore(T2_Y_LOC, mod(calldataload(add(data_ptr, 0x200)), q)) - mstore(T2_X_LOC, mod(calldataload(add(data_ptr, 0x220)), q)) - - mstore(T3_Y_LOC, mod(calldataload(add(data_ptr, 0x240)), q)) - mstore(T3_X_LOC, mod(calldataload(add(data_ptr, 0x260)), q)) - - mstore(T4_Y_LOC, mod(calldataload(add(data_ptr, 0x280)), q)) - mstore(T4_X_LOC, mod(calldataload(add(data_ptr, 0x2a0)), q)) - - mstore(W1_EVAL_LOC, mod(calldataload(add(data_ptr, 0x2c0)), p)) - mstore(W2_EVAL_LOC, mod(calldataload(add(data_ptr, 0x2e0)), p)) - mstore(W3_EVAL_LOC, mod(calldataload(add(data_ptr, 0x300)), p)) - mstore(W4_EVAL_LOC, mod(calldataload(add(data_ptr, 0x320)), p)) - mstore(S_EVAL_LOC, mod(calldataload(add(data_ptr, 0x340)), p)) - mstore(Z_EVAL_LOC, mod(calldataload(add(data_ptr, 0x360)), p)) - mstore(Z_LOOKUP_EVAL_LOC, mod(calldataload(add(data_ptr, 0x380)), p)) - mstore(Q1_EVAL_LOC, mod(calldataload(add(data_ptr, 0x3a0)), p)) - mstore(Q2_EVAL_LOC, mod(calldataload(add(data_ptr, 0x3c0)), p)) - mstore(Q3_EVAL_LOC, mod(calldataload(add(data_ptr, 0x3e0)), p)) - mstore(Q4_EVAL_LOC, mod(calldataload(add(data_ptr, 0x400)), p)) - mstore(QM_EVAL_LOC, mod(calldataload(add(data_ptr, 0x420)), p)) - mstore(QC_EVAL_LOC, mod(calldataload(add(data_ptr, 0x440)), p)) - mstore(QARITH_EVAL_LOC, mod(calldataload(add(data_ptr, 0x460)), p)) - mstore(QSORT_EVAL_LOC, mod(calldataload(add(data_ptr, 0x480)), p)) - mstore(QELLIPTIC_EVAL_LOC, mod(calldataload(add(data_ptr, 0x4a0)), p)) - mstore(QAUX_EVAL_LOC, 
mod(calldataload(add(data_ptr, 0x4c0)), p)) - - mstore(SIGMA1_EVAL_LOC, mod(calldataload(add(data_ptr, 0x4e0)), p)) - mstore(SIGMA2_EVAL_LOC, mod(calldataload(add(data_ptr, 0x500)), p)) - - mstore(SIGMA3_EVAL_LOC, mod(calldataload(add(data_ptr, 0x520)), p)) - mstore(SIGMA4_EVAL_LOC, mod(calldataload(add(data_ptr, 0x540)), p)) - - mstore(TABLE1_EVAL_LOC, mod(calldataload(add(data_ptr, 0x560)), p)) - mstore(TABLE2_EVAL_LOC, mod(calldataload(add(data_ptr, 0x580)), p)) - mstore(TABLE3_EVAL_LOC, mod(calldataload(add(data_ptr, 0x5a0)), p)) - mstore(TABLE4_EVAL_LOC, mod(calldataload(add(data_ptr, 0x5c0)), p)) - mstore(TABLE_TYPE_EVAL_LOC, mod(calldataload(add(data_ptr, 0x5e0)), p)) - - mstore(ID1_EVAL_LOC, mod(calldataload(add(data_ptr, 0x600)), p)) - mstore(ID2_EVAL_LOC, mod(calldataload(add(data_ptr, 0x620)), p)) - mstore(ID3_EVAL_LOC, mod(calldataload(add(data_ptr, 0x640)), p)) - mstore(ID4_EVAL_LOC, mod(calldataload(add(data_ptr, 0x660)), p)) - - mstore(W1_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x680)), p)) - mstore(W2_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x6a0)), p)) - mstore(W3_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x6c0)), p)) - mstore(W4_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x6e0)), p)) - mstore(S_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x700)), p)) - - mstore(Z_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x720)), p)) - - mstore(Z_LOOKUP_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x740)), p)) - mstore(TABLE1_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x760)), p)) - mstore(TABLE2_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x780)), p)) - mstore(TABLE3_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x7a0)), p)) - mstore(TABLE4_OMEGA_EVAL_LOC, mod(calldataload(add(data_ptr, 0x7c0)), p)) - - mstore(PI_Z_Y_LOC, mod(calldataload(add(data_ptr, 0x7e0)), q)) - mstore(PI_Z_X_LOC, mod(calldataload(add(data_ptr, 0x800)), q)) - - mstore(PI_Z_OMEGA_Y_LOC, mod(calldataload(add(data_ptr, 0x820)), q)) - 
mstore(PI_Z_OMEGA_X_LOC, mod(calldataload(add(data_ptr, 0x840)), q)) - } - - /** - * LOAD RECURSIVE PROOF INTO MEMORY - */ - { - if mload(CONTAINS_RECURSIVE_PROOF_LOC) { - let public_inputs_ptr := add(calldataload(0x24), 0x24) - let index_counter := add(shl(5, mload(RECURSIVE_PROOF_PUBLIC_INPUT_INDICES_LOC)), public_inputs_ptr) - - let x0 := calldataload(index_counter) - x0 := add(x0, shl(68, calldataload(add(index_counter, 0x20)))) - x0 := add(x0, shl(136, calldataload(add(index_counter, 0x40)))) - x0 := add(x0, shl(204, calldataload(add(index_counter, 0x60)))) - let y0 := calldataload(add(index_counter, 0x80)) - y0 := add(y0, shl(68, calldataload(add(index_counter, 0xa0)))) - y0 := add(y0, shl(136, calldataload(add(index_counter, 0xc0)))) - y0 := add(y0, shl(204, calldataload(add(index_counter, 0xe0)))) - let x1 := calldataload(add(index_counter, 0x100)) - x1 := add(x1, shl(68, calldataload(add(index_counter, 0x120)))) - x1 := add(x1, shl(136, calldataload(add(index_counter, 0x140)))) - x1 := add(x1, shl(204, calldataload(add(index_counter, 0x160)))) - let y1 := calldataload(add(index_counter, 0x180)) - y1 := add(y1, shl(68, calldataload(add(index_counter, 0x1a0)))) - y1 := add(y1, shl(136, calldataload(add(index_counter, 0x1c0)))) - y1 := add(y1, shl(204, calldataload(add(index_counter, 0x1e0)))) - mstore(RECURSIVE_P1_X_LOC, x0) - mstore(RECURSIVE_P1_Y_LOC, y0) - mstore(RECURSIVE_P2_X_LOC, x1) - mstore(RECURSIVE_P2_Y_LOC, y1) - - // validate these are valid bn128 G1 points - if iszero(and(and(lt(x0, q), lt(x1, q)), and(lt(y0, q), lt(y1, q)))) { - mstore(0x00, PUBLIC_INPUT_INVALID_BN128_G1_POINT_SELECTOR) - revert(0x00, 0x04) - } - } - } - - { - /** - * Generate initial challenge - */ - mstore(0x00, shl(224, mload(N_LOC))) - mstore(0x04, shl(224, mload(NUM_INPUTS_LOC))) - let challenge := keccak256(0x00, 0x08) - - /** - * Generate eta challenge - */ - mstore(PUBLIC_INPUTS_HASH_LOCATION, challenge) - // The public input location is stored at 0x24, we then add 0x24 
to skip selector and the length of public inputs - let public_inputs_start := add(calldataload(0x24), 0x24) - // copy the public inputs over - let public_input_size := mul(mload(NUM_INPUTS_LOC), 0x20) - calldatacopy(add(PUBLIC_INPUTS_HASH_LOCATION, 0x20), public_inputs_start, public_input_size) - - // copy W1, W2, W3 into challenge. Each point is 0x40 bytes, so load 0xc0 = 3 * 0x40 bytes (ETA input length) - let w_start := add(calldataload(0x04), 0x24) - calldatacopy(add(add(PUBLIC_INPUTS_HASH_LOCATION, 0x20), public_input_size), w_start, ETA_INPUT_LENGTH) - - // Challenge is the old challenge + public inputs + W1, W2, W3 (0x20 + public_input_size + 0xc0) - let challenge_bytes_size := add(0x20, add(public_input_size, ETA_INPUT_LENGTH)) - - challenge := keccak256(PUBLIC_INPUTS_HASH_LOCATION, challenge_bytes_size) - { - let eta := mod(challenge, p) - mstore(C_ETA_LOC, eta) - mstore(C_ETA_SQR_LOC, mulmod(eta, eta, p)) - mstore(C_ETA_CUBE_LOC, mulmod(mload(C_ETA_SQR_LOC), eta, p)) - } - - /** - * Generate beta challenge - */ - mstore(0x00, challenge) - mstore(0x20, mload(W4_Y_LOC)) - mstore(0x40, mload(W4_X_LOC)) - mstore(0x60, mload(S_Y_LOC)) - mstore(0x80, mload(S_X_LOC)) - challenge := keccak256(0x00, 0xa0) - mstore(C_BETA_LOC, mod(challenge, p)) - - /** - * Generate gamma challenge - */ - mstore(0x00, challenge) - mstore8(0x20, 0x01) - challenge := keccak256(0x00, 0x21) - mstore(C_GAMMA_LOC, mod(challenge, p)) - - /** - * Generate alpha challenge - */ - mstore(0x00, challenge) - mstore(0x20, mload(Z_Y_LOC)) - mstore(0x40, mload(Z_X_LOC)) - mstore(0x60, mload(Z_LOOKUP_Y_LOC)) - mstore(0x80, mload(Z_LOOKUP_X_LOC)) - challenge := keccak256(0x00, 0xa0) - mstore(C_ALPHA_LOC, mod(challenge, p)) - - /** - * Compute and store some powers of alpha for future computations - */ - let alpha := mload(C_ALPHA_LOC) - mstore(C_ALPHA_SQR_LOC, mulmod(alpha, alpha, p)) - mstore(C_ALPHA_CUBE_LOC, mulmod(mload(C_ALPHA_SQR_LOC), alpha, p)) - mstore(C_ALPHA_QUAD_LOC, 
mulmod(mload(C_ALPHA_CUBE_LOC), alpha, p)) - mstore(C_ALPHA_BASE_LOC, alpha) - - /** - * Generate zeta challenge - */ - mstore(0x00, challenge) - mstore(0x20, mload(T1_Y_LOC)) - mstore(0x40, mload(T1_X_LOC)) - mstore(0x60, mload(T2_Y_LOC)) - mstore(0x80, mload(T2_X_LOC)) - mstore(0xa0, mload(T3_Y_LOC)) - mstore(0xc0, mload(T3_X_LOC)) - mstore(0xe0, mload(T4_Y_LOC)) - mstore(0x100, mload(T4_X_LOC)) - - challenge := keccak256(0x00, 0x120) - - mstore(C_ZETA_LOC, mod(challenge, p)) - mstore(C_CURRENT_LOC, challenge) - } - - /** - * EVALUATE FIELD OPERATIONS - */ - - /** - * COMPUTE PUBLIC INPUT DELTA - * ΔPI = ∏ᵢ∈ℓ(wᵢ + β σ(i) + γ) / ∏ᵢ∈ℓ(wᵢ + β σ'(i) + γ) - */ - { - let beta := mload(C_BETA_LOC) // β - let gamma := mload(C_GAMMA_LOC) // γ - let work_root := mload(OMEGA_LOC) // ω - let numerator_value := 1 - let denominator_value := 1 - - let p_clone := p // move p to the front of the stack - let valid_inputs := true - - // Load the starting point of the public inputs (jump over the selector and the length of public inputs [0x24]) - let public_inputs_ptr := add(calldataload(0x24), 0x24) - - // endpoint_ptr = public_inputs_ptr + num_inputs * 0x20. 
// every public input is 0x20 bytes - let endpoint_ptr := add(public_inputs_ptr, mul(mload(NUM_INPUTS_LOC), 0x20)) - - // root_1 = β * 0x05 - let root_1 := mulmod(beta, 0x05, p_clone) // k1.β - // root_2 = β * 0x0c - let root_2 := mulmod(beta, 0x0c, p_clone) - // @note 0x05 + 0x07 == 0x0c == external coset generator - - for {} lt(public_inputs_ptr, endpoint_ptr) { public_inputs_ptr := add(public_inputs_ptr, 0x20) } { - /** - * input = public_input[i] - * valid_inputs &= input < p - * temp = input + gamma - * numerator_value *= (β.σ(i) + wᵢ + γ) // σ(i) = 0x05.ωⁱ - * denominator_value *= (β.σ'(i) + wᵢ + γ) // σ'(i) = 0x0c.ωⁱ - * root_1 *= ω - * root_2 *= ω - */ - - let input := calldataload(public_inputs_ptr) - valid_inputs := and(valid_inputs, lt(input, p_clone)) - let temp := addmod(input, gamma, p_clone) - - numerator_value := mulmod(numerator_value, add(root_1, temp), p_clone) - denominator_value := mulmod(denominator_value, add(root_2, temp), p_clone) - - root_1 := mulmod(root_1, work_root, p_clone) - root_2 := mulmod(root_2, work_root, p_clone) - } - - // Revert if not all public inputs are field elements (i.e. 
< p) - if iszero(valid_inputs) { - mstore(0x00, PUBLIC_INPUT_GE_P_SELECTOR) - revert(0x00, 0x04) - } - - mstore(DELTA_NUMERATOR_LOC, numerator_value) - mstore(DELTA_DENOMINATOR_LOC, denominator_value) - } - - /** - * Compute Plookup delta factor [γ(1 + β)]^{n-k} - * k = num roots cut out of Z_H = 4 - */ - { - let delta_base := mulmod(mload(C_GAMMA_LOC), addmod(mload(C_BETA_LOC), 1, p), p) - let delta_numerator := delta_base - { - let exponent := mload(N_LOC) - let count := 1 - for {} lt(count, exponent) { count := add(count, count) } { - delta_numerator := mulmod(delta_numerator, delta_numerator, p) - } - } - mstore(PLOOKUP_DELTA_NUMERATOR_LOC, delta_numerator) - - let delta_denominator := mulmod(delta_base, delta_base, p) - delta_denominator := mulmod(delta_denominator, delta_denominator, p) - mstore(PLOOKUP_DELTA_DENOMINATOR_LOC, delta_denominator) - } - /** - * Compute lagrange poly and vanishing poly fractions - */ - { - /** - * vanishing_numerator = zeta - * ZETA_POW_N = zeta^n - * vanishing_numerator -= 1 - * accumulating_root = omega_inverse - * work_root = p - accumulating_root - * domain_inverse = domain_inverse - * vanishing_denominator = zeta + work_root - * work_root *= accumulating_root - * vanishing_denominator *= (zeta + work_root) - * work_root *= accumulating_root - * vanishing_denominator *= (zeta + work_root) - * vanishing_denominator *= (zeta + (zeta + accumulating_root)) - * work_root = omega - * lagrange_numerator = vanishing_numerator * domain_inverse - * l_start_denominator = zeta - 1 - * accumulating_root = work_root^2 - * l_end_denominator = accumulating_root^2 * work_root * zeta - 1 - * Note: l_end_denominator term contains a term \omega^5 to cut out 5 roots of unity from vanishing poly - */ - - let zeta := mload(C_ZETA_LOC) - - // compute zeta^n, where n is a power of 2 - let vanishing_numerator := zeta - { - // pow_small - let exponent := mload(N_LOC) - let count := 1 - for {} lt(count, exponent) { count := add(count, count) } { - 
vanishing_numerator := mulmod(vanishing_numerator, vanishing_numerator, p) - } - } - mstore(ZETA_POW_N_LOC, vanishing_numerator) - vanishing_numerator := addmod(vanishing_numerator, sub(p, 1), p) - - let accumulating_root := mload(OMEGA_INVERSE_LOC) - let work_root := sub(p, accumulating_root) - let domain_inverse := mload(DOMAIN_INVERSE_LOC) - - let vanishing_denominator := addmod(zeta, work_root, p) - work_root := mulmod(work_root, accumulating_root, p) - vanishing_denominator := mulmod(vanishing_denominator, addmod(zeta, work_root, p), p) - work_root := mulmod(work_root, accumulating_root, p) - vanishing_denominator := mulmod(vanishing_denominator, addmod(zeta, work_root, p), p) - vanishing_denominator := - mulmod(vanishing_denominator, addmod(zeta, mulmod(work_root, accumulating_root, p), p), p) - - work_root := mload(OMEGA_LOC) - - let lagrange_numerator := mulmod(vanishing_numerator, domain_inverse, p) - let l_start_denominator := addmod(zeta, sub(p, 1), p) - - accumulating_root := mulmod(work_root, work_root, p) - - let l_end_denominator := - addmod( - mulmod(mulmod(mulmod(accumulating_root, accumulating_root, p), work_root, p), zeta, p), sub(p, 1), p - ) - - /** - * Compute inversions using Montgomery's batch inversion trick - */ - let accumulator := mload(DELTA_DENOMINATOR_LOC) - let t0 := accumulator - accumulator := mulmod(accumulator, vanishing_denominator, p) - let t1 := accumulator - accumulator := mulmod(accumulator, vanishing_numerator, p) - let t2 := accumulator - accumulator := mulmod(accumulator, l_start_denominator, p) - let t3 := accumulator - accumulator := mulmod(accumulator, mload(PLOOKUP_DELTA_DENOMINATOR_LOC), p) - let t4 := accumulator - { - mstore(0, 0x20) - mstore(0x20, 0x20) - mstore(0x40, 0x20) - mstore(0x60, mulmod(accumulator, l_end_denominator, p)) - mstore(0x80, sub(p, 2)) - mstore(0xa0, p) - if iszero(staticcall(gas(), 0x05, 0x00, 0xc0, 0x00, 0x20)) { - mstore(0x0, MOD_EXP_FAILURE_SELECTOR) - revert(0x00, 0x04) - } - accumulator 
:= mload(0x00) - } - - t4 := mulmod(accumulator, t4, p) - accumulator := mulmod(accumulator, l_end_denominator, p) - - t3 := mulmod(accumulator, t3, p) - accumulator := mulmod(accumulator, mload(PLOOKUP_DELTA_DENOMINATOR_LOC), p) - - t2 := mulmod(accumulator, t2, p) - accumulator := mulmod(accumulator, l_start_denominator, p) - - t1 := mulmod(accumulator, t1, p) - accumulator := mulmod(accumulator, vanishing_numerator, p) - - t0 := mulmod(accumulator, t0, p) - accumulator := mulmod(accumulator, vanishing_denominator, p) - - accumulator := mulmod(mulmod(accumulator, accumulator, p), mload(DELTA_DENOMINATOR_LOC), p) - - mstore(PUBLIC_INPUT_DELTA_LOC, mulmod(mload(DELTA_NUMERATOR_LOC), accumulator, p)) - mstore(ZERO_POLY_LOC, mulmod(vanishing_numerator, t0, p)) - mstore(ZERO_POLY_INVERSE_LOC, mulmod(vanishing_denominator, t1, p)) - mstore(L_START_LOC, mulmod(lagrange_numerator, t2, p)) - mstore(PLOOKUP_DELTA_LOC, mulmod(mload(PLOOKUP_DELTA_NUMERATOR_LOC), t3, p)) - mstore(L_END_LOC, mulmod(lagrange_numerator, t4, p)) - } - - /** - * UltraPlonk Widget Ordering: - * - * 1. Permutation widget - * 2. Plookup widget - * 3. Arithmetic widget - * 4. Fixed base widget (?) - * 5. GenPermSort widget - * 6. Elliptic widget - * 7. 
Auxiliary widget - */ - - /** - * COMPUTE PERMUTATION WIDGET EVALUATION - */ - { - let alpha := mload(C_ALPHA_LOC) - let beta := mload(C_BETA_LOC) - let gamma := mload(C_GAMMA_LOC) - - /** - * t1 = (W1 + gamma + beta * ID1) * (W2 + gamma + beta * ID2) - * t2 = (W3 + gamma + beta * ID3) * (W4 + gamma + beta * ID4) - * result = alpha_base * z_eval * t1 * t2 - * t1 = (W1 + gamma + beta * sigma_1_eval) * (W2 + gamma + beta * sigma_2_eval) - * t2 = (W2 + gamma + beta * sigma_3_eval) * (W3 + gamma + beta * sigma_4_eval) - * result -= (alpha_base * z_omega_eval * t1 * t2) - */ - let t1 := - mulmod( - add(add(mload(W1_EVAL_LOC), gamma), mulmod(beta, mload(ID1_EVAL_LOC), p)), - add(add(mload(W2_EVAL_LOC), gamma), mulmod(beta, mload(ID2_EVAL_LOC), p)), - p - ) - let t2 := - mulmod( - add(add(mload(W3_EVAL_LOC), gamma), mulmod(beta, mload(ID3_EVAL_LOC), p)), - add(add(mload(W4_EVAL_LOC), gamma), mulmod(beta, mload(ID4_EVAL_LOC), p)), - p - ) - let result := mulmod(mload(C_ALPHA_BASE_LOC), mulmod(mload(Z_EVAL_LOC), mulmod(t1, t2, p), p), p) - t1 := - mulmod( - add(add(mload(W1_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA1_EVAL_LOC), p)), - add(add(mload(W2_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA2_EVAL_LOC), p)), - p - ) - t2 := - mulmod( - add(add(mload(W3_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA3_EVAL_LOC), p)), - add(add(mload(W4_EVAL_LOC), gamma), mulmod(beta, mload(SIGMA4_EVAL_LOC), p)), - p - ) - result := - addmod( - result, - sub(p, mulmod(mload(C_ALPHA_BASE_LOC), mulmod(mload(Z_OMEGA_EVAL_LOC), mulmod(t1, t2, p), p), p)), - p - ) - - /** - * alpha_base *= alpha - * result += alpha_base . (L_{n-k}(ʓ) . (z(ʓ.ω) - ∆_{PI})) - * alpha_base *= alpha - * result += alpha_base . 
(L_1(ʓ)(Z(ʓ) - 1)) - * alpha_Base *= alpha - */ - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_LOC), p)) - result := - addmod( - result, - mulmod( - mload(C_ALPHA_BASE_LOC), - mulmod( - mload(L_END_LOC), - addmod(mload(Z_OMEGA_EVAL_LOC), sub(p, mload(PUBLIC_INPUT_DELTA_LOC)), p), - p - ), - p - ), - p - ) - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_LOC), p)) - mstore( - PERMUTATION_IDENTITY, - addmod( - result, - mulmod( - mload(C_ALPHA_BASE_LOC), - mulmod(mload(L_START_LOC), addmod(mload(Z_EVAL_LOC), sub(p, 1), p), p), - p - ), - p - ) - ) - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_LOC), p)) - } - - /** - * COMPUTE PLOOKUP WIDGET EVALUATION - */ - { - /** - * Goal: f = (w1(z) + q2.w1(zω)) + η(w2(z) + qm.w2(zω)) + η²(w3(z) + qc.w_3(zω)) + q3(z).η³ - * f = η.q3(z) - * f += (w3(z) + qc.w_3(zω)) - * f *= η - * f += (w2(z) + qm.w2(zω)) - * f *= η - * f += (w1(z) + q2.w1(zω)) - */ - let f := mulmod(mload(C_ETA_LOC), mload(Q3_EVAL_LOC), p) - f := - addmod(f, addmod(mload(W3_EVAL_LOC), mulmod(mload(QC_EVAL_LOC), mload(W3_OMEGA_EVAL_LOC), p), p), p) - f := mulmod(f, mload(C_ETA_LOC), p) - f := - addmod(f, addmod(mload(W2_EVAL_LOC), mulmod(mload(QM_EVAL_LOC), mload(W2_OMEGA_EVAL_LOC), p), p), p) - f := mulmod(f, mload(C_ETA_LOC), p) - f := - addmod(f, addmod(mload(W1_EVAL_LOC), mulmod(mload(Q2_EVAL_LOC), mload(W1_OMEGA_EVAL_LOC), p), p), p) - - // t(z) = table4(z).η³ + table3(z).η² + table2(z).η + table1(z) - let t := - addmod( - addmod( - addmod( - mulmod(mload(TABLE4_EVAL_LOC), mload(C_ETA_CUBE_LOC), p), - mulmod(mload(TABLE3_EVAL_LOC), mload(C_ETA_SQR_LOC), p), - p - ), - mulmod(mload(TABLE2_EVAL_LOC), mload(C_ETA_LOC), p), - p - ), - mload(TABLE1_EVAL_LOC), - p - ) - - // t(zw) = table4(zw).η³ + table3(zw).η² + table2(zw).η + table1(zw) - let t_omega := - addmod( - addmod( - addmod( - mulmod(mload(TABLE4_OMEGA_EVAL_LOC), mload(C_ETA_CUBE_LOC), p), - mulmod(mload(TABLE3_OMEGA_EVAL_LOC), 
mload(C_ETA_SQR_LOC), p), - p - ), - mulmod(mload(TABLE2_OMEGA_EVAL_LOC), mload(C_ETA_LOC), p), - p - ), - mload(TABLE1_OMEGA_EVAL_LOC), - p - ) - - /** - * Goal: numerator = (TABLE_TYPE_EVAL * f(z) + γ) * (t(z) + βt(zω) + γ(β + 1)) * (β + 1) - * gamma_beta_constant = γ(β + 1) - * numerator = f * TABLE_TYPE_EVAL + gamma - * temp0 = t(z) + t(zω) * β + gamma_beta_constant - * numerator *= temp0 - * numerator *= (β + 1) - * temp0 = alpha * l_1 - * numerator += temp0 - * numerator *= z_lookup(z) - * numerator -= temp0 - */ - let gamma_beta_constant := mulmod(mload(C_GAMMA_LOC), addmod(mload(C_BETA_LOC), 1, p), p) - let numerator := addmod(mulmod(f, mload(TABLE_TYPE_EVAL_LOC), p), mload(C_GAMMA_LOC), p) - let temp0 := addmod(addmod(t, mulmod(t_omega, mload(C_BETA_LOC), p), p), gamma_beta_constant, p) - numerator := mulmod(numerator, temp0, p) - numerator := mulmod(numerator, addmod(mload(C_BETA_LOC), 1, p), p) - temp0 := mulmod(mload(C_ALPHA_LOC), mload(L_START_LOC), p) - numerator := addmod(numerator, temp0, p) - numerator := mulmod(numerator, mload(Z_LOOKUP_EVAL_LOC), p) - numerator := addmod(numerator, sub(p, temp0), p) - - /** - * Goal: denominator = z_lookup(zω)*[s(z) + βs(zω) + γ(1 + β)] - [z_lookup(zω) - [γ(1 + β)]^{n-k}]*α²L_end(z) - * note: delta_factor = [γ(1 + β)]^{n-k} - * denominator = s(z) + βs(zω) + γ(β + 1) - * temp1 = α²L_end(z) - * denominator -= temp1 - * denominator *= z_lookup(zω) - * denominator += temp1 * delta_factor - * PLOOKUP_IDENTITY = (numerator - denominator).alpha_base - * alpha_base *= alpha^3 - */ - let denominator := - addmod( - addmod(mload(S_EVAL_LOC), mulmod(mload(S_OMEGA_EVAL_LOC), mload(C_BETA_LOC), p), p), - gamma_beta_constant, - p - ) - let temp1 := mulmod(mload(C_ALPHA_SQR_LOC), mload(L_END_LOC), p) - denominator := addmod(denominator, sub(p, temp1), p) - denominator := mulmod(denominator, mload(Z_LOOKUP_OMEGA_EVAL_LOC), p) - denominator := addmod(denominator, mulmod(temp1, mload(PLOOKUP_DELTA_LOC), p), p) - - 
mstore(PLOOKUP_IDENTITY, mulmod(addmod(numerator, sub(p, denominator), p), mload(C_ALPHA_BASE_LOC), p)) - - // update alpha - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_CUBE_LOC), p)) - } - - /** - * COMPUTE ARITHMETIC WIDGET EVALUATION - */ - { - /** - * The basic arithmetic gate identity in standard plonk is as follows. - * (w_1 . w_2 . q_m) + (w_1 . q_1) + (w_2 . q_2) + (w_3 . q_3) + (w_4 . q_4) + q_c = 0 - * However, for Ultraplonk, we extend this to support "passing" wires between rows (shown without alpha scaling below): - * q_arith * ( ( (-1/2) * (q_arith - 3) * q_m * w_1 * w_2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c ) + - * (q_arith - 1)*( α * (q_arith - 2) * (w_1 + w_4 - w_1_omega + q_m) + w_4_omega) ) = 0 - * - * This formula results in several cases depending on q_arith: - * 1. q_arith == 0: Arithmetic gate is completely disabled - * - * 2. q_arith == 1: Everything in the minigate on the right is disabled. The equation is just a standard plonk equation - * with extra wires: q_m * w_1 * w_2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c = 0 - * - * 3. q_arith == 2: The (w_1 + w_4 - ...) term is disabled. THe equation is: - * (1/2) * q_m * w_1 * w_2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c + w_4_omega = 0 - * It allows defining w_4 at next index (w_4_omega) in terms of current wire values - * - * 4. q_arith == 3: The product of w_1 and w_2 is disabled, but a mini addition gate is enabled. α allows us to split - * the equation into two: - * - * q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c + 2 * w_4_omega = 0 - * and - * w_1 + w_4 - w_1_omega + q_m = 0 (we are reusing q_m here) - * - * 5. q_arith > 3: The product of w_1 and w_2 is scaled by (q_arith - 3), while the w_4_omega term is scaled by (q_arith - 1). 
- * The equation can be split into two: - * - * (q_arith - 3)* q_m * w_1 * w_ 2 + q_1 * w_1 + q_2 * w_2 + q_3 * w_3 + q_4 * w_4 + q_c + (q_arith - 1) * w_4_omega = 0 - * and - * w_1 + w_4 - w_1_omega + q_m = 0 - * - * The problem that q_m is used both in both equations can be dealt with by appropriately changing selector values at - * the next gate. Then we can treat (q_arith - 1) as a simulated q_6 selector and scale q_m to handle (q_arith - 3) at - * product. - */ - - let w1q1 := mulmod(mload(W1_EVAL_LOC), mload(Q1_EVAL_LOC), p) - let w2q2 := mulmod(mload(W2_EVAL_LOC), mload(Q2_EVAL_LOC), p) - let w3q3 := mulmod(mload(W3_EVAL_LOC), mload(Q3_EVAL_LOC), p) - let w4q3 := mulmod(mload(W4_EVAL_LOC), mload(Q4_EVAL_LOC), p) - - // @todo - Add a explicit test that hits QARITH == 3 - // w1w2qm := (w_1 . w_2 . q_m . (QARITH_EVAL_LOC - 3)) / 2 - let w1w2qm := - mulmod( - mulmod( - mulmod(mulmod(mload(W1_EVAL_LOC), mload(W2_EVAL_LOC), p), mload(QM_EVAL_LOC), p), - addmod(mload(QARITH_EVAL_LOC), sub(p, 3), p), - p - ), - NEGATIVE_INVERSE_OF_2_MODULO_P, - p - ) - - // (w_1 . w_2 . q_m . (q_arith - 3)) / -2) + (w_1 . q_1) + (w_2 . q_2) + (w_3 . q_3) + (w_4 . 
q_4) + q_c - let identity := - addmod( - mload(QC_EVAL_LOC), addmod(w4q3, addmod(w3q3, addmod(w2q2, addmod(w1q1, w1w2qm, p), p), p), p), p - ) - - // if q_arith == 3 we evaluate an additional mini addition gate (on top of the regular one), where: - // w_1 + w_4 - w_1_omega + q_m = 0 - // we use this gate to save an addition gate when adding or subtracting non-native field elements - // α * (q_arith - 2) * (w_1 + w_4 - w_1_omega + q_m) - let extra_small_addition_gate_identity := - mulmod( - mload(C_ALPHA_LOC), - mulmod( - addmod(mload(QARITH_EVAL_LOC), sub(p, 2), p), - addmod( - mload(QM_EVAL_LOC), - addmod( - sub(p, mload(W1_OMEGA_EVAL_LOC)), addmod(mload(W1_EVAL_LOC), mload(W4_EVAL_LOC), p), p - ), - p - ), - p - ), - p - ) - - // if q_arith == 2 OR q_arith == 3 we add the 4th wire of the NEXT gate into the arithmetic identity - // N.B. if q_arith > 2, this wire value will be scaled by (q_arith - 1) relative to the other gate wires! - // alpha_base * q_arith * (identity + (q_arith - 1) * (w_4_omega + extra_small_addition_gate_identity)) - mstore( - ARITHMETIC_IDENTITY, - mulmod( - mload(C_ALPHA_BASE_LOC), - mulmod( - mload(QARITH_EVAL_LOC), - addmod( - identity, - mulmod( - addmod(mload(QARITH_EVAL_LOC), sub(p, 1), p), - addmod(mload(W4_OMEGA_EVAL_LOC), extra_small_addition_gate_identity, p), - p - ), - p - ), - p - ), - p - ) - ) - - // update alpha - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_SQR_LOC), p)) - } - - /** - * COMPUTE GENPERMSORT WIDGET EVALUATION - */ - { - /** - * D1 = (w2 - w1) - * D2 = (w3 - w2) - * D3 = (w4 - w3) - * D4 = (w1_omega - w4) - * - * α_a = alpha_base - * α_b = alpha_base * α - * α_c = alpha_base * α^2 - * α_d = alpha_base * α^3 - * - * range_accumulator = ( - * D1(D1 - 1)(D1 - 2)(D1 - 3).α_a + - * D2(D2 - 1)(D2 - 2)(D2 - 3).α_b + - * D3(D3 - 1)(D3 - 2)(D3 - 3).α_c + - * D4(D4 - 1)(D4 - 2)(D4 - 3).α_d + - * ) . 
q_sort - */ - let minus_two := sub(p, 2) - let minus_three := sub(p, 3) - let d1 := addmod(mload(W2_EVAL_LOC), sub(p, mload(W1_EVAL_LOC)), p) - let d2 := addmod(mload(W3_EVAL_LOC), sub(p, mload(W2_EVAL_LOC)), p) - let d3 := addmod(mload(W4_EVAL_LOC), sub(p, mload(W3_EVAL_LOC)), p) - let d4 := addmod(mload(W1_OMEGA_EVAL_LOC), sub(p, mload(W4_EVAL_LOC)), p) - - let range_accumulator := - mulmod( - mulmod( - mulmod(addmod(mulmod(d1, d1, p), sub(p, d1), p), addmod(d1, minus_two, p), p), - addmod(d1, minus_three, p), - p - ), - mload(C_ALPHA_BASE_LOC), - p - ) - range_accumulator := - addmod( - range_accumulator, - mulmod( - mulmod( - mulmod(addmod(mulmod(d2, d2, p), sub(p, d2), p), addmod(d2, minus_two, p), p), - addmod(d2, minus_three, p), - p - ), - mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_LOC), p), - p - ), - p - ) - range_accumulator := - addmod( - range_accumulator, - mulmod( - mulmod( - mulmod(addmod(mulmod(d3, d3, p), sub(p, d3), p), addmod(d3, minus_two, p), p), - addmod(d3, minus_three, p), - p - ), - mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_SQR_LOC), p), - p - ), - p - ) - range_accumulator := - addmod( - range_accumulator, - mulmod( - mulmod( - mulmod(addmod(mulmod(d4, d4, p), sub(p, d4), p), addmod(d4, minus_two, p), p), - addmod(d4, minus_three, p), - p - ), - mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_CUBE_LOC), p), - p - ), - p - ) - range_accumulator := mulmod(range_accumulator, mload(QSORT_EVAL_LOC), p) - - mstore(SORT_IDENTITY, range_accumulator) - - // update alpha - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_QUAD_LOC), p)) - } - - /** - * COMPUTE ELLIPTIC WIDGET EVALUATION - */ - { - /** - * endo_term = (-x_2) * x_1 * (x_3 * 2 + x_1) * q_beta - * endo_sqr_term = x_2^2 - * endo_sqr_term *= (x_3 - x_1) - * endo_sqr_term *= q_beta^2 - * leftovers = x_2^2 - * leftovers *= x_2 - * leftovers += x_1^2 * (x_3 + x_1) @follow-up Invalid comment in BB widget - * leftovers -= (y_2^2 + y_1^2) - * sign_term = y_2 * y_1 - * 
sign_term += sign_term - * sign_term *= q_sign - */ - // q_elliptic * (x3 + x2 + x1)(x2 - x1)(x2 - x1) - y2^2 - y1^2 + 2(y2y1)*q_sign = 0 - let x_diff := addmod(mload(X2_EVAL_LOC), sub(p, mload(X1_EVAL_LOC)), p) - let y2_sqr := mulmod(mload(Y2_EVAL_LOC), mload(Y2_EVAL_LOC), p) - let y1_sqr := mulmod(mload(Y1_EVAL_LOC), mload(Y1_EVAL_LOC), p) - let y1y2 := mulmod(mulmod(mload(Y1_EVAL_LOC), mload(Y2_EVAL_LOC), p), mload(QSIGN_LOC), p) - - let x_add_identity := - addmod( - mulmod( - addmod(mload(X3_EVAL_LOC), addmod(mload(X2_EVAL_LOC), mload(X1_EVAL_LOC), p), p), - mulmod(x_diff, x_diff, p), - p - ), - addmod( - sub( - p, - addmod(y2_sqr, y1_sqr, p) - ), - addmod(y1y2, y1y2, p), - p - ), - p - ) - x_add_identity := - mulmod( - mulmod( - x_add_identity, - addmod( - 1, - sub(p, mload(QM_EVAL_LOC)), - p - ), - p - ), - mload(C_ALPHA_BASE_LOC), - p - ) - - // q_elliptic * (x3 + x2 + x1)(x2 - x1)(x2 - x1) - y2^2 - y1^2 + 2(y2y1)*q_sign = 0 - let y1_plus_y3 := addmod( - mload(Y1_EVAL_LOC), - mload(Y3_EVAL_LOC), - p - ) - let y_diff := addmod(mulmod(mload(Y2_EVAL_LOC), mload(QSIGN_LOC), p), sub(p, mload(Y1_EVAL_LOC)), p) - let y_add_identity := - addmod( - mulmod(y1_plus_y3, x_diff, p), - mulmod(addmod(mload(X3_EVAL_LOC), sub(p, mload(X1_EVAL_LOC)), p), y_diff, p), - p - ) - y_add_identity := - mulmod( - mulmod(y_add_identity, addmod(1, sub(p, mload(QM_EVAL_LOC)), p), p), - mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_LOC), p), - p - ) - - // ELLIPTIC_IDENTITY = (x_identity + y_identity) * Q_ELLIPTIC_EVAL - mstore( - ELLIPTIC_IDENTITY, mulmod(addmod(x_add_identity, y_add_identity, p), mload(QELLIPTIC_EVAL_LOC), p) - ) - } - { - /** - * x_pow_4 = (y_1_sqr - curve_b) * x_1; - * y_1_sqr_mul_4 = y_1_sqr + y_1_sqr; - * y_1_sqr_mul_4 += y_1_sqr_mul_4; - * x_1_pow_4_mul_9 = x_pow_4; - * x_1_pow_4_mul_9 += x_1_pow_4_mul_9; - * x_1_pow_4_mul_9 += x_1_pow_4_mul_9; - * x_1_pow_4_mul_9 += x_1_pow_4_mul_9; - * x_1_pow_4_mul_9 += x_pow_4; - * x_1_sqr_mul_3 = x_1_sqr + x_1_sqr + x_1_sqr; 
- * x_double_identity = (x_3 + x_1 + x_1) * y_1_sqr_mul_4 - x_1_pow_4_mul_9; - * y_double_identity = x_1_sqr_mul_3 * (x_1 - x_3) - (y_1 + y_1) * (y_1 + y_3); - */ - // (x3 + x1 + x1) (4y1*y1) - 9 * x1 * x1 * x1 * x1 = 0 - let x1_sqr := mulmod(mload(X1_EVAL_LOC), mload(X1_EVAL_LOC), p) - let y1_sqr := mulmod(mload(Y1_EVAL_LOC), mload(Y1_EVAL_LOC), p) - let x_pow_4 := mulmod(addmod(y1_sqr, GRUMPKIN_CURVE_B_PARAMETER_NEGATED, p), mload(X1_EVAL_LOC), p) - let y1_sqr_mul_4 := mulmod(y1_sqr, 4, p) - let x1_pow_4_mul_9 := mulmod(x_pow_4, 9, p) - let x1_sqr_mul_3 := mulmod(x1_sqr, 3, p) - let x_double_identity := - addmod( - mulmod( - addmod(mload(X3_EVAL_LOC), addmod(mload(X1_EVAL_LOC), mload(X1_EVAL_LOC), p), p), - y1_sqr_mul_4, - p - ), - sub(p, x1_pow_4_mul_9), - p - ) - // (y1 + y1) (2y1) - (3 * x1 * x1)(x1 - x3) = 0 - let y_double_identity := - addmod( - mulmod(x1_sqr_mul_3, addmod(mload(X1_EVAL_LOC), sub(p, mload(X3_EVAL_LOC)), p), p), - sub( - p, - mulmod( - addmod(mload(Y1_EVAL_LOC), mload(Y1_EVAL_LOC), p), - addmod(mload(Y1_EVAL_LOC), mload(Y3_EVAL_LOC), p), - p - ) - ), - p - ) - x_double_identity := mulmod(x_double_identity, mload(C_ALPHA_BASE_LOC), p) - y_double_identity := - mulmod(y_double_identity, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_LOC), p), p) - x_double_identity := mulmod(x_double_identity, mload(QM_EVAL_LOC), p) - y_double_identity := mulmod(y_double_identity, mload(QM_EVAL_LOC), p) - // ELLIPTIC_IDENTITY += (x_double_identity + y_double_identity) * Q_DOUBLE_EVAL - mstore( - ELLIPTIC_IDENTITY, - addmod( - mload(ELLIPTIC_IDENTITY), - mulmod(addmod(x_double_identity, y_double_identity, p), mload(QELLIPTIC_EVAL_LOC), p), - p - ) - ) - - // update alpha - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_QUAD_LOC), p)) - } - - /** - * COMPUTE AUXILIARY WIDGET EVALUATION - */ - { - { - /** - * Non native field arithmetic gate 2 - * _ _ - * / _ _ _ 14 \ - * q_2 . q_4 | (w_1 . w_2) + (w_1 . w_2) + (w_1 . w_4 + w_2 . w_3 - w_3) . 
2 - w_3 - w_4 | - * \_ _/ - * - * limb_subproduct = w_1 . w_2_omega + w_1_omega . w_2 - * non_native_field_gate_2 = w_1 * w_4 + w_4 * w_3 - w_3_omega - * non_native_field_gate_2 = non_native_field_gate_2 * limb_size - * non_native_field_gate_2 -= w_4_omega - * non_native_field_gate_2 += limb_subproduct - * non_native_field_gate_2 *= q_4 - * limb_subproduct *= limb_size - * limb_subproduct += w_1_omega * w_2_omega - * non_native_field_gate_1 = (limb_subproduct + w_3 + w_4) * q_3 - * non_native_field_gate_3 = (limb_subproduct + w_4 - (w_3_omega + w_4_omega)) * q_m - * non_native_field_identity = (non_native_field_gate_1 + non_native_field_gate_2 + non_native_field_gate_3) * q_2 - */ - - let limb_subproduct := - addmod( - mulmod(mload(W1_EVAL_LOC), mload(W2_OMEGA_EVAL_LOC), p), - mulmod(mload(W1_OMEGA_EVAL_LOC), mload(W2_EVAL_LOC), p), - p - ) - - let non_native_field_gate_2 := - addmod( - addmod( - mulmod(mload(W1_EVAL_LOC), mload(W4_EVAL_LOC), p), - mulmod(mload(W2_EVAL_LOC), mload(W3_EVAL_LOC), p), - p - ), - sub(p, mload(W3_OMEGA_EVAL_LOC)), - p - ) - non_native_field_gate_2 := mulmod(non_native_field_gate_2, LIMB_SIZE, p) - non_native_field_gate_2 := addmod(non_native_field_gate_2, sub(p, mload(W4_OMEGA_EVAL_LOC)), p) - non_native_field_gate_2 := addmod(non_native_field_gate_2, limb_subproduct, p) - non_native_field_gate_2 := mulmod(non_native_field_gate_2, mload(Q4_EVAL_LOC), p) - limb_subproduct := mulmod(limb_subproduct, LIMB_SIZE, p) - limb_subproduct := - addmod(limb_subproduct, mulmod(mload(W1_OMEGA_EVAL_LOC), mload(W2_OMEGA_EVAL_LOC), p), p) - let non_native_field_gate_1 := - mulmod( - addmod(limb_subproduct, sub(p, addmod(mload(W3_EVAL_LOC), mload(W4_EVAL_LOC), p)), p), - mload(Q3_EVAL_LOC), - p - ) - let non_native_field_gate_3 := - mulmod( - addmod( - addmod(limb_subproduct, mload(W4_EVAL_LOC), p), - sub(p, addmod(mload(W3_OMEGA_EVAL_LOC), mload(W4_OMEGA_EVAL_LOC), p)), - p - ), - mload(QM_EVAL_LOC), - p - ) - let non_native_field_identity := - mulmod( 
- addmod(addmod(non_native_field_gate_1, non_native_field_gate_2, p), non_native_field_gate_3, p), - mload(Q2_EVAL_LOC), - p - ) - - mstore(AUX_NON_NATIVE_FIELD_EVALUATION, non_native_field_identity) - } - - { - /** - * limb_accumulator_1 = w_2_omega; - * limb_accumulator_1 *= SUBLIMB_SHIFT; - * limb_accumulator_1 += w_1_omega; - * limb_accumulator_1 *= SUBLIMB_SHIFT; - * limb_accumulator_1 += w_3; - * limb_accumulator_1 *= SUBLIMB_SHIFT; - * limb_accumulator_1 += w_2; - * limb_accumulator_1 *= SUBLIMB_SHIFT; - * limb_accumulator_1 += w_1; - * limb_accumulator_1 -= w_4; - * limb_accumulator_1 *= q_4; - */ - let limb_accumulator_1 := mulmod(mload(W2_OMEGA_EVAL_LOC), SUBLIMB_SHIFT, p) - limb_accumulator_1 := addmod(limb_accumulator_1, mload(W1_OMEGA_EVAL_LOC), p) - limb_accumulator_1 := mulmod(limb_accumulator_1, SUBLIMB_SHIFT, p) - limb_accumulator_1 := addmod(limb_accumulator_1, mload(W3_EVAL_LOC), p) - limb_accumulator_1 := mulmod(limb_accumulator_1, SUBLIMB_SHIFT, p) - limb_accumulator_1 := addmod(limb_accumulator_1, mload(W2_EVAL_LOC), p) - limb_accumulator_1 := mulmod(limb_accumulator_1, SUBLIMB_SHIFT, p) - limb_accumulator_1 := addmod(limb_accumulator_1, mload(W1_EVAL_LOC), p) - limb_accumulator_1 := addmod(limb_accumulator_1, sub(p, mload(W4_EVAL_LOC)), p) - limb_accumulator_1 := mulmod(limb_accumulator_1, mload(Q4_EVAL_LOC), p) - - /** - * limb_accumulator_2 = w_3_omega; - * limb_accumulator_2 *= SUBLIMB_SHIFT; - * limb_accumulator_2 += w_2_omega; - * limb_accumulator_2 *= SUBLIMB_SHIFT; - * limb_accumulator_2 += w_1_omega; - * limb_accumulator_2 *= SUBLIMB_SHIFT; - * limb_accumulator_2 += w_4; - * limb_accumulator_2 *= SUBLIMB_SHIFT; - * limb_accumulator_2 += w_3; - * limb_accumulator_2 -= w_4_omega; - * limb_accumulator_2 *= q_m; - */ - let limb_accumulator_2 := mulmod(mload(W3_OMEGA_EVAL_LOC), SUBLIMB_SHIFT, p) - limb_accumulator_2 := addmod(limb_accumulator_2, mload(W2_OMEGA_EVAL_LOC), p) - limb_accumulator_2 := mulmod(limb_accumulator_2, SUBLIMB_SHIFT, 
p) - limb_accumulator_2 := addmod(limb_accumulator_2, mload(W1_OMEGA_EVAL_LOC), p) - limb_accumulator_2 := mulmod(limb_accumulator_2, SUBLIMB_SHIFT, p) - limb_accumulator_2 := addmod(limb_accumulator_2, mload(W4_EVAL_LOC), p) - limb_accumulator_2 := mulmod(limb_accumulator_2, SUBLIMB_SHIFT, p) - limb_accumulator_2 := addmod(limb_accumulator_2, mload(W3_EVAL_LOC), p) - limb_accumulator_2 := addmod(limb_accumulator_2, sub(p, mload(W4_OMEGA_EVAL_LOC)), p) - limb_accumulator_2 := mulmod(limb_accumulator_2, mload(QM_EVAL_LOC), p) - - mstore( - AUX_LIMB_ACCUMULATOR_EVALUATION, - mulmod(addmod(limb_accumulator_1, limb_accumulator_2, p), mload(Q3_EVAL_LOC), p) - ) - } - - { - /** - * memory_record_check = w_3; - * memory_record_check *= eta; - * memory_record_check += w_2; - * memory_record_check *= eta; - * memory_record_check += w_1; - * memory_record_check *= eta; - * memory_record_check += q_c; - * - * partial_record_check = memory_record_check; - * - * memory_record_check -= w_4; - */ - - let memory_record_check := mulmod(mload(W3_EVAL_LOC), mload(C_ETA_LOC), p) - memory_record_check := addmod(memory_record_check, mload(W2_EVAL_LOC), p) - memory_record_check := mulmod(memory_record_check, mload(C_ETA_LOC), p) - memory_record_check := addmod(memory_record_check, mload(W1_EVAL_LOC), p) - memory_record_check := mulmod(memory_record_check, mload(C_ETA_LOC), p) - memory_record_check := addmod(memory_record_check, mload(QC_EVAL_LOC), p) - - let partial_record_check := memory_record_check - memory_record_check := addmod(memory_record_check, sub(p, mload(W4_EVAL_LOC)), p) - - mstore(AUX_MEMORY_EVALUATION, memory_record_check) - - // index_delta = w_1_omega - w_1 - let index_delta := addmod(mload(W1_OMEGA_EVAL_LOC), sub(p, mload(W1_EVAL_LOC)), p) - // record_delta = w_4_omega - w_4 - let record_delta := addmod(mload(W4_OMEGA_EVAL_LOC), sub(p, mload(W4_EVAL_LOC)), p) - // index_is_monotonically_increasing = index_delta * (index_delta - 1) - let index_is_monotonically_increasing 
:= mulmod(index_delta, addmod(index_delta, sub(p, 1), p), p) - - // adjacent_values_match_if_adjacent_indices_match = record_delta * (1 - index_delta) - let adjacent_values_match_if_adjacent_indices_match := - mulmod(record_delta, addmod(1, sub(p, index_delta), p), p) - - // AUX_ROM_CONSISTENCY_EVALUATION = ((adjacent_values_match_if_adjacent_indices_match * alpha) + index_is_monotonically_increasing) * alpha + partial_record_check - mstore( - AUX_ROM_CONSISTENCY_EVALUATION, - addmod( - mulmod( - addmod( - mulmod(adjacent_values_match_if_adjacent_indices_match, mload(C_ALPHA_LOC), p), - index_is_monotonically_increasing, - p - ), - mload(C_ALPHA_LOC), - p - ), - memory_record_check, - p - ) - ) - - { - /** - * next_gate_access_type = w_3_omega; - * next_gate_access_type *= eta; - * next_gate_access_type += w_2_omega; - * next_gate_access_type *= eta; - * next_gate_access_type += w_1_omega; - * next_gate_access_type *= eta; - * next_gate_access_type = w_4_omega - next_gate_access_type; - */ - let next_gate_access_type := mulmod(mload(W3_OMEGA_EVAL_LOC), mload(C_ETA_LOC), p) - next_gate_access_type := addmod(next_gate_access_type, mload(W2_OMEGA_EVAL_LOC), p) - next_gate_access_type := mulmod(next_gate_access_type, mload(C_ETA_LOC), p) - next_gate_access_type := addmod(next_gate_access_type, mload(W1_OMEGA_EVAL_LOC), p) - next_gate_access_type := mulmod(next_gate_access_type, mload(C_ETA_LOC), p) - next_gate_access_type := addmod(mload(W4_OMEGA_EVAL_LOC), sub(p, next_gate_access_type), p) - - // value_delta = w_3_omega - w_3 - let value_delta := addmod(mload(W3_OMEGA_EVAL_LOC), sub(p, mload(W3_EVAL_LOC)), p) - // adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation = (1 - index_delta) * value_delta * (1 - next_gate_access_type); - - let adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation := - mulmod( - addmod(1, sub(p, index_delta), p), - mulmod(value_delta, addmod(1, sub(p, next_gate_access_type), p), 
p), - p - ) - - // AUX_RAM_CONSISTENCY_EVALUATION - - /** - * access_type = w_4 - partial_record_check - * access_check = access_type^2 - access_type - * next_gate_access_type_is_boolean = next_gate_access_type^2 - next_gate_access_type - * RAM_consistency_check_identity = adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation; - * RAM_consistency_check_identity *= alpha; - * RAM_consistency_check_identity += index_is_monotonically_increasing; - * RAM_consistency_check_identity *= alpha; - * RAM_consistency_check_identity += next_gate_access_type_is_boolean; - * RAM_consistency_check_identity *= alpha; - * RAM_consistency_check_identity += access_check; - */ - - let access_type := addmod(mload(W4_EVAL_LOC), sub(p, partial_record_check), p) - let access_check := mulmod(access_type, addmod(access_type, sub(p, 1), p), p) - let next_gate_access_type_is_boolean := - mulmod(next_gate_access_type, addmod(next_gate_access_type, sub(p, 1), p), p) - let RAM_cci := - mulmod( - adjacent_values_match_if_adjacent_indices_match_and_next_access_is_a_read_operation, - mload(C_ALPHA_LOC), - p - ) - RAM_cci := addmod(RAM_cci, index_is_monotonically_increasing, p) - RAM_cci := mulmod(RAM_cci, mload(C_ALPHA_LOC), p) - RAM_cci := addmod(RAM_cci, next_gate_access_type_is_boolean, p) - RAM_cci := mulmod(RAM_cci, mload(C_ALPHA_LOC), p) - RAM_cci := addmod(RAM_cci, access_check, p) - - mstore(AUX_RAM_CONSISTENCY_EVALUATION, RAM_cci) - } - - { - // timestamp_delta = w_2_omega - w_2 - let timestamp_delta := addmod(mload(W2_OMEGA_EVAL_LOC), sub(p, mload(W2_EVAL_LOC)), p) - - // RAM_timestamp_check_identity = (1 - index_delta) * timestamp_delta - w_3 - let RAM_timestamp_check_identity := - addmod( - mulmod(timestamp_delta, addmod(1, sub(p, index_delta), p), p), sub(p, mload(W3_EVAL_LOC)), p - ) - - /** - * memory_identity = ROM_consistency_check_identity * q_2; - * memory_identity += RAM_timestamp_check_identity * q_4; - * memory_identity += memory_record_check * 
q_m; - * memory_identity *= q_1; - * memory_identity += (RAM_consistency_check_identity * q_arith); - * - * auxiliary_identity = memory_identity + non_native_field_identity + limb_accumulator_identity; - * auxiliary_identity *= q_aux; - * auxiliary_identity *= alpha_base; - */ - let memory_identity := mulmod(mload(AUX_ROM_CONSISTENCY_EVALUATION), mload(Q2_EVAL_LOC), p) - memory_identity := - addmod(memory_identity, mulmod(RAM_timestamp_check_identity, mload(Q4_EVAL_LOC), p), p) - memory_identity := - addmod(memory_identity, mulmod(mload(AUX_MEMORY_EVALUATION), mload(QM_EVAL_LOC), p), p) - memory_identity := mulmod(memory_identity, mload(Q1_EVAL_LOC), p) - memory_identity := - addmod( - memory_identity, mulmod(mload(AUX_RAM_CONSISTENCY_EVALUATION), mload(QARITH_EVAL_LOC), p), p - ) - - let auxiliary_identity := addmod(memory_identity, mload(AUX_NON_NATIVE_FIELD_EVALUATION), p) - auxiliary_identity := addmod(auxiliary_identity, mload(AUX_LIMB_ACCUMULATOR_EVALUATION), p) - auxiliary_identity := mulmod(auxiliary_identity, mload(QAUX_EVAL_LOC), p) - auxiliary_identity := mulmod(auxiliary_identity, mload(C_ALPHA_BASE_LOC), p) - - mstore(AUX_IDENTITY, auxiliary_identity) - - // update alpha - mstore(C_ALPHA_BASE_LOC, mulmod(mload(C_ALPHA_BASE_LOC), mload(C_ALPHA_CUBE_LOC), p)) - } - } - } - - { - /** - * quotient = ARITHMETIC_IDENTITY - * quotient += PERMUTATION_IDENTITY - * quotient += PLOOKUP_IDENTITY - * quotient += SORT_IDENTITY - * quotient += ELLIPTIC_IDENTITY - * quotient += AUX_IDENTITY - * quotient *= ZERO_POLY_INVERSE - */ - mstore( - QUOTIENT_EVAL_LOC, - mulmod( - addmod( - addmod( - addmod( - addmod( - addmod(mload(PERMUTATION_IDENTITY), mload(PLOOKUP_IDENTITY), p), - mload(ARITHMETIC_IDENTITY), - p - ), - mload(SORT_IDENTITY), - p - ), - mload(ELLIPTIC_IDENTITY), - p - ), - mload(AUX_IDENTITY), - p - ), - mload(ZERO_POLY_INVERSE_LOC), - p - ) - ) - } - - /** - * GENERATE NU AND SEPARATOR CHALLENGES - */ - { - let current_challenge := mload(C_CURRENT_LOC) - // 
get a calldata pointer that points to the start of the data we want to copy - let calldata_ptr := add(calldataload(0x04), 0x24) - - calldata_ptr := add(calldata_ptr, NU_CALLDATA_SKIP_LENGTH) - - mstore(NU_CHALLENGE_INPUT_LOC_A, current_challenge) - mstore(NU_CHALLENGE_INPUT_LOC_B, mload(QUOTIENT_EVAL_LOC)) - calldatacopy(NU_CHALLENGE_INPUT_LOC_C, calldata_ptr, NU_INPUT_LENGTH) - - // hash length = (0x20 + num field elements), we include the previous challenge in the hash - let challenge := keccak256(NU_CHALLENGE_INPUT_LOC_A, add(NU_INPUT_LENGTH, 0x40)) - - mstore(C_V0_LOC, mod(challenge, p)) - // We need THIRTY-ONE independent nu challenges! - mstore(0x00, challenge) - mstore8(0x20, 0x01) - mstore(C_V1_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x02) - mstore(C_V2_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x03) - mstore(C_V3_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x04) - mstore(C_V4_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x05) - mstore(C_V5_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x06) - mstore(C_V6_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x07) - mstore(C_V7_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x08) - mstore(C_V8_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x09) - mstore(C_V9_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x0a) - mstore(C_V10_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x0b) - mstore(C_V11_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x0c) - mstore(C_V12_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x0d) - mstore(C_V13_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x0e) - mstore(C_V14_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x0f) - mstore(C_V15_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x10) - mstore(C_V16_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x11) - mstore(C_V17_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x12) - mstore(C_V18_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 
0x13) - mstore(C_V19_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x14) - mstore(C_V20_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x15) - mstore(C_V21_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x16) - mstore(C_V22_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x17) - mstore(C_V23_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x18) - mstore(C_V24_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x19) - mstore(C_V25_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x1a) - mstore(C_V26_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x1b) - mstore(C_V27_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x1c) - mstore(C_V28_LOC, mod(keccak256(0x00, 0x21), p)) - mstore8(0x20, 0x1d) - mstore(C_V29_LOC, mod(keccak256(0x00, 0x21), p)) - - // @follow-up - Why are both v29 and v30 using appending 0x1d to the prior challenge and hashing, should it not change? - mstore8(0x20, 0x1d) - challenge := keccak256(0x00, 0x21) - mstore(C_V30_LOC, mod(challenge, p)) - - // separator - mstore(0x00, challenge) - mstore(0x20, mload(PI_Z_Y_LOC)) - mstore(0x40, mload(PI_Z_X_LOC)) - mstore(0x60, mload(PI_Z_OMEGA_Y_LOC)) - mstore(0x80, mload(PI_Z_OMEGA_X_LOC)) - - mstore(C_U_LOC, mod(keccak256(0x00, 0xa0), p)) - } - - let success := 0 - // VALIDATE T1 - { - let x := mload(T1_X_LOC) - let y := mload(T1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)) - mstore(ACCUMULATOR_X_LOC, x) - mstore(add(ACCUMULATOR_X_LOC, 0x20), y) - } - // VALIDATE T2 - { - let x := mload(T2_X_LOC) // 0x1400 - let y := mload(T2_Y_LOC) // 0x1420 - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(ZETA_POW_N_LOC)) - // accumulator_2 = [T2].zeta^n - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = [T1] + 
accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE T3 - { - let x := mload(T3_X_LOC) - let y := mload(T3_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(mload(ZETA_POW_N_LOC), mload(ZETA_POW_N_LOC), p)) - // accumulator_2 = [T3].zeta^{2n} - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE T4 - { - let x := mload(T4_X_LOC) - let y := mload(T4_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(mulmod(mload(ZETA_POW_N_LOC), mload(ZETA_POW_N_LOC), p), mload(ZETA_POW_N_LOC), p)) - // accumulator_2 = [T4].zeta^{3n} - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE W1 - { - let x := mload(W1_X_LOC) - let y := mload(W1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V0_LOC), p)) - // accumulator_2 = v0.(u + 1).[W1] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE W2 - { - let x := mload(W2_X_LOC) - let y := mload(W2_Y_LOC) - let xx := 
mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V1_LOC), p)) - // accumulator_2 = v1.(u + 1).[W2] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE W3 - { - let x := mload(W3_X_LOC) - let y := mload(W3_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V2_LOC), p)) - // accumulator_2 = v2.(u + 1).[W3] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE W4 - { - let x := mload(W4_X_LOC) - let y := mload(W4_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V3_LOC), p)) - // accumulator_2 = v3.(u + 1).[W4] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE S - { - let x := mload(S_X_LOC) - let y := mload(S_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V4_LOC), p)) 
- // accumulator_2 = v4.(u + 1).[S] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE Z - { - let x := mload(Z_X_LOC) - let y := mload(Z_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V5_LOC), p)) - // accumulator_2 = v5.(u + 1).[Z] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE Z_LOOKUP - { - let x := mload(Z_LOOKUP_X_LOC) - let y := mload(Z_LOOKUP_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V6_LOC), p)) - // accumulator_2 = v6.(u + 1).[Z_LOOKUP] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE Q1 - { - let x := mload(Q1_X_LOC) - let y := mload(Q1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V7_LOC)) - // accumulator_2 = v7.[Q1] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 
0x40)) - - // VALIDATE Q2 - { - let x := mload(Q2_X_LOC) - let y := mload(Q2_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V8_LOC)) - // accumulator_2 = v8.[Q2] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE Q3 - { - let x := mload(Q3_X_LOC) - let y := mload(Q3_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V9_LOC)) - // accumulator_2 = v9.[Q3] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE Q4 - { - let x := mload(Q4_X_LOC) - let y := mload(Q4_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V10_LOC)) - // accumulator_2 = v10.[Q4] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE QM - { - let x := mload(QM_X_LOC) - let y := mload(QM_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V11_LOC)) - // accumulator_2 = v11.[Q;] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, 
ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE QC - { - let x := mload(QC_X_LOC) - let y := mload(QC_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V12_LOC)) - // accumulator_2 = v12.[QC] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE QARITH - { - let x := mload(QARITH_X_LOC) - let y := mload(QARITH_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V13_LOC)) - // accumulator_2 = v13.[QARITH] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE QSORT - { - let x := mload(QSORT_X_LOC) - let y := mload(QSORT_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V14_LOC)) - // accumulator_2 = v14.[QSORT] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE QELLIPTIC - { - let x := mload(QELLIPTIC_X_LOC) - let y := mload(QELLIPTIC_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, 
eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V15_LOC)) - // accumulator_2 = v15.[QELLIPTIC] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE QAUX - { - let x := mload(QAUX_X_LOC) - let y := mload(QAUX_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V16_LOC)) - // accumulator_2 = v15.[Q_AUX] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE SIGMA1 - { - let x := mload(SIGMA1_X_LOC) - let y := mload(SIGMA1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V17_LOC)) - // accumulator_2 = v17.[sigma1] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE SIGMA2 - { - let x := mload(SIGMA2_X_LOC) - let y := mload(SIGMA2_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V18_LOC)) - // accumulator_2 = v18.[sigma2] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, 
staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE SIGMA3 - { - let x := mload(SIGMA3_X_LOC) - let y := mload(SIGMA3_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V19_LOC)) - // accumulator_2 = v19.[sigma3] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE SIGMA4 - { - let x := mload(SIGMA4_X_LOC) - let y := mload(SIGMA4_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V20_LOC)) - // accumulator_2 = v20.[sigma4] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE TABLE1 - { - let x := mload(TABLE1_X_LOC) - let y := mload(TABLE1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V21_LOC), p)) - // accumulator_2 = u.[table1] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE TABLE2 - { - let x := mload(TABLE2_X_LOC) - let y := mload(TABLE2_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, 
q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V22_LOC), p)) - // accumulator_2 = u.[table2] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE TABLE3 - { - let x := mload(TABLE3_X_LOC) - let y := mload(TABLE3_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V23_LOC), p)) - // accumulator_2 = u.[table3] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE TABLE4 - { - let x := mload(TABLE4_X_LOC) - let y := mload(TABLE4_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(addmod(mload(C_U_LOC), 0x1, p), mload(C_V24_LOC), p)) - // accumulator_2 = u.[table4] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE TABLE_TYPE - { - let x := mload(TABLE_TYPE_X_LOC) - let y := mload(TABLE_TYPE_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V25_LOC)) - // accumulator_2 = v25.[TableType] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 
0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE ID1 - { - let x := mload(ID1_X_LOC) - let y := mload(ID1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V26_LOC)) - // accumulator_2 = v26.[ID1] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE ID2 - { - let x := mload(ID2_X_LOC) - let y := mload(ID2_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V27_LOC)) - // accumulator_2 = v27.[ID2] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE ID3 - { - let x := mload(ID3_X_LOC) - let y := mload(ID3_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V28_LOC)) - // accumulator_2 = v28.[ID3] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE ID4 - { - let x := mload(ID4_X_LOC) - let y := mload(ID4_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - 
mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mload(C_V29_LOC)) - // accumulator_2 = v29.[ID4] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - /** - * COMPUTE BATCH EVALUATION SCALAR MULTIPLIER - */ - { - /** - * batch_evaluation = v0 * (w_1_omega * u + w_1_eval) - * batch_evaluation += v1 * (w_2_omega * u + w_2_eval) - * batch_evaluation += v2 * (w_3_omega * u + w_3_eval) - * batch_evaluation += v3 * (w_4_omega * u + w_4_eval) - * batch_evaluation += v4 * (s_omega_eval * u + s_eval) - * batch_evaluation += v5 * (z_omega_eval * u + z_eval) - * batch_evaluation += v6 * (z_lookup_omega_eval * u + z_lookup_eval) - */ - let batch_evaluation := - mulmod( - mload(C_V0_LOC), - addmod(mulmod(mload(W1_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(W1_EVAL_LOC), p), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V1_LOC), - addmod(mulmod(mload(W2_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(W2_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V2_LOC), - addmod(mulmod(mload(W3_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(W3_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V3_LOC), - addmod(mulmod(mload(W4_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(W4_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V4_LOC), - addmod(mulmod(mload(S_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(S_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V5_LOC), - addmod(mulmod(mload(Z_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(Z_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V6_LOC), - 
addmod(mulmod(mload(Z_LOOKUP_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(Z_LOOKUP_EVAL_LOC), p), - p - ), - p - ) - - /** - * batch_evaluation += v7 * Q1_EVAL - * batch_evaluation += v8 * Q2_EVAL - * batch_evaluation += v9 * Q3_EVAL - * batch_evaluation += v10 * Q4_EVAL - * batch_evaluation += v11 * QM_EVAL - * batch_evaluation += v12 * QC_EVAL - * batch_evaluation += v13 * QARITH_EVAL - * batch_evaluation += v14 * QSORT_EVAL_LOC - * batch_evaluation += v15 * QELLIPTIC_EVAL_LOC - * batch_evaluation += v16 * QAUX_EVAL_LOC - * batch_evaluation += v17 * SIGMA1_EVAL_LOC - * batch_evaluation += v18 * SIGMA2_EVAL_LOC - * batch_evaluation += v19 * SIGMA3_EVAL_LOC - * batch_evaluation += v20 * SIGMA4_EVAL_LOC - */ - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V7_LOC), mload(Q1_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V8_LOC), mload(Q2_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V9_LOC), mload(Q3_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V10_LOC), mload(Q4_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V11_LOC), mload(QM_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V12_LOC), mload(QC_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V13_LOC), mload(QARITH_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V14_LOC), mload(QSORT_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V15_LOC), mload(QELLIPTIC_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V16_LOC), mload(QAUX_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V17_LOC), mload(SIGMA1_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V18_LOC), mload(SIGMA2_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V19_LOC), 
mload(SIGMA3_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V20_LOC), mload(SIGMA4_EVAL_LOC), p), p) - - /** - * batch_evaluation += v21 * (table1(zw) * u + table1(z)) - * batch_evaluation += v22 * (table2(zw) * u + table2(z)) - * batch_evaluation += v23 * (table3(zw) * u + table3(z)) - * batch_evaluation += v24 * (table4(zw) * u + table4(z)) - * batch_evaluation += v25 * table_type_eval - * batch_evaluation += v26 * id1_eval - * batch_evaluation += v27 * id2_eval - * batch_evaluation += v28 * id3_eval - * batch_evaluation += v29 * id4_eval - * batch_evaluation += quotient_eval - */ - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V21_LOC), - addmod(mulmod(mload(TABLE1_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(TABLE1_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V22_LOC), - addmod(mulmod(mload(TABLE2_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(TABLE2_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V23_LOC), - addmod(mulmod(mload(TABLE3_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(TABLE3_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := - addmod( - batch_evaluation, - mulmod( - mload(C_V24_LOC), - addmod(mulmod(mload(TABLE4_OMEGA_EVAL_LOC), mload(C_U_LOC), p), mload(TABLE4_EVAL_LOC), p), - p - ), - p - ) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V25_LOC), mload(TABLE_TYPE_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V26_LOC), mload(ID1_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V27_LOC), mload(ID2_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V28_LOC), mload(ID3_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mulmod(mload(C_V29_LOC), mload(ID4_EVAL_LOC), p), p) - batch_evaluation := addmod(batch_evaluation, mload(QUOTIENT_EVAL_LOC), p) - - mstore(0x00, 
0x01) // [1].x - mstore(0x20, 0x02) // [1].y - mstore(0x40, sub(p, batch_evaluation)) - // accumulator_2 = -[1].(batch_evaluation) - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - mstore(OPENING_COMMITMENT_SUCCESS_FLAG, success) - } - - /** - * PERFORM PAIRING PREAMBLE - */ - { - let u := mload(C_U_LOC) - let zeta := mload(C_ZETA_LOC) - // VALIDATE PI_Z - { - let x := mload(PI_Z_X_LOC) - let y := mload(PI_Z_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q)) - mstore(0x00, x) - mstore(0x20, y) - } - // compute zeta.[PI_Z] and add into accumulator - mstore(0x40, zeta) - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // accumulator = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, ACCUMULATOR_X_LOC, 0x40)) - - // VALIDATE PI_Z_OMEGA - { - let x := mload(PI_Z_OMEGA_X_LOC) - let y := mload(PI_Z_OMEGA_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - mstore(0x40, mulmod(mulmod(u, zeta, p), mload(OMEGA_LOC), p)) - // accumulator_2 = u.zeta.omega.[PI_Z_OMEGA] - success := and(success, staticcall(gas(), 7, 0x00, 0x60, ACCUMULATOR2_X_LOC, 0x40)) - // PAIRING_RHS = accumulator + accumulator_2 - success := and(success, staticcall(gas(), 6, ACCUMULATOR_X_LOC, 0x80, PAIRING_RHS_X_LOC, 0x40)) - - mstore(0x00, mload(PI_Z_X_LOC)) - mstore(0x20, mload(PI_Z_Y_LOC)) - mstore(0x40, mload(PI_Z_OMEGA_X_LOC)) - mstore(0x60, mload(PI_Z_OMEGA_Y_LOC)) - mstore(0x80, u) - success := and(success, staticcall(gas(), 7, 0x40, 0x60, 0x40, 0x40)) - // PAIRING_LHS = [PI_Z] + [PI_Z_OMEGA] * u - success := and(success, 
staticcall(gas(), 6, 0x00, 0x80, PAIRING_LHS_X_LOC, 0x40)) - // negate lhs y-coordinate - mstore(PAIRING_LHS_Y_LOC, sub(q, mload(PAIRING_LHS_Y_LOC))) - - if mload(CONTAINS_RECURSIVE_PROOF_LOC) { - // VALIDATE RECURSIVE P1 - { - let x := mload(RECURSIVE_P1_X_LOC) - let y := mload(RECURSIVE_P1_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - - // compute u.u.[recursive_p1] and write into 0x60 - mstore(0x40, mulmod(u, u, p)) - success := and(success, staticcall(gas(), 7, 0x00, 0x60, 0x60, 0x40)) - // VALIDATE RECURSIVE P2 - { - let x := mload(RECURSIVE_P2_X_LOC) - let y := mload(RECURSIVE_P2_Y_LOC) - let xx := mulmod(x, x, q) - // validate on curve - success := and(success, eq(mulmod(y, y, q), addmod(mulmod(x, xx, q), 3, q))) - mstore(0x00, x) - mstore(0x20, y) - } - // compute u.u.[recursive_p2] and write into 0x00 - // 0x40 still contains u*u - success := and(success, staticcall(gas(), 7, 0x00, 0x60, 0x00, 0x40)) - - // compute u.u.[recursiveP1] + rhs and write into rhs - mstore(0xa0, mload(PAIRING_RHS_X_LOC)) - mstore(0xc0, mload(PAIRING_RHS_Y_LOC)) - success := and(success, staticcall(gas(), 6, 0x60, 0x80, PAIRING_RHS_X_LOC, 0x40)) - - // compute u.u.[recursiveP2] + lhs and write into lhs - mstore(0x40, mload(PAIRING_LHS_X_LOC)) - mstore(0x60, mload(PAIRING_LHS_Y_LOC)) - success := and(success, staticcall(gas(), 6, 0x00, 0x80, PAIRING_LHS_X_LOC, 0x40)) - } - - if iszero(success) { - mstore(0x0, EC_SCALAR_MUL_FAILURE_SELECTOR) - revert(0x00, 0x04) - } - mstore(PAIRING_PREAMBLE_SUCCESS_FLAG, success) - } - - /** - * PERFORM PAIRING - */ - { - // rhs paired with [1]_2 - // lhs paired with [x]_2 - - mstore(0x00, mload(PAIRING_RHS_X_LOC)) - mstore(0x20, mload(PAIRING_RHS_Y_LOC)) - mstore(0x40, 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2) // this is [1]_2 - mstore(0x60, 
0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed) - mstore(0x80, 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b) - mstore(0xa0, 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa) - - mstore(0xc0, mload(PAIRING_LHS_X_LOC)) - mstore(0xe0, mload(PAIRING_LHS_Y_LOC)) - mstore(0x100, mload(G2X_X0_LOC)) - mstore(0x120, mload(G2X_X1_LOC)) - mstore(0x140, mload(G2X_Y0_LOC)) - mstore(0x160, mload(G2X_Y1_LOC)) - - success := staticcall(gas(), 8, 0x00, 0x180, 0x00, 0x20) - mstore(PAIRING_SUCCESS_FLAG, success) - mstore(RESULT_FLAG, mload(0x00)) - } - if iszero( - and( - and(and(mload(PAIRING_SUCCESS_FLAG), mload(RESULT_FLAG)), mload(PAIRING_PREAMBLE_SUCCESS_FLAG)), - mload(OPENING_COMMITMENT_SUCCESS_FLAG) - ) - ) { - mstore(0x0, PROOF_FAILURE_SELECTOR) - revert(0x00, 0x04) - } - { - mstore(0x00, 0x01) - return(0x00, 0x20) // Proof succeeded! - } - } - } -} - -contract UltraVerifier is BaseUltraVerifier { - function getVerificationKeyHash() public pure override(BaseUltraVerifier) returns (bytes32) { - return UltraVerificationKey.verificationKeyHash(); - } - - function loadVerificationKey(uint256 vk, uint256 _omegaInverseLoc) internal pure virtual override(BaseUltraVerifier) { - UltraVerificationKey.loadVerificationKey(vk, _omegaInverseLoc); - } -} diff --git a/noir/tooling/bb_abstraction_leaks/src/lib.rs b/noir/tooling/bb_abstraction_leaks/src/lib.rs index fec53809ad43..56a4f58cd211 100644 --- a/noir/tooling/bb_abstraction_leaks/src/lib.rs +++ b/noir/tooling/bb_abstraction_leaks/src/lib.rs @@ -7,13 +7,6 @@ pub const ACVM_BACKEND_BARRETENBERG: &str = "acvm-backend-barretenberg"; pub const BB_DOWNLOAD_URL: &str = env!("BB_BINARY_URL"); pub const BB_VERSION: &str = env!("BB_VERSION"); -/// Embed the Solidity verifier file -const ULTRA_VERIFIER_CONTRACT: &str = include_str!("contract.sol"); - -pub fn complete_barretenberg_verifier_contract(contract: String) -> String { - format!("{contract}{ULTRA_VERIFIER_CONTRACT}") -} - /// 
Removes the public inputs which are prepended to a proof by Barretenberg. pub fn remove_public_inputs(num_pub_inputs: usize, proof: &[u8]) -> Vec { // Barretenberg prepends the public inputs onto the proof so we need to remove diff --git a/noir/tooling/debugger/Cargo.toml b/noir/tooling/debugger/Cargo.toml index 4d37f801d787..4d240c61f90e 100644 --- a/noir/tooling/debugger/Cargo.toml +++ b/noir/tooling/debugger/Cargo.toml @@ -7,7 +7,6 @@ edition.workspace = true license.workspace = true [build-dependencies] -rustc_version = "0.4.0" build-data.workspace = true [dependencies] diff --git a/noir/tooling/debugger/build.rs b/noir/tooling/debugger/build.rs index 5d14ec2bae2a..cedeebcae865 100644 --- a/noir/tooling/debugger/build.rs +++ b/noir/tooling/debugger/build.rs @@ -1,24 +1,14 @@ -use rustc_version::{version, Version}; use std::fs::File; use std::io::Write; use std::path::{Path, PathBuf}; use std::{env, fs}; -fn check_rustc_version() { - assert!( - version().unwrap() >= Version::parse("1.71.1").unwrap(), - "The minimal supported rustc version is 1.71.1." - ); -} - const GIT_COMMIT: &&str = &"GIT_COMMIT"; fn main() { // Rebuild if the tests have changed println!("cargo:rerun-if-changed=tests"); - check_rustc_version(); - // Only use build_data if the environment variable isn't set // The environment variable is always set when working via Nix if std::env::var(GIT_COMMIT).is_err() { diff --git a/noir/tooling/debugger/tests/debug.rs b/noir/tooling/debugger/tests/debug.rs index b2f441f56067..e8b17b8a7af2 100644 --- a/noir/tooling/debugger/tests/debug.rs +++ b/noir/tooling/debugger/tests/debug.rs @@ -28,7 +28,7 @@ mod tests { dbg_session .execute( &format!("{} debug --program-dir {}", nargo_bin, test_program_dir), - &format!(".*\\Starting debugger.*"), + ".*\\Starting debugger.*", ) .expect("Could not start debugger"); @@ -49,7 +49,7 @@ mod tests { // having successfully solved the circuit witness. 
dbg_session.send_line("quit").expect("Failed to quit debugger"); dbg_session - .exp_regex(&format!(".*Circuit witness successfully solved.*")) + .exp_regex(".*Circuit witness successfully solved.*") .expect("Expected circuit witness to be successfully solved."); } } diff --git a/noir/tooling/lsp/src/requests/goto_definition.rs b/noir/tooling/lsp/src/requests/goto_definition.rs index 2ff5901ff9cc..267519dfa1e1 100644 --- a/noir/tooling/lsp/src/requests/goto_definition.rs +++ b/noir/tooling/lsp/src/requests/goto_definition.rs @@ -116,6 +116,11 @@ where } } +/// Calculates the byte offset of a given character in a line. +/// LSP Clients (editors, eg. neovim) use a different coordinate (LSP Positions) system than the compiler. +/// +/// LSP Positions navigate through line numbers and character numbers, eg. `(line: 1, character: 5)` +/// meanwhile byte indexes are used within the compiler to navigate through the source code. fn character_to_line_offset(line: &str, character: u32) -> Result { let line_len = line.len(); let mut character_offset = 0; @@ -199,3 +204,25 @@ mod goto_definition_tests { assert!(&response.is_some()); } } + +#[cfg(test)] +mod character_to_line_offset_tests { + use super::*; + + #[test] + fn test_character_to_line_offset() { + let line = "Hello, dark!"; + let character = 8; + + let result = character_to_line_offset(line, character).unwrap(); + assert_eq!(result, 8); + + // In the case of a multi-byte character, the offset should be the byte index of the character + // byte offset for 8 character (黑) is expected to be 10 + let line = "Hello, 黑!"; + let character = 8; + + let result = character_to_line_offset(line, character).unwrap(); + assert_eq!(result, 10); + } +} diff --git a/noir/tooling/nargo/src/artifacts/contract.rs b/noir/tooling/nargo/src/artifacts/contract.rs index 4ade4f5660eb..04699126762d 100644 --- a/noir/tooling/nargo/src/artifacts/contract.rs +++ b/noir/tooling/nargo/src/artifacts/contract.rs @@ -1,21 +1,16 @@ use 
acvm::acir::circuit::Circuit; use noirc_abi::{Abi, ContractEvent}; -use noirc_driver::ContractFunctionType; +use noirc_driver::{ContractFunction, ContractFunctionType}; use serde::{Deserialize, Serialize}; -/// `PreprocessedContract` represents a Noir contract which has been preprocessed by a particular backend proving system. -/// -/// This differs from a generic Noir contract artifact in that: -/// - The ACIR bytecode has had an optimization pass applied to tailor it for the backend. -/// - Proving and verification keys have been pregenerated based on this ACIR. #[derive(Serialize, Deserialize)] -pub struct PreprocessedContract { +pub struct ContractArtifact { /// Version of noir used to compile this contract pub noir_version: String, /// The name of the contract. pub name: String, /// Each of the contract's functions are compiled into a separate program stored in this `Vec`. - pub functions: Vec, + pub functions: Vec, /// All the events defined inside the contract scope. pub events: Vec, } @@ -25,7 +20,7 @@ pub struct PreprocessedContract { /// A contract function unlike a regular Noir program however can have additional properties. /// One of these being a function type. 
#[derive(Debug, Serialize, Deserialize)] -pub struct PreprocessedContractFunction { +pub struct ContractFunctionArtifact { pub name: String, pub function_type: ContractFunctionType, @@ -40,3 +35,15 @@ pub struct PreprocessedContractFunction { )] pub bytecode: Circuit, } + +impl From for ContractFunctionArtifact { + fn from(func: ContractFunction) -> Self { + ContractFunctionArtifact { + name: func.name, + function_type: func.function_type, + is_internal: func.is_internal, + abi: func.abi, + bytecode: func.bytecode, + } + } +} diff --git a/noir/tooling/nargo/src/artifacts/program.rs b/noir/tooling/nargo/src/artifacts/program.rs index 664db0adca42..96e63e6fe502 100644 --- a/noir/tooling/nargo/src/artifacts/program.rs +++ b/noir/tooling/nargo/src/artifacts/program.rs @@ -1,17 +1,13 @@ use acvm::acir::circuit::Circuit; use noirc_abi::Abi; +use noirc_driver::CompiledProgram; use serde::{Deserialize, Serialize}; -/// `PreprocessedProgram` represents a Noir program which has been preprocessed by a particular backend proving system. -/// -/// This differs from a generic Noir program artifact in that: -/// - The ACIR bytecode has had an optimization pass applied to tailor it for the backend. -/// - Proving and verification keys have been pregenerated based on this ACIR. #[derive(Serialize, Deserialize, Debug)] -pub struct PreprocessedProgram { +pub struct ProgramArtifact { pub noir_version: String, - /// Hash of the [`Program`][noirc_frontend::monomorphization::ast::Program] from which this [`PreprocessedProgram`] + /// Hash of the [`Program`][noirc_frontend::monomorphization::ast::Program] from which this [`ProgramArtifact`] /// was compiled. /// /// Used to short-circuit compilation in the case of the source code not changing since the last compilation. 
@@ -25,3 +21,14 @@ pub struct PreprocessedProgram { )] pub bytecode: Circuit, } + +impl From for ProgramArtifact { + fn from(program: CompiledProgram) -> Self { + ProgramArtifact { + hash: program.hash, + abi: program.abi, + noir_version: program.noir_version, + bytecode: program.circuit, + } + } +} diff --git a/noir/tooling/nargo/src/constants.rs b/noir/tooling/nargo/src/constants.rs index ff8da403c695..0b50d61fe376 100644 --- a/noir/tooling/nargo/src/constants.rs +++ b/noir/tooling/nargo/src/constants.rs @@ -7,6 +7,8 @@ pub const PROOFS_DIR: &str = "proofs"; pub const SRC_DIR: &str = "src"; /// The directory to store circuits' serialized ACIR representations. pub const TARGET_DIR: &str = "target"; +/// The directory to store serialized ACIR representations of exported library functions. +pub const EXPORT_DIR: &str = "export"; // Files /// The file from which Nargo pulls prover inputs diff --git a/noir/tooling/nargo/src/workspace.rs b/noir/tooling/nargo/src/workspace.rs index 65f9ab7e0d9a..5696a7585315 100644 --- a/noir/tooling/nargo/src/workspace.rs +++ b/noir/tooling/nargo/src/workspace.rs @@ -10,7 +10,7 @@ use std::{ }; use crate::{ - constants::{CONTRACT_DIR, PROOFS_DIR, TARGET_DIR}, + constants::{CONTRACT_DIR, EXPORT_DIR, PROOFS_DIR, TARGET_DIR}, package::Package, }; @@ -40,6 +40,10 @@ impl Workspace { pub fn target_directory_path(&self) -> PathBuf { self.root_dir.join(TARGET_DIR) } + + pub fn export_directory_path(&self) -> PathBuf { + self.root_dir.join(EXPORT_DIR) + } } pub enum IntoIter<'a, T> { diff --git a/noir/tooling/nargo_cli/Cargo.toml b/noir/tooling/nargo_cli/Cargo.toml index f280682e15cd..2652adaf3277 100644 --- a/noir/tooling/nargo_cli/Cargo.toml +++ b/noir/tooling/nargo_cli/Cargo.toml @@ -51,7 +51,6 @@ dap.workspace = true # Backends backend-interface = { path = "../backend_interface" } -bb_abstraction_leaks.workspace = true # Logs tracing-subscriber.workspace = true diff --git a/noir/tooling/nargo_cli/build.rs b/noir/tooling/nargo_cli/build.rs 
index 6f6d65ee89cc..9a0492c99adb 100644 --- a/noir/tooling/nargo_cli/build.rs +++ b/noir/tooling/nargo_cli/build.rs @@ -14,9 +14,6 @@ fn check_rustc_version() { const GIT_COMMIT: &&str = &"GIT_COMMIT"; fn main() { - // Rebuild if the tests have changed - println!("cargo:rerun-if-changed=tests"); - check_rustc_version(); // Only use build_data if the environment variable isn't set @@ -39,6 +36,10 @@ fn main() { }; let test_dir = root_dir.join("test_programs"); + // Rebuild if the tests have changed + println!("cargo:rerun-if-changed=tests"); + println!("cargo:rerun-if-changed={}", test_dir.as_os_str().to_str().unwrap()); + generate_execution_success_tests(&mut test_file, &test_dir); generate_noir_test_success_tests(&mut test_file, &test_dir); generate_noir_test_failure_tests(&mut test_file, &test_dir); diff --git a/noir/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs b/noir/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs index fe79c0b8c233..1eb8153ce9bb 100644 --- a/noir/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs +++ b/noir/tooling/nargo_cli/src/cli/codegen_verifier_cmd.rs @@ -7,7 +7,6 @@ use crate::backends::Backend; use crate::errors::CliError; use acvm::ExpressionWidth; -use bb_abstraction_leaks::ACVM_BACKEND_BARRETENBERG; use clap::Args; use fm::FileManager; use nargo::insert_all_files_for_workspace_into_file_manager; @@ -83,12 +82,5 @@ fn smart_contract_for_package( let program = compile_bin_package(file_manager, workspace, package, compile_options, expression_width)?; - let mut smart_contract_string = backend.eth_contract(&program.circuit)?; - - if backend.name() == ACVM_BACKEND_BARRETENBERG { - smart_contract_string = - bb_abstraction_leaks::complete_barretenberg_verifier_contract(smart_contract_string); - } - - Ok(smart_contract_string) + Ok(backend.eth_contract(&program.circuit)?) 
} diff --git a/noir/tooling/nargo_cli/src/cli/compile_cmd.rs b/noir/tooling/nargo_cli/src/cli/compile_cmd.rs index 661081778c3e..31105ebe68f2 100644 --- a/noir/tooling/nargo_cli/src/cli/compile_cmd.rs +++ b/noir/tooling/nargo_cli/src/cli/compile_cmd.rs @@ -3,10 +3,9 @@ use std::path::Path; use acvm::ExpressionWidth; use fm::FileManager; use iter_extended::vecmap; -use nargo::artifacts::contract::PreprocessedContract; -use nargo::artifacts::contract::PreprocessedContractFunction; +use nargo::artifacts::contract::{ContractArtifact, ContractFunctionArtifact}; use nargo::artifacts::debug::DebugArtifact; -use nargo::artifacts::program::PreprocessedProgram; +use nargo::artifacts::program::ProgramArtifact; use nargo::errors::CompileError; use nargo::insert_all_files_for_workspace_into_file_manager; use nargo::package::Package; @@ -173,15 +172,15 @@ fn compile_program( let program_artifact_path = workspace.package_build_path(package); let mut debug_artifact_path = program_artifact_path.clone(); debug_artifact_path.set_file_name(format!("debug_{}.json", package.name)); - let cached_program = if let (Ok(preprocessed_program), Ok(mut debug_artifact)) = ( + let cached_program = if let (Ok(program_artifact), Ok(mut debug_artifact)) = ( read_program_from_file(program_artifact_path), read_debug_artifact_from_file(debug_artifact_path), ) { Some(CompiledProgram { - hash: preprocessed_program.hash, - circuit: preprocessed_program.bytecode, - abi: preprocessed_program.abi, - noir_version: preprocessed_program.noir_version, + hash: program_artifact.hash, + circuit: program_artifact.bytecode, + abi: program_artifact.abi, + noir_version: program_artifact.noir_version, debug: debug_artifact.debug_symbols.remove(0), file_map: debug_artifact.file_map, warnings: debug_artifact.warnings, @@ -233,22 +232,17 @@ fn compile_contract( Ok((optimized_contract, warnings)) } -fn save_program( +pub(super) fn save_program( program: CompiledProgram, package: &Package, circuit_dir: &Path, only_acir_opt: 
bool, ) { - let preprocessed_program = PreprocessedProgram { - hash: program.hash, - abi: program.abi, - noir_version: program.noir_version, - bytecode: program.circuit, - }; + let program_artifact = ProgramArtifact::from(program.clone()); if only_acir_opt { - only_acir(&preprocessed_program, circuit_dir); + only_acir(&program_artifact, circuit_dir); } else { - save_program_to_file(&preprocessed_program, &package.name, circuit_dir); + save_program_to_file(&program_artifact, &package.name, circuit_dir); } let debug_artifact = DebugArtifact { @@ -263,7 +257,7 @@ fn save_program( fn save_contract(contract: CompiledContract, package: &Package, circuit_dir: &Path) { // TODO(#1389): I wonder if it is incorrect for nargo-core to know anything about contracts. // As can be seen here, It seems like a leaky abstraction where ContractFunctions (essentially CompiledPrograms) - // are compiled via nargo-core and then the PreprocessedContract is constructed here. + // are compiled via nargo-core and then the ContractArtifact is constructed here. // This is due to EACH function needing it's own CRS, PKey, and VKey from the backend. 
let debug_artifact = DebugArtifact { debug_symbols: contract.functions.iter().map(|function| function.debug.clone()).collect(), @@ -271,7 +265,7 @@ fn save_contract(contract: CompiledContract, package: &Package, circuit_dir: &Pa warnings: contract.warnings, }; - let preprocessed_functions = vecmap(contract.functions, |func| PreprocessedContractFunction { + let functions = vecmap(contract.functions, |func| ContractFunctionArtifact { name: func.name, function_type: func.function_type, is_internal: func.is_internal, @@ -279,22 +273,22 @@ fn save_contract(contract: CompiledContract, package: &Package, circuit_dir: &Pa bytecode: func.bytecode, }); - let preprocessed_contract = PreprocessedContract { + let contract_artifact = ContractArtifact { noir_version: contract.noir_version, name: contract.name, - functions: preprocessed_functions, + functions, events: contract.events, }; save_contract_to_file( - &preprocessed_contract, - &format!("{}-{}", package.name, preprocessed_contract.name), + &contract_artifact, + &format!("{}-{}", package.name, contract_artifact.name), circuit_dir, ); save_debug_artifact_to_file( &debug_artifact, - &format!("{}-{}", package.name, preprocessed_contract.name), + &format!("{}-{}", package.name, contract_artifact.name), circuit_dir, ); } diff --git a/noir/tooling/nargo_cli/src/cli/export_cmd.rs b/noir/tooling/nargo_cli/src/cli/export_cmd.rs new file mode 100644 index 000000000000..ac3e93e09b7f --- /dev/null +++ b/noir/tooling/nargo_cli/src/cli/export_cmd.rs @@ -0,0 +1,120 @@ +use nargo::errors::CompileError; +use noirc_errors::FileDiagnostic; +use rayon::prelude::*; + +use fm::FileManager; +use iter_extended::try_vecmap; +use nargo::insert_all_files_for_workspace_into_file_manager; +use nargo::package::Package; +use nargo::prepare_package; +use nargo::workspace::Workspace; +use nargo_toml::{get_package_manifest, resolve_workspace_from_toml, PackageSelection}; +use noirc_driver::{ + compile_no_check, file_manager_with_stdlib, CompileOptions, 
CompiledProgram, + NOIR_ARTIFACT_VERSION_STRING, +}; + +use noirc_frontend::graph::CrateName; + +use clap::Args; + +use crate::backends::Backend; +use crate::errors::CliError; + +use super::check_cmd::check_crate_and_report_errors; + +use super::compile_cmd::report_errors; +use super::fs::program::save_program_to_file; +use super::NargoConfig; + +/// Exports functions marked with #[export] attribute +#[derive(Debug, Clone, Args)] +pub(crate) struct ExportCommand { + /// The name of the package to compile + #[clap(long, conflicts_with = "workspace")] + package: Option, + + /// Compile all packages in the workspace + #[clap(long, conflicts_with = "package")] + workspace: bool, + + #[clap(flatten)] + compile_options: CompileOptions, +} + +pub(crate) fn run( + _backend: &Backend, + args: ExportCommand, + config: NargoConfig, +) -> Result<(), CliError> { + let toml_path = get_package_manifest(&config.program_dir)?; + let default_selection = + if args.workspace { PackageSelection::All } else { PackageSelection::DefaultOrAll }; + let selection = args.package.map_or(default_selection, PackageSelection::Selected); + + let workspace = resolve_workspace_from_toml( + &toml_path, + selection, + Some(NOIR_ARTIFACT_VERSION_STRING.to_owned()), + )?; + + let mut workspace_file_manager = file_manager_with_stdlib(&workspace.root_dir); + insert_all_files_for_workspace_into_file_manager(&workspace, &mut workspace_file_manager); + + let library_packages: Vec<_> = + workspace.into_iter().filter(|package| package.is_library()).collect(); + + library_packages + .par_iter() + .map(|package| { + compile_exported_functions( + &workspace_file_manager, + &workspace, + package, + &args.compile_options, + ) + }) + .collect() +} + +fn compile_exported_functions( + file_manager: &FileManager, + workspace: &Workspace, + package: &Package, + compile_options: &CompileOptions, +) -> Result<(), CliError> { + let (mut context, crate_id) = prepare_package(file_manager, package); + 
check_crate_and_report_errors( + &mut context, + crate_id, + compile_options.deny_warnings, + compile_options.disable_macros, + compile_options.silence_warnings, + )?; + + let exported_functions = context.get_all_exported_functions_in_crate(&crate_id); + + let exported_programs = try_vecmap( + exported_functions, + |(function_name, function_id)| -> Result<(String, CompiledProgram), CompileError> { + // TODO: We should to refactor how to deal with compilation errors to avoid this. + let program = compile_no_check(&context, compile_options, function_id, None, false) + .map_err(|error| vec![FileDiagnostic::from(error)]); + + let program = report_errors( + program.map(|program| (program, Vec::new())), + file_manager, + compile_options.deny_warnings, + compile_options.silence_warnings, + )?; + + Ok((function_name, program)) + }, + )?; + + let export_dir = workspace.export_directory_path(); + for (function_name, program) in exported_programs { + save_program_to_file(&program.into(), &function_name.parse().unwrap(), &export_dir); + } + Ok(()) +} diff --git a/noir/tooling/nargo_cli/src/cli/fs/program.rs b/noir/tooling/nargo_cli/src/cli/fs/program.rs index 807df25ba48f..1d2f012736ee 100644 --- a/noir/tooling/nargo_cli/src/cli/fs/program.rs +++ b/noir/tooling/nargo_cli/src/cli/fs/program.rs @@ -2,7 +2,7 @@ use std::path::{Path, PathBuf}; use acvm::acir::circuit::Circuit; use nargo::artifacts::{ - contract::PreprocessedContract, debug::DebugArtifact, program::PreprocessedProgram, + contract::ContractArtifact, debug::DebugArtifact, program::ProgramArtifact, }; use noirc_frontend::graph::CrateName; @@ -11,29 +11,29 @@ use crate::errors::FilesystemError; use super::{create_named_dir, write_to_file}; pub(crate) fn save_program_to_file>( - compiled_program: &PreprocessedProgram, + program_artifact: &ProgramArtifact, crate_name: &CrateName, circuit_dir: P, ) -> PathBuf { let circuit_name: String = crate_name.into(); - save_build_artifact_to_file(compiled_program, &circuit_name, 
circuit_dir) + save_build_artifact_to_file(program_artifact, &circuit_name, circuit_dir) } /// Writes the bytecode as acir.gz pub(crate) fn only_acir>( - compiled_program: &PreprocessedProgram, + program_artifact: &ProgramArtifact, circuit_dir: P, ) -> PathBuf { create_named_dir(circuit_dir.as_ref(), "target"); let circuit_path = circuit_dir.as_ref().join("acir").with_extension("gz"); - let bytes = Circuit::serialize_circuit(&compiled_program.bytecode); + let bytes = Circuit::serialize_circuit(&program_artifact.bytecode); write_to_file(&bytes, &circuit_path); circuit_path } pub(crate) fn save_contract_to_file>( - compiled_contract: &PreprocessedContract, + compiled_contract: &ContractArtifact, circuit_name: &str, circuit_dir: P, ) -> PathBuf { @@ -64,7 +64,7 @@ fn save_build_artifact_to_file, T: ?Sized + serde::Serialize>( pub(crate) fn read_program_from_file>( circuit_path: P, -) -> Result { +) -> Result { let file_path = circuit_path.as_ref().with_extension("json"); let input_string = diff --git a/noir/tooling/nargo_cli/src/cli/mod.rs b/noir/tooling/nargo_cli/src/cli/mod.rs index cbed65593a10..01adbe9da980 100644 --- a/noir/tooling/nargo_cli/src/cli/mod.rs +++ b/noir/tooling/nargo_cli/src/cli/mod.rs @@ -17,6 +17,7 @@ mod compile_cmd; mod dap_cmd; mod debug_cmd; mod execute_cmd; +mod export_cmd; mod fmt_cmd; mod info_cmd; mod init_cmd; @@ -69,6 +70,8 @@ enum NargoCommand { Init(init_cmd::InitCommand), Execute(execute_cmd::ExecuteCommand), #[command(hide = true)] // Hidden while the feature is being built out + Export(export_cmd::ExportCommand), + #[command(hide = true)] // Hidden while the feature is being built out Debug(debug_cmd::DebugCommand), Prove(prove_cmd::ProveCommand), Verify(verify_cmd::VerifyCommand), @@ -109,6 +112,7 @@ pub(crate) fn start_cli() -> eyre::Result<()> { NargoCommand::Compile(args) => compile_cmd::run(&backend, args, config), NargoCommand::Debug(args) => debug_cmd::run(&backend, args, config), NargoCommand::Execute(args) => 
execute_cmd::run(&backend, args, config), + NargoCommand::Export(args) => export_cmd::run(&backend, args, config), NargoCommand::Prove(args) => prove_cmd::run(&backend, args, config), NargoCommand::Verify(args) => verify_cmd::run(&backend, args, config), NargoCommand::Test(args) => test_cmd::run(&backend, args, config), diff --git a/noir/tooling/nargo_cli/src/cli/test_cmd.rs b/noir/tooling/nargo_cli/src/cli/test_cmd.rs index 32893baa1577..69f03b49cbde 100644 --- a/noir/tooling/nargo_cli/src/cli/test_cmd.rs +++ b/noir/tooling/nargo_cli/src/cli/test_cmd.rs @@ -144,7 +144,7 @@ fn run_tests( for (test_name, test_function) in test_functions { write!(writer, "[{}] Testing {test_name}... ", package.name) - .expect("Failed to write to stdout"); + .expect("Failed to write to stderr"); writer.flush().expect("Failed to flush writer"); match run_test( @@ -159,13 +159,13 @@ fn run_tests( writer .set_color(ColorSpec::new().set_fg(Some(Color::Green))) .expect("Failed to set color"); - writeln!(writer, "ok").expect("Failed to write to stdout"); + writeln!(writer, "ok").expect("Failed to write to stderr"); } TestStatus::Fail { message, error_diagnostic } => { writer .set_color(ColorSpec::new().set_fg(Some(Color::Red))) .expect("Failed to set color"); - writeln!(writer, "{message}\n").expect("Failed to write to stdout"); + writeln!(writer, "FAIL\n{message}\n").expect("Failed to write to stderr"); if let Some(diag) = error_diagnostic { noirc_errors::reporter::report_all( context.file_manager.as_file_map(), @@ -189,12 +189,13 @@ fn run_tests( writer.reset().expect("Failed to reset writer"); } - write!(writer, "[{}] ", package.name).expect("Failed to write to stdout"); + write!(writer, "[{}] ", package.name).expect("Failed to write to stderr"); if count_failed == 0 { writer.set_color(ColorSpec::new().set_fg(Some(Color::Green))).expect("Failed to set color"); - writeln!(writer, "{count_all} test{plural} passed").expect("Failed to write to stdout"); + write!(writer, "{count_all} 
test{plural} passed").expect("Failed to write to stderr"); writer.reset().expect("Failed to reset writer"); + writeln!(writer).expect("Failed to write to stderr"); Ok(()) } else { @@ -207,13 +208,15 @@ fn run_tests( .set_color(ColorSpec::new().set_fg(Some(Color::Green))) .expect("Failed to set color"); write!(writer, "{count_passed} test{plural_passed} passed, ",) - .expect("Failed to write to stdout"); + .expect("Failed to write to stderr"); } + writer.set_color(ColorSpec::new().set_fg(Some(Color::Red))).expect("Failed to set color"); - writeln!(writer, "{count_failed} test{plural_failed} failed") - .expect("Failed to write to stdout"); + write!(writer, "{count_failed} test{plural_failed} failed") + .expect("Failed to write to stderr"); writer.reset().expect("Failed to reset writer"); + // Writes final newline. Err(CliError::Generic(String::new())) } } diff --git a/noir/tooling/noir_codegen/.gitignore b/noir/tooling/noir_codegen/.gitignore index 15ea344d4535..29b0e40ffa84 100644 --- a/noir/tooling/noir_codegen/.gitignore +++ b/noir/tooling/noir_codegen/.gitignore @@ -1,5 +1,5 @@ crs lib -!test/*/target test/codegen +test/test_lib/export diff --git a/noir/tooling/noir_codegen/package.json b/noir/tooling/noir_codegen/package.json index 97bab8157647..7d76b1a91381 100644 --- a/noir/tooling/noir_codegen/package.json +++ b/noir/tooling/noir_codegen/package.json @@ -35,9 +35,9 @@ "dev": "tsc-multi --watch", "build": "tsc", "test": "yarn test:codegen && yarn test:node && yarn test:clean", - "test:codegen": "tsx src/main.ts ./test/assert_lt/target/** --out-dir ./test/codegen", + "test:codegen": "nargo export --program-dir=./test/test_lib && tsx src/main.ts ./test/test_lib/export/** --out-dir ./test/codegen", "test:node": "mocha --timeout 25000 --exit --config ./.mocharc.json", - "test:clean": "rm -rf ./test/codegen", + "test:clean": "rm -rf ./test/codegen ./test/test_lib/export", "prettier": "prettier 'src/**/*.ts'", "prettier:fix": "prettier --write 'src/**/*.ts' 
'test/**/*.ts'", "lint": "NODE_NO_WARNINGS=1 eslint . --ext .ts --ignore-path ./.eslintignore --max-warnings 0", diff --git a/noir/tooling/noir_codegen/src/index.ts b/noir/tooling/noir_codegen/src/index.ts index 19829cd06ff0..fbbab07bcfee 100644 --- a/noir/tooling/noir_codegen/src/index.ts +++ b/noir/tooling/noir_codegen/src/index.ts @@ -46,14 +46,14 @@ export const codegen = (programs: [string, CompiledCircuit][]): string => { functions.push(codegenFunction(name, stripUnwantedFields(program), function_sig)); } + const structTypeDefinitions: string = codegenStructDefinitions(structTypeMap, primitiveTypeMap); + // Add the primitive Noir types that do not have a 1-1 mapping to TypeScript. const primitiveTypeAliases: string[] = []; for (const value of primitiveTypeMap.values()) { primitiveTypeAliases.push(`export type ${value.aliasName} = ${value.tsType};`); } - const structTypeDefinitions: string = codegenStructDefinitions(structTypeMap, primitiveTypeMap); - results = results.concat(...primitiveTypeAliases, '', structTypeDefinitions, ...functions); return results.join('\n'); diff --git a/noir/tooling/noir_codegen/test/assert_lt/target/assert_lt.json b/noir/tooling/noir_codegen/test/assert_lt/target/assert_lt.json deleted file mode 100644 index be1b134d6424..000000000000 --- a/noir/tooling/noir_codegen/test/assert_lt/target/assert_lt.json +++ /dev/null @@ -1 +0,0 @@ 
-{"noir_version":"0.22.0+6f69b3f511c8b4c51404ad4c18131bdf6b7f6a94","hash":3763979860977920209,"abi":{"parameters":[{"name":"x","type":{"kind":"integer","sign":"unsigned","width":64},"visibility":"private"},{"name":"y","type":{"kind":"integer","sign":"unsigned","width":64},"visibility":"public"},{"name":"array","type":{"kind":"array","length":5,"type":{"kind":"integer","sign":"unsigned","width":8}},"visibility":"private"},{"name":"my_struct","type":{"kind":"struct","path":"NestedStruct","fields":[{"name":"foo","type":{"kind":"struct","path":"MyStruct","fields":[{"name":"foo","type":{"kind":"boolean"}},{"name":"bar","type":{"kind":"array","length":3,"type":{"kind":"string","length":5}}}]}},{"name":"bar","type":{"kind":"array","length":3,"type":{"kind":"struct","path":"MyStruct","fields":[{"name":"foo","type":{"kind":"boolean"}},{"name":"bar","type":{"kind":"array","length":3,"type":{"kind":"string","length":5}}}]}}},{"name":"baz","type":{"kind":"integer","sign":"unsigned","width":64}}]},"visibility":"private"},{"name":"string","type":{"kind":"string","length":5},"visibility":"private"}],"param_witnesses":{"array":[{"start":3,"end":8}],"my_struct":[{"start":8,"end":73}],"string":[{"start":73,"end":78}],"x":[{"start":1,"end":2}],"y":[{"start":2,"end":3}]},"return_type":{"abi_type":{"kind":"tuple","fields":[{"kind":"integer","sign":"unsigned","width":64},{"kind":"integer","sign":"unsigned","width":64},{"kind":"struct","path":"MyStruct","fields":[{"name":"foo","type":{"kind":"boolean"}},{"name":"bar","type":{"kind":"array","length":3,"type":{"kind":"string","length":5}}}]}]},"visibility":"public"},"return_witnesses":[98,99,100,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]},"bytecode":"H4sIAAAAAAAA/+1cC3PTRhBex8SJ40cezsNxaOtSoGnpQ7Jsy+6LkFJaSFIoKVCgQGzsTJlpCWU85Zf1/9Ur6drz9pAF3jtZM9FMRvliZff7bqXV6bTeYwD4E/wtNfyZCfY7Ep4hOD38mZfwGYJnCc4QPBdg4RMC+7hZk232vORLtutYzXq979b6tmN3rFq722pY9Ua32bJbdqPV6NVajtNv1Vtuu9t2rbZdd/r2caPtHAeG5/k4WmIcsmRcFgjOEZwnuEBwkeBFgpcIXiZ4heASwasErxG8TvCGx
jiXhzbKGuJcBv44b5JxqRC8RfBZgt8h+F2C3yO4SvD7BJ8j+AOCzxN8geCLBH+oMc7bQxvbGuK8Dfxx/oiMy8cEXyL4E4I/Jfgzgj8n2CLYJrhGsENwneAGwU2CXY1xbg1ttDTEuQX8cW6TcfmC4C8J/orgrwn+huDLBO8QfIXgXYK/Jfgqwd8RfI3g7wn+AUbv/9fJ5zcI3iN4n+AD+O88EpuOecAO0zkk85whPCe1fZ3R1o9s4+fYaSkmOuPEx9m/PsV2E/x5qRwz3NLBPqNBCxA/dPyKir+xOtcRnJsa7N4C3otTh+5b/DHSmkhuMNr6iU2zUzOVSPg4jyaS23CaSFiCc1uD3UOY7kSCug/5Y6Q1kewx2vqZTbPjmEokfJxHE8kdOE0kLMG5o8HuXZjuRIK67/LHSAtXnDkdAv+j/D1IRgLdZ7T1C5tmp24qgfJxHk2g9+E0gbIE574Guw9guhMo6n7AHyMtXDHR3wP+BPoQkpFADxht/cqm2WmYSqB8nEcT6CM4TaAswXmkwe5jmO4Eirof88dIC1dM9A+BP4E+mXLdGJ8nihhNqttU4ksB/w3q72CPxRYzhH9GMVbWhJuGJGrJ3MV+VuJfDH5Pa9CSYbVZa6CWM6A+j2gs5BhlAs2M+my0Ocdr0xLFTnw23S7ayEpjIsZPcM9Jn89L45VlHq+U5FPYFTirzW8N6xi84qgw/QsKHgsG9S9o89vsCo1h+nMKHjmD+nPa/Dax3sIrfgvTn1fwyBvUn9fmt9lHG4Ux+gsKHgWD+gva/DaxTsm7x4XpLyp4FA3ql/lF5ZqNmSu/X6eDNhbH6F9U8Fg0qF/mF5VrNmau/H5drPfzioLD9C8peCwZ1C/zi8o1HzNXfr8u1m56Bdth+pcVPJYN6pf5ReWai5krv98m1sl6xfRh+lcUPFYM6pf5ReWaj5krv9+mlwNLY/SXFDxKBvXL/KJyLSSIay5BXPMxc+X363rz69Ux+lcVPFYN6pf5ReW6kiCuywniGve48vt1vTWmtTH61xQ81gzql/lF5ZqNmSu/39pTtLE+Rv+6gse6Qf0yv6hcszFz5ffrenPhjTH6NxQ8Ngzql/lF5ZpLENdCgriWYubK77eJ30H0vqwcpr+s4FE2qF/mF5VrNkFcizFz5ffr4PdzvS+Xh+nfVPDYNKh/U5tfx3tuqYzRX1HwqBjUX9Hm1+2hja0x+rcUPLYM6he+8L20eH//4uWz5wNKGre09LsodEhJx1xQHDsHGivI0vD/qhGZHJcfufJl0mqdmbfnZdM/KKSz2ZbH9SjY4+ujWSnw4hg8AXal41PSPiXFZBdGTxx6TOo1duTqCvH/iStNPAoGkrvs7YiP4789FDqSTbExly9pu6gmtdVnHM+w8XtDnhbhaR8xau4wxnWC8QtNcNN83YTF2Zps4yy7tDmvky7w5x3Zpq64P+Xj7ejk2WOM+77EEc9PvGGLElXR7w1vslieJsq1cFkCb7JYWoCvrHEJHJeWcQkAl1hx6Q6XhPCRGB9fcAqP01icUmKPLOyLhb2wsP9VFfw+V+fA72d1HvzZ4kXw+1Nhvyfsz4Q9mS6B33sJ+y1hjyXsq4QXGN6UcR0YBx2fLfDdID5j4RoWPsNjHRr2+sH+PtjTB/v4YO+ey8E4XgF/coH9d66C32fnGvj9dLCHDvZ6wTYNe+D3xDmA0cmLWFafZJzw/Mbzrwejk5fnwf5ssO8MBv0/Xgyqg5Nqp9ervno2+K168lf/5fHvJ6/+ASeieLQUUAAA"} \ No newline at end of file diff --git a/noir/tooling/noir_codegen/test/index.test.ts b/noir/tooling/noir_codegen/test/index.test.ts index 822993b2f1e4..03fb680a537d 100644 --- 
a/noir/tooling/noir_codegen/test/index.test.ts +++ b/noir/tooling/noir_codegen/test/index.test.ts @@ -1,10 +1,12 @@ import { expect } from 'chai'; -import { assert_lt, MyStruct, u64, ForeignCallHandler } from './codegen/index.js'; +// eslint-disable-next-line @typescript-eslint/ban-ts-comment +// @ts-ignore File is codegenned at test time. +import { exported_function_foo, MyStruct, u64, ForeignCallHandler } from './codegen/index.js'; it('codegens a callable function', async () => { - const my_struct = { foo: true, bar: ['12345', '12345', '12345'] }; + const my_struct = { foo: true, bar: ['12345', '12345', '12345'], baz: '0x00' }; - const [sum, constant, struct]: [u64, u64, MyStruct] = await assert_lt( + const [sum, constant, struct]: [u64, u64, MyStruct] = await exported_function_foo( '2', '3', [0, 0, 0, 0, 0], @@ -18,7 +20,7 @@ it('codegens a callable function', async () => { expect(sum).to.be.eq('0x05'); expect(constant).to.be.eq('0x03'); - expect(struct).to.be.deep.eq({ foo: true, bar: ['12345', '12345', '12345'] }); + expect(struct).to.be.deep.eq(my_struct); }); it('allows passing a custom foreign call handler', async () => { @@ -33,9 +35,9 @@ it('allows passing a custom foreign call handler', async () => { return []; }; - const my_struct = { foo: true, bar: ['12345', '12345', '12345'] }; + const my_struct = { foo: true, bar: ['12345', '12345', '12345'], baz: '0x00' }; - const [sum, constant, struct]: [u64, u64, MyStruct] = await assert_lt( + const [sum, constant, struct]: [u64, u64, MyStruct] = await exported_function_foo( '2', '3', [0, 0, 0, 0, 0], @@ -100,5 +102,5 @@ it('allows passing a custom foreign call handler', async () => { expect(sum).to.be.eq('0x05'); expect(constant).to.be.eq('0x03'); - expect(struct).to.be.deep.eq({ foo: true, bar: ['12345', '12345', '12345'] }); + expect(struct).to.be.deep.eq(my_struct); }); diff --git a/noir/tooling/noir_codegen/test/assert_lt/Nargo.toml b/noir/tooling/noir_codegen/test/test_lib/Nargo.toml similarity index 55% 
rename from noir/tooling/noir_codegen/test/assert_lt/Nargo.toml rename to noir/tooling/noir_codegen/test/test_lib/Nargo.toml index f32ec18cae78..74b6167b614a 100644 --- a/noir/tooling/noir_codegen/test/assert_lt/Nargo.toml +++ b/noir/tooling/noir_codegen/test/test_lib/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "assert_lt" -type = "bin" +name = "test_lib" +type = "lib" authors = [""] [dependencies] diff --git a/noir/tooling/noir_codegen/test/assert_lt/src/main.nr b/noir/tooling/noir_codegen/test/test_lib/src/lib.nr similarity index 55% rename from noir/tooling/noir_codegen/test/assert_lt/src/main.nr rename to noir/tooling/noir_codegen/test/test_lib/src/lib.nr index ed370bd87c03..23607c6f65f1 100644 --- a/noir/tooling/noir_codegen/test/assert_lt/src/main.nr +++ b/noir/tooling/noir_codegen/test/test_lib/src/lib.nr @@ -1,6 +1,7 @@ struct MyStruct { foo: bool, bar: [str<5>; 3], + baz: Field } struct NestedStruct { @@ -9,13 +10,8 @@ struct NestedStruct { baz: u64 } -fn main( - x: u64, - y: pub u64, - array: [u8; 5], - my_struct: NestedStruct, - string: str<5> -) -> pub (u64, u64, MyStruct) { +#[export] +fn exported_function_foo(x: u64, y: u64, array: [u8; 5], my_struct: NestedStruct, string: str<5>) -> (u64, u64, MyStruct) { assert(array.len() == 5); assert(my_struct.foo.foo); assert(string == "12345"); @@ -24,3 +20,8 @@ fn main( assert(x < y); (x + y, 3, my_struct.foo) } + +#[export] +fn exported_function_bar(my_struct: NestedStruct) -> (u64) { + my_struct.baz +} diff --git a/noir/tooling/noir_js_backend_barretenberg/package.json b/noir/tooling/noir_js_backend_barretenberg/package.json index 93fdc8563381..1688db1ab2d6 100644 --- a/noir/tooling/noir_js_backend_barretenberg/package.json +++ b/noir/tooling/noir_js_backend_barretenberg/package.json @@ -42,7 +42,7 @@ "lint": "NODE_NO_WARNINGS=1 eslint . 
--ext .ts --ignore-path ./.eslintignore --max-warnings 0" }, "dependencies": { - "@aztec/bb.js": "0.16.0", + "@aztec/bb.js": "0.17.0", "@noir-lang/types": "workspace:*", "fflate": "^0.8.0" }, diff --git a/noir/tooling/noir_js_backend_barretenberg/src/index.ts b/noir/tooling/noir_js_backend_barretenberg/src/index.ts index 100418debd01..6e619fd59cf3 100644 --- a/noir/tooling/noir_js_backend_barretenberg/src/index.ts +++ b/noir/tooling/noir_js_backend_barretenberg/src/index.ts @@ -5,8 +5,7 @@ import { Backend, CompiledCircuit, ProofData } from '@noir-lang/types'; import { BackendOptions } from './types.js'; import { deflattenPublicInputs, flattenPublicInputsAsArray } from './public_inputs.js'; -export { flattenPublicInputs } from './public_inputs.js'; - +export { publicInputsToWitnessMap } from './public_inputs.js'; // This is the number of bytes in a UltraPlonk proof // minus the public inputs. const numBytesInProofWithoutPublicInputs: number = 2144; @@ -34,7 +33,7 @@ export class BarretenbergBackend implements Backend { // eslint-disable-next-line @typescript-eslint/ban-ts-comment //@ts-ignore const { Barretenberg, RawBuffer, Crs } = await import('@aztec/bb.js'); - const api = await Barretenberg.new(this.options.threads); + const api = await Barretenberg.new({ threads: this.options.threads }); const [_exact, _total, subgroupSize] = await api.acirGetCircuitSizes(this.acirUncompressedBytecode); const crs = await Crs.new(subgroupSize + 1); @@ -95,7 +94,7 @@ export class BarretenbergBackend implements Backend { const publicInputsConcatenated = proofWithPublicInputs.slice(0, splitIndex); const proof = proofWithPublicInputs.slice(splitIndex); - const publicInputs = deflattenPublicInputs(publicInputsConcatenated, this.acirCircuit.abi); + const publicInputs = deflattenPublicInputs(publicInputsConcatenated); return { proof, publicInputs }; } @@ -127,7 +126,9 @@ export class BarretenbergBackend implements Backend { }> { await this.instantiate(); const proof = 
reconstructProofWithPublicInputs(proofData); - const proofAsFields = await this.api.acirSerializeProofIntoFields(this.acirComposer, proof, numOfPublicInputs); + const proofAsFields = ( + await this.api.acirSerializeProofIntoFields(this.acirComposer, proof, numOfPublicInputs) + ).slice(numOfPublicInputs); // TODO: perhaps we should put this in the init function. Need to benchmark // TODO how long it takes. diff --git a/noir/tooling/noir_js_backend_barretenberg/src/public_inputs.ts b/noir/tooling/noir_js_backend_barretenberg/src/public_inputs.ts index 37bc5b130122..75ee0de68007 100644 --- a/noir/tooling/noir_js_backend_barretenberg/src/public_inputs.ts +++ b/noir/tooling/noir_js_backend_barretenberg/src/public_inputs.ts @@ -1,18 +1,11 @@ import { Abi, WitnessMap } from '@noir-lang/types'; -export function flattenPublicInputs(publicInputs: WitnessMap): string[] { - const publicInputIndices = [...publicInputs.keys()].sort((a, b) => a - b); - const flattenedPublicInputs = publicInputIndices.map((index) => publicInputs.get(index) as string); - return flattenedPublicInputs; -} - -export function flattenPublicInputsAsArray(publicInputs: WitnessMap): Uint8Array { - const flatPublicInputs = flattenPublicInputs(publicInputs); - const flattenedPublicInputs = flatPublicInputs.map(hexToUint8Array); +export function flattenPublicInputsAsArray(publicInputs: string[]): Uint8Array { + const flattenedPublicInputs = publicInputs.map(hexToUint8Array); return flattenUint8Arrays(flattenedPublicInputs); } -export function deflattenPublicInputs(flattenedPublicInputs: Uint8Array, abi: Abi): WitnessMap { +export function deflattenPublicInputs(flattenedPublicInputs: Uint8Array): string[] { const publicInputSize = 32; const chunkedFlattenedPublicInputs: Uint8Array[] = []; @@ -21,6 +14,16 @@ export function deflattenPublicInputs(flattenedPublicInputs: Uint8Array, abi: Ab chunkedFlattenedPublicInputs.push(publicInput); } + return chunkedFlattenedPublicInputs.map(uint8ArrayToHex); +} + +export 
function witnessMapToPublicInputs(publicInputs: WitnessMap): string[] { + const publicInputIndices = [...publicInputs.keys()].sort((a, b) => a - b); + const flattenedPublicInputs = publicInputIndices.map((index) => publicInputs.get(index) as string); + return flattenedPublicInputs; +} + +export function publicInputsToWitnessMap(publicInputs: string[], abi: Abi): WitnessMap { const return_value_witnesses = abi.return_witnesses; const public_parameters = abi.parameters.filter((param) => param.visibility === 'public'); const public_parameter_witnesses: number[] = public_parameters.flatMap((param) => @@ -35,13 +38,13 @@ export function deflattenPublicInputs(flattenedPublicInputs: Uint8Array, abi: Ab (a, b) => a - b, ); - const publicInputs: WitnessMap = new Map(); + const witnessMap: WitnessMap = new Map(); public_input_witnesses.forEach((witness_index, index) => { - const witness_value = uint8ArrayToHex(chunkedFlattenedPublicInputs[index]); - publicInputs.set(witness_index, witness_value); + const witness_value = publicInputs[index]; + witnessMap.set(witness_index, witness_value); }); - return publicInputs; + return witnessMap; } function flattenUint8Arrays(arrays: Uint8Array[]): Uint8Array { diff --git a/noir/tooling/noir_js_backend_barretenberg/test/public_input_deflattening.test.ts b/noir/tooling/noir_js_backend_barretenberg/test/public_input_deflattening.test.ts index dab1c56436a5..079a1ad268b8 100644 --- a/noir/tooling/noir_js_backend_barretenberg/test/public_input_deflattening.test.ts +++ b/noir/tooling/noir_js_backend_barretenberg/test/public_input_deflattening.test.ts @@ -1,6 +1,6 @@ import { Abi } from '@noir-lang/types'; import { expect } from 'chai'; -import { flattenPublicInputsAsArray, deflattenPublicInputs, flattenPublicInputs } from '../src/public_inputs.js'; +import { witnessMapToPublicInputs, publicInputsToWitnessMap } from '../src/public_inputs.js'; const abi: Abi = { parameters: [ @@ -69,7 +69,7 @@ it('flattens a witness map in order of its witness 
indices', async () => { ]), ); - const flattened_public_inputs = flattenPublicInputs(witness_map); + const flattened_public_inputs = witnessMapToPublicInputs(witness_map); expect(flattened_public_inputs).to.be.deep.eq([ '0x0000000000000000000000000000000000000000000000000000000000000002', '0x000000000000000000000000000000000000000000000000000000000000000b', @@ -89,8 +89,8 @@ it('recovers the original witness map when deflattening a public input array', a ]), ); - const flattened_public_inputs = flattenPublicInputsAsArray(witness_map); - const deflattened_public_inputs = deflattenPublicInputs(flattened_public_inputs, abi); + const flattened_public_inputs = witnessMapToPublicInputs(witness_map); + const deflattened_public_inputs = publicInputsToWitnessMap(flattened_public_inputs, abi); expect(deflattened_public_inputs).to.be.deep.eq(witness_map); }); diff --git a/noir/tooling/noir_js_types/src/types.ts b/noir/tooling/noir_js_types/src/types.ts index b997d92425d3..ee4921bd6062 100644 --- a/noir/tooling/noir_js_types/src/types.ts +++ b/noir/tooling/noir_js_types/src/types.ts @@ -1,4 +1,4 @@ -import { Abi, WitnessMap } from '@noir-lang/noirc_abi'; +import { Abi } from '@noir-lang/noirc_abi'; export { Abi, WitnessMap } from '@noir-lang/noirc_abi'; @@ -45,7 +45,7 @@ export interface Backend { * */ export type ProofData = { /** @description Public inputs of a proof */ - publicInputs: WitnessMap; + publicInputs: string[]; /** @description An byte array representing the proof */ proof: Uint8Array; }; diff --git a/noir/tooling/noirc_abi/src/lib.rs b/noir/tooling/noirc_abi/src/lib.rs index 884c49c01066..066b1635ced6 100644 --- a/noir/tooling/noirc_abi/src/lib.rs +++ b/noir/tooling/noirc_abi/src/lib.rs @@ -157,11 +157,7 @@ impl AbiType { .expect("Cannot have variable sized strings as a parameter to main"); Self::String { length: size } } - Type::FmtString(_, _) => unreachable!("format strings cannot be used in the abi"), - Type::Error => unreachable!(), - Type::Unit => 
unreachable!(), - Type::Constant(_) => unreachable!(), - Type::TraitAsType(..) => unreachable!(), + Type::Struct(def, ref args) => { let struct_type = def.borrow(); let fields = struct_type.get_fields(args); @@ -175,12 +171,17 @@ impl AbiType { let fields = vecmap(fields, |typ| Self::from_type(context, typ)); Self::Tuple { fields } } - Type::TypeVariable(_, _) => unreachable!(), - Type::NamedGeneric(..) => unreachable!(), - Type::Forall(..) => unreachable!(), - Type::Function(_, _, _) => unreachable!(), + Type::Error + | Type::Unit + | Type::Constant(_) + | Type::TraitAsType(..) + | Type::TypeVariable(_, _) + | Type::NamedGeneric(..) + | Type::Forall(..) + | Type::NotConstant + | Type::Function(_, _, _) => unreachable!("Type cannot be used in the abi"), + Type::FmtString(_, _) => unreachable!("format strings cannot be used in the abi"), Type::MutableReference(_) => unreachable!("&mut cannot be used in the abi"), - Type::NotConstant => unreachable!(), } } diff --git a/noir/yarn.lock b/noir/yarn.lock index 65b977864b4d..01860e559218 100644 --- a/noir/yarn.lock +++ b/noir/yarn.lock @@ -235,6 +235,20 @@ __metadata: languageName: node linkType: hard +"@aztec/bb.js@npm:0.17.0": + version: 0.17.0 + resolution: "@aztec/bb.js@npm:0.17.0" + dependencies: + comlink: ^4.4.1 + commander: ^10.0.1 + debug: ^4.3.4 + tslib: ^2.4.0 + bin: + bb.js: dest/node/main.js + checksum: 459838076e4db7e6ca17f95acbd5c83028ea3b5c21e016a1561cec065a95d20e8d65906ad63e44e071556bc331070ceda6d48c058f5accdb76b3c6daab8ef1a5 + languageName: node + linkType: hard + "@babel/code-frame@npm:^7.0.0, @babel/code-frame@npm:^7.10.4, @babel/code-frame@npm:^7.12.11, @babel/code-frame@npm:^7.16.0, @babel/code-frame@npm:^7.22.13, @babel/code-frame@npm:^7.23.5, @babel/code-frame@npm:^7.8.3": version: 7.23.5 resolution: "@babel/code-frame@npm:7.23.5" @@ -3958,7 +3972,7 @@ __metadata: version: 0.0.0-use.local resolution: "@noir-lang/backend_barretenberg@workspace:tooling/noir_js_backend_barretenberg" dependencies: - 
"@aztec/bb.js": 0.16.0 + "@aztec/bb.js": 0.17.0 "@noir-lang/types": "workspace:*" "@types/node": ^20.6.2 "@types/prettier": ^3 diff --git a/yarn-project/yarn.lock b/yarn-project/yarn.lock index 6cd9c7ffcd5e..9aefd3093359 100644 --- a/yarn-project/yarn.lock +++ b/yarn-project/yarn.lock @@ -2766,7 +2766,7 @@ __metadata: version: 0.0.0-use.local resolution: "@noir-lang/backend_barretenberg@portal:../noir/packages/backend_barretenberg::locator=%40aztec%2Faztec3-packages%40workspace%3A." dependencies: - "@aztec/bb.js": 0.16.0 + "@aztec/bb.js": 0.17.0 "@noir-lang/types": 0.22.0 fflate: ^0.8.0 languageName: node